-rw-r--r-- | applypatch/applypatch.cpp | 386
-rw-r--r-- | edify/expr.cpp | 2
-rw-r--r-- | install.cpp | 14
-rw-r--r-- | otafault/ota_io.cpp | 2
-rw-r--r-- | otafault/ota_io.h | 2
-rw-r--r-- | recovery.cpp | 72
-rw-r--r-- | screen_ui.cpp | 6
-rw-r--r-- | screen_ui.h | 6
-rw-r--r-- | tests/component/applypatch_test.cpp | 124
-rw-r--r-- | ui.cpp | 2
-rw-r--r-- | updater/blockimg.cpp | 795
-rw-r--r-- | verifier.cpp | 2
-rw-r--r-- | verifier.h | 4
-rw-r--r-- | wear_touch.cpp | 2
14 files changed, 524 insertions, 895 deletions
diff --git a/applypatch/applypatch.cpp b/applypatch/applypatch.cpp index 500663120..7be3fdbde 100644 --- a/applypatch/applypatch.cpp +++ b/applypatch/applypatch.cpp @@ -32,6 +32,7 @@ #include <utility> #include <vector> +#include <android-base/logging.h> #include <android-base/parseint.h> #include <android-base/strings.h> #include <openssl/sha.h> @@ -42,15 +43,9 @@ static int LoadPartitionContents(const std::string& filename, FileContents* file); static ssize_t FileSink(const unsigned char* data, ssize_t len, void* token); -static int GenerateTarget(FileContents* source_file, - const Value* source_patch_value, - FileContents* copy_file, - const Value* copy_patch_value, - const char* source_filename, - const char* target_filename, - const uint8_t target_sha1[SHA_DIGEST_LENGTH], - size_t target_size, - const Value* bonus_data); +static int GenerateTarget(const FileContents& source_file, const std::unique_ptr<Value>& patch, + const std::string& target_filename, + const uint8_t target_sha1[SHA_DIGEST_LENGTH], const Value* bonus_data); // Read a file into memory; store the file contents and associated metadata in *file. // Return 0 on success. @@ -190,7 +185,6 @@ static int LoadPartitionContents(const std::string& filename, FileContents* file return 0; } - // Save the contents of the given FileContents object under the given // filename. Return 0 on success. int SaveFileContents(const char* filename, const FileContents* file) { @@ -480,108 +474,90 @@ int CacheSizeCheck(size_t bytes) { } } -// This function applies binary patches to files in a way that is safe -// (the original file is not touched until we have the desired -// replacement for it) and idempotent (it's okay to run this program -// multiple times). +// This function applies binary patches to EMMC target files in a way that is safe (the original +// file is not touched until we have the desired replacement for it) and idempotent (it's okay to +// run this program multiple times). // -// - if the sha1 hash of <target_filename> is <target_sha1_string>, -// does nothing and exits successfully. +// - If the SHA-1 hash of <target_filename> is <target_sha1_string>, does nothing and exits +// successfully. // -// - otherwise, if the sha1 hash of <source_filename> is one of the -// entries in <patch_sha1_str>, the corresponding patch from -// <patch_data> (which must be a VAL_BLOB) is applied to produce a -// new file (the type of patch is automatically detected from the -// blob data). If that new file has sha1 hash <target_sha1_str>, -// moves it to replace <target_filename>, and exits successfully. -// Note that if <source_filename> and <target_filename> are not the -// same, <source_filename> is NOT deleted on success. -// <target_filename> may be the string "-" to mean "the same as -// source_filename". +// - Otherwise, if the SHA-1 hash of <source_filename> is one of the entries in <patch_sha1_str>, +// the corresponding patch from <patch_data> (which must be a VAL_BLOB) is applied to produce a +// new file (the type of patch is automatically detected from the blob data). If that new file +// has SHA-1 hash <target_sha1_str>, moves it to replace <target_filename>, and exits +// successfully. Note that if <source_filename> and <target_filename> are not the same, +// <source_filename> is NOT deleted on success. <target_filename> may be the string "-" to mean +// "the same as <source_filename>". // -// - otherwise, or if any error is encountered, exits with non-zero -// status. 
+// - Otherwise, or if any error is encountered, exits with non-zero status. // -// <source_filename> may refer to a partition to read the source data. -// See the comments for the LoadPartitionContents() function above -// for the format of such a filename. - -int applypatch(const char* source_filename, - const char* target_filename, - const char* target_sha1_str, - size_t target_size, +// <source_filename> must refer to an EMMC partition to read the source data. See the comments for +// the LoadPartitionContents() function above for the format of such a filename. <target_size> has +// become obsolete since we have dropped the support for patching non-EMMC targets (EMMC targets +// have the size embedded in the filename). +int applypatch(const char* source_filename, const char* target_filename, + const char* target_sha1_str, size_t target_size __unused, const std::vector<std::string>& patch_sha1_str, - const std::vector<std::unique_ptr<Value>>& patch_data, - const Value* bonus_data) { - printf("patch %s: ", source_filename); + const std::vector<std::unique_ptr<Value>>& patch_data, const Value* bonus_data) { + printf("patch %s: ", source_filename); - if (target_filename[0] == '-' && target_filename[1] == '\0') { - target_filename = source_filename; - } + if (target_filename[0] == '-' && target_filename[1] == '\0') { + target_filename = source_filename; + } - uint8_t target_sha1[SHA_DIGEST_LENGTH]; - if (ParseSha1(target_sha1_str, target_sha1) != 0) { - printf("failed to parse tgt-sha1 \"%s\"\n", target_sha1_str); - return 1; - } + if (strncmp(target_filename, "EMMC:", 5) != 0) { + printf("Supporting patching EMMC targets only.\n"); + return 1; + } - FileContents source_file; - const Value* source_patch_value = nullptr; + uint8_t target_sha1[SHA_DIGEST_LENGTH]; + if (ParseSha1(target_sha1_str, target_sha1) != 0) { + printf("failed to parse tgt-sha1 \"%s\"\n", target_sha1_str); + return 1; + } - // We try to load the target file into the source_file object. - if (LoadFileContents(target_filename, &source_file) == 0) { - if (memcmp(source_file.sha1, target_sha1, SHA_DIGEST_LENGTH) == 0) { - // The early-exit case: the patch was already applied, this file - // has the desired hash, nothing for us to do. - printf("already %s\n", short_sha1(target_sha1).c_str()); - return 0; - } + // We try to load the target file into the source_file object. + FileContents source_file; + if (LoadFileContents(target_filename, &source_file) == 0) { + if (memcmp(source_file.sha1, target_sha1, SHA_DIGEST_LENGTH) == 0) { + // The early-exit case: the patch was already applied, this file has the desired hash, nothing + // for us to do. + printf("already %s\n", short_sha1(target_sha1).c_str()); + return 0; } + } - if (source_file.data.empty() || - (target_filename != source_filename && - strcmp(target_filename, source_filename) != 0)) { - // Need to load the source file: either we failed to load the - // target file, or we did but it's different from the source file. - source_file.data.clear(); - LoadFileContents(source_filename, &source_file); - } + if (source_file.data.empty() || + (target_filename != source_filename && strcmp(target_filename, source_filename) != 0)) { + // Need to load the source file: either we failed to load the target file, or we did but it's + // different from the expected. 
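For reference, the "EMMC:" pseudo-filenames that applypatch() now requires follow the shape described in the LoadPartitionContents() comments: EMMC:<partition_device>:<size_1>:<sha1_1>[:<size_2>:<sha1_2>:...]. A minimal standalone sketch of splitting such a string; the helper name and error handling are illustrative, not part of this change:

#include <cstddef>
#include <cstdlib>
#include <string>
#include <utility>
#include <vector>

// Split an "EMMC:<device>:<size_1>:<sha1_1>[:<size_2>:<sha1_2>...]" pseudo-filename
// into the device path and the list of (size, sha1) pairs.
// Returns false if the string is not in that shape.
static bool ParseEmmcTarget(const std::string& filename, std::string* device,
                            std::vector<std::pair<size_t, std::string>>* pairs) {
  std::vector<std::string> pieces;
  size_t begin = 0;
  while (begin <= filename.size()) {
    size_t end = filename.find(':', begin);
    if (end == std::string::npos) end = filename.size();
    pieces.push_back(filename.substr(begin, end - begin));
    begin = end + 1;
  }
  // Need "EMMC", a device, and at least one size/sha1 pair; pairs come in twos.
  if (pieces.size() < 4 || pieces[0] != "EMMC" || pieces.size() % 2 != 0) {
    return false;
  }
  *device = pieces[1];
  for (size_t i = 2; i + 1 < pieces.size(); i += 2) {
    pairs->emplace_back(strtoul(pieces[i].c_str(), nullptr, 10), pieces[i + 1]);
  }
  return true;
}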
+ source_file.data.clear(); + LoadFileContents(source_filename, &source_file); + } - if (!source_file.data.empty()) { - int to_use = FindMatchingPatch(source_file.sha1, patch_sha1_str); - if (to_use >= 0) { - source_patch_value = patch_data[to_use].get(); - } + if (!source_file.data.empty()) { + int to_use = FindMatchingPatch(source_file.sha1, patch_sha1_str); + if (to_use != -1) { + return GenerateTarget(source_file, patch_data[to_use], target_filename, target_sha1, + bonus_data); } + } - FileContents copy_file; - const Value* copy_patch_value = nullptr; - if (source_patch_value == nullptr) { - source_file.data.clear(); - printf("source file is bad; trying copy\n"); - - if (LoadFileContents(CACHE_TEMP_SOURCE, ©_file) < 0) { - // fail. - printf("failed to read copy file\n"); - return 1; - } + printf("source file is bad; trying copy\n"); - int to_use = FindMatchingPatch(copy_file.sha1, patch_sha1_str); - if (to_use >= 0) { - copy_patch_value = patch_data[to_use].get(); - } + FileContents copy_file; + if (LoadFileContents(CACHE_TEMP_SOURCE, ©_file) < 0) { + printf("failed to read copy file\n"); + return 1; + } - if (copy_patch_value == nullptr) { - // fail. - printf("copy file doesn't match source SHA-1s either\n"); - return 1; - } - } + int to_use = FindMatchingPatch(copy_file.sha1, patch_sha1_str); + if (to_use == -1) { + printf("copy file doesn't match source SHA-1s either\n"); + return 1; + } - return GenerateTarget(&source_file, source_patch_value, - ©_file, copy_patch_value, - source_filename, target_filename, - target_sha1, target_size, bonus_data); + return GenerateTarget(copy_file, patch_data[to_use], target_filename, target_sha1, bonus_data); } /* @@ -638,34 +614,9 @@ int applypatch_flash(const char* source_filename, const char* target_filename, return 0; } -static int GenerateTarget(FileContents* source_file, - const Value* source_patch_value, - FileContents* copy_file, - const Value* copy_patch_value, - const char* source_filename, - const char* target_filename, - const uint8_t target_sha1[SHA_DIGEST_LENGTH], - size_t target_size, - const Value* bonus_data) { - // assume that target_filename (eg "/system/app/Foo.apk") is located - // on the same filesystem as its top-level directory ("/system"). - // We need something that exists for calling statfs(). - std::string target_fs = target_filename; - auto slash_pos = target_fs.find('/', 1); - if (slash_pos != std::string::npos) { - target_fs.resize(slash_pos); - } - - FileContents* source_to_use; - const Value* patch; - if (source_patch_value != nullptr) { - source_to_use = source_file; - patch = source_patch_value; - } else { - source_to_use = copy_file; - patch = copy_patch_value; - } - +static int GenerateTarget(const FileContents& source_file, const std::unique_ptr<Value>& patch, + const std::string& target_filename, + const uint8_t target_sha1[SHA_DIGEST_LENGTH], const Value* bonus_data) { if (patch->type != VAL_BLOB) { printf("patch is not a blob\n"); return 1; @@ -683,137 +634,39 @@ static int GenerateTarget(FileContents* source_file, return 1; } - bool target_is_partition = (strncmp(target_filename, "EMMC:", 5) == 0); - const std::string tmp_target_filename = std::string(target_filename) + ".patch"; - - int retry = 1; - bool made_copy = false; - SHA_CTX ctx; - std::string memory_sink_str; // Don't need to reserve space. - do { - // Is there enough room in the target filesystem to hold the patched file? 
- - if (target_is_partition) { - // If the target is a partition, we're actually going to - // write the output to /tmp and then copy it to the - // partition. statfs() always returns 0 blocks free for - // /tmp, so instead we'll just assume that /tmp has enough - // space to hold the file. - - // We still write the original source to cache, in case - // the partition write is interrupted. - if (MakeFreeSpaceOnCache(source_file->data.size()) < 0) { - printf("not enough free space on /cache\n"); - return 1; - } - if (SaveFileContents(CACHE_TEMP_SOURCE, source_file) < 0) { - printf("failed to back up source file\n"); - return 1; - } - made_copy = true; - retry = 0; - } else { - bool enough_space = false; - if (retry > 0) { - size_t free_space = FreeSpaceForFile(target_fs.c_str()); - enough_space = (free_space > (256 << 10)) && // 256k (two-block) minimum - (free_space > (target_size * 3 / 2)); // 50% margin of error - if (!enough_space) { - printf("target %zu bytes; free space %zu bytes; retry %d; enough %d\n", target_size, - free_space, retry, enough_space); - } - } - - if (!enough_space) { - retry = 0; - } - - if (!enough_space && source_patch_value != nullptr) { - // Using the original source, but not enough free space. First - // copy the source file to cache, then delete it from the original - // location. - - if (strncmp(source_filename, "EMMC:", 5) == 0) { - // It's impossible to free space on the target filesystem by - // deleting the source if the source is a partition. If - // we're ever in a state where we need to do this, fail. - printf("not enough free space for target but source is partition\n"); - return 1; - } + CHECK(android::base::StartsWith(target_filename, "EMMC:")); - if (MakeFreeSpaceOnCache(source_file->data.size()) < 0) { - printf("not enough free space on /cache\n"); - return 1; - } - - if (SaveFileContents(CACHE_TEMP_SOURCE, source_file) < 0) { - printf("failed to back up source file\n"); - return 1; - } - made_copy = true; - unlink(source_filename); - - size_t free_space = FreeSpaceForFile(target_fs.c_str()); - printf("(now %zu bytes free for target) ", free_space); - } - } - - SinkFn sink = nullptr; - void* token = nullptr; - unique_fd output_fd; - if (target_is_partition) { - // We store the decoded output in memory. - sink = MemorySink; - token = &memory_sink_str; - } else { - // We write the decoded output to "<tgt-file>.patch". - output_fd.reset(ota_open(tmp_target_filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC | O_SYNC, - S_IRUSR | S_IWUSR)); - if (output_fd == -1) { - printf("failed to open output file %s: %s\n", tmp_target_filename.c_str(), strerror(errno)); - return 1; - } - sink = FileSink; - token = &output_fd; - } + // We still write the original source to cache, in case the partition write is interrupted. + if (MakeFreeSpaceOnCache(source_file.data.size()) < 0) { + printf("not enough free space on /cache\n"); + return 1; + } + if (SaveFileContents(CACHE_TEMP_SOURCE, &source_file) < 0) { + printf("failed to back up source file\n"); + return 1; + } - SHA1_Init(&ctx); + // We store the decoded output in memory. + SinkFn sink = MemorySink; + std::string memory_sink_str; // Don't need to reserve space. 
+ void* token = &memory_sink_str; - int result; - if (use_bsdiff) { - result = ApplyBSDiffPatch(source_to_use->data.data(), source_to_use->data.size(), patch, 0, - sink, token, &ctx); - } else { - result = ApplyImagePatch(source_to_use->data.data(), source_to_use->data.size(), patch, sink, - token, &ctx, bonus_data); - } + SHA_CTX ctx; + SHA1_Init(&ctx); - if (!target_is_partition) { - if (ota_fsync(output_fd) != 0) { - printf("failed to fsync file \"%s\": %s\n", tmp_target_filename.c_str(), strerror(errno)); - result = 1; - } - if (ota_close(output_fd) != 0) { - printf("failed to close file \"%s\": %s\n", tmp_target_filename.c_str(), strerror(errno)); - result = 1; - } - } + int result; + if (use_bsdiff) { + result = ApplyBSDiffPatch(source_file.data.data(), source_file.data.size(), patch.get(), 0, + sink, token, &ctx); + } else { + result = ApplyImagePatch(source_file.data.data(), source_file.data.size(), patch.get(), sink, + token, &ctx, bonus_data); + } - if (result != 0) { - if (retry == 0) { - printf("applying patch failed\n"); - return 1; - } else { - printf("applying patch failed; retrying\n"); - } - if (!target_is_partition) { - unlink(tmp_target_filename.c_str()); - } - } else { - // succeeded; no need to retry - break; - } - } while (retry-- > 0); + if (result != 0) { + printf("applying patch failed\n"); + return 1; + } uint8_t current_target_sha1[SHA_DIGEST_LENGTH]; SHA1_Final(current_target_sha1, &ctx); @@ -824,36 +677,15 @@ static int GenerateTarget(FileContents* source_file, printf("now %s\n", short_sha1(target_sha1).c_str()); } - if (target_is_partition) { - // Copy the temp file to the partition. - if (WriteToPartition(reinterpret_cast<const unsigned char*>(memory_sink_str.c_str()), - memory_sink_str.size(), target_filename) != 0) { - printf("write of patched data to %s failed\n", target_filename); - return 1; - } - } else { - // Give the .patch file the same owner, group, and mode of the original source file. - if (chmod(tmp_target_filename.c_str(), source_to_use->st.st_mode) != 0) { - printf("chmod of \"%s\" failed: %s\n", tmp_target_filename.c_str(), strerror(errno)); - return 1; - } - if (chown(tmp_target_filename.c_str(), source_to_use->st.st_uid, - source_to_use->st.st_gid) != 0) { - printf("chown of \"%s\" failed: %s\n", tmp_target_filename.c_str(), strerror(errno)); - return 1; - } - - // Finally, rename the .patch file to replace the target file. - if (rename(tmp_target_filename.c_str(), target_filename) != 0) { - printf("rename of .patch to \"%s\" failed: %s\n", target_filename, strerror(errno)); - return 1; - } + // Write back the temp file to the partition. + if (WriteToPartition(reinterpret_cast<const unsigned char*>(memory_sink_str.c_str()), + memory_sink_str.size(), target_filename) != 0) { + printf("write of patched data to %s failed\n", target_filename.c_str()); + return 1; } - // If this run of applypatch created the copy, and we're here, we can delete it. - if (made_copy) { - unlink(CACHE_TEMP_SOURCE); - } + // Delete the backup copy of the source. + unlink(CACHE_TEMP_SOURCE); // Success! 
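GenerateTarget() now always decodes the patched image into memory through a sink callback, hashes what the sink received, and hands the buffer to WriteToPartition(). A minimal sketch of a memory sink with that shape, assuming the ssize_t-based SinkFn callback signature used by applypatch:

#include <sys/types.h>
#include <string>

// Memory sink: append each decoded chunk to a std::string passed through 'token'.
typedef ssize_t (*SinkFn)(const unsigned char* data, ssize_t len, void* token);

static ssize_t MemorySink(const unsigned char* data, ssize_t len, void* token) {
  std::string* s = static_cast<std::string*>(token);
  s->append(reinterpret_cast<const char*>(data), len);
  return len;
}

// Usage: the patcher calls the sink repeatedly while decoding; the accumulated
// string is then SHA-1-checked and written to the partition in one shot.
// std::string out;
// SinkFn sink = MemorySink;
// sink(chunk, chunk_len, &out);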
return 0; diff --git a/edify/expr.cpp b/edify/expr.cpp index 2b7fd7a6a..54ab3325c 100644 --- a/edify/expr.cpp +++ b/edify/expr.cpp @@ -357,7 +357,7 @@ bool ReadArgs(State* state, const std::vector<std::unique_ptr<Expr>>& argv, if (args == nullptr) { return false; } - if (len == 0 || start + len > argv.size()) { + if (start + len > argv.size()) { return false; } for (size_t i = start; i < start + len; ++i) { diff --git a/install.cpp b/install.cpp index db8fb97db..7cef44a37 100644 --- a/install.cpp +++ b/install.cpp @@ -546,17 +546,21 @@ install_package(const char* path, bool* wipe_cache, const char* install_file, std::chrono::duration<double> duration = std::chrono::system_clock::now() - start; int time_total = static_cast<int>(duration.count()); - if (ensure_path_mounted(UNCRYPT_STATUS) != 0) { + bool has_cache = volume_for_path("/cache") != nullptr; + // Skip logging the uncrypt_status on devices without /cache. + if (has_cache) { + if (ensure_path_mounted(UNCRYPT_STATUS) != 0) { LOG(WARNING) << "Can't mount " << UNCRYPT_STATUS; - } else { + } else { std::string uncrypt_status; if (!android::base::ReadFileToString(UNCRYPT_STATUS, &uncrypt_status)) { - PLOG(WARNING) << "failed to read uncrypt status"; + PLOG(WARNING) << "failed to read uncrypt status"; } else if (!android::base::StartsWith(uncrypt_status, "uncrypt_")) { - PLOG(WARNING) << "corrupted uncrypt_status: " << uncrypt_status; + LOG(WARNING) << "corrupted uncrypt_status: " << uncrypt_status; } else { - log_buffer.push_back(android::base::Trim(uncrypt_status)); + log_buffer.push_back(android::base::Trim(uncrypt_status)); } + } } // The first two lines need to be the package name and install result. diff --git a/otafault/ota_io.cpp b/otafault/ota_io.cpp index f5b01136f..3a89bb5dd 100644 --- a/otafault/ota_io.cpp +++ b/otafault/ota_io.cpp @@ -89,7 +89,7 @@ static int __ota_fclose(FILE* fh) { return fclose(fh); } -void OtaFcloser::operator()(FILE* f) { +void OtaFcloser::operator()(FILE* f) const { __ota_fclose(f); }; diff --git a/otafault/ota_io.h b/otafault/ota_io.h index 395b4230e..9428f1b1f 100644 --- a/otafault/ota_io.h +++ b/otafault/ota_io.h @@ -59,7 +59,7 @@ using unique_fd = android::base::unique_fd_impl<OtaCloser>; int ota_close(unique_fd& fd); struct OtaFcloser { - void operator()(FILE*); + void operator()(FILE*) const; }; using unique_file = std::unique_ptr<FILE, OtaFcloser>; diff --git a/recovery.cpp b/recovery.cpp index 4bbc7831e..c2262161a 100644 --- a/recovery.cpp +++ b/recovery.cpp @@ -793,47 +793,45 @@ static bool wipe_cache(bool should_confirm, Device* device) { return success; } -// Secure-wipe a given partition. It uses BLKSECDISCARD, if supported. -// Otherwise, it goes with BLKDISCARD (if device supports BLKDISCARDZEROES) or -// BLKZEROOUT. +// Secure-wipe a given partition. It uses BLKSECDISCARD, if supported. Otherwise, it goes with +// BLKDISCARD (if device supports BLKDISCARDZEROES) or BLKZEROOUT. 
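The rewritten secure_wipe_partition() (body continues below) keeps the same fallback chain: BLKSECDISCARD first, then BLKDISCARD if discarded blocks read back as zeros, otherwise BLKZEROOUT. A self-contained sketch of that chain using plain POSIX I/O instead of android-base logging and unique_fd; the function name is illustrative:

#include <errno.h>
#include <fcntl.h>
#include <linux/fs.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static bool SecureWipe(const char* partition) {
  int fd = open(partition, O_WRONLY);
  if (fd == -1) {
    fprintf(stderr, "open %s: %s\n", partition, strerror(errno));
    return false;
  }

  uint64_t range[2] = { 0, 0 };
  bool ok = false;
  if (ioctl(fd, BLKGETSIZE64, &range[1]) == 0 && range[1] != 0) {
    if (ioctl(fd, BLKSECDISCARD, &range) == 0) {
      ok = true;  // Hardware-assisted secure discard worked.
    } else {
      // Fall back to BLKDISCARD only if discarded blocks read back as zeros,
      // otherwise explicitly zero the range with BLKZEROOUT.
      unsigned int zeroes = 0;
      if (ioctl(fd, BLKDISCARDZEROES, &zeroes) == 0 && zeroes != 0) {
        ok = (ioctl(fd, BLKDISCARD, &range) == 0);
      } else {
        ok = (ioctl(fd, BLKZEROOUT, &range) == 0);
      }
    }
  }
  close(fd);
  return ok;
}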
static bool secure_wipe_partition(const std::string& partition) { - android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(partition.c_str(), O_WRONLY))); - if (fd == -1) { - PLOG(ERROR) << "failed to open \"" << partition << "\""; - return false; - } + android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(partition.c_str(), O_WRONLY))); + if (fd == -1) { + PLOG(ERROR) << "Failed to open \"" << partition << "\""; + return false; + } - uint64_t range[2] = {0, 0}; - if (ioctl(fd, BLKGETSIZE64, &range[1]) == -1 || range[1] == 0) { - PLOG(ERROR) << "failed to get partition size"; + uint64_t range[2] = { 0, 0 }; + if (ioctl(fd, BLKGETSIZE64, &range[1]) == -1 || range[1] == 0) { + PLOG(ERROR) << "Failed to get partition size"; + return false; + } + LOG(INFO) << "Secure-wiping \"" << partition << "\" from " << range[0] << " to " << range[1]; + + LOG(INFO) << " Trying BLKSECDISCARD..."; + if (ioctl(fd, BLKSECDISCARD, &range) == -1) { + PLOG(WARNING) << " Failed"; + + // Use BLKDISCARD if it zeroes out blocks, otherwise use BLKZEROOUT. + unsigned int zeroes; + if (ioctl(fd, BLKDISCARDZEROES, &zeroes) == 0 && zeroes != 0) { + LOG(INFO) << " Trying BLKDISCARD..."; + if (ioctl(fd, BLKDISCARD, &range) == -1) { + PLOG(ERROR) << " Failed"; return false; + } + } else { + LOG(INFO) << " Trying BLKZEROOUT..."; + if (ioctl(fd, BLKZEROOUT, &range) == -1) { + PLOG(ERROR) << " Failed"; + return false; + } } - printf("Secure-wiping \"%s\" from %" PRIu64 " to %" PRIu64 ".\n", - partition.c_str(), range[0], range[1]); - - printf("Trying BLKSECDISCARD...\t"); - if (ioctl(fd, BLKSECDISCARD, &range) == -1) { - printf("failed: %s\n", strerror(errno)); - - // Use BLKDISCARD if it zeroes out blocks, otherwise use BLKZEROOUT. - unsigned int zeroes; - if (ioctl(fd, BLKDISCARDZEROES, &zeroes) == 0 && zeroes != 0) { - printf("Trying BLKDISCARD...\t"); - if (ioctl(fd, BLKDISCARD, &range) == -1) { - printf("failed: %s\n", strerror(errno)); - return false; - } - } else { - printf("Trying BLKZEROOUT...\t"); - if (ioctl(fd, BLKZEROOUT, &range) == -1) { - printf("failed: %s\n", strerror(errno)); - return false; - } - } - } + } - printf("done\n"); - return true; + LOG(INFO) << " Done"; + return true; } // Check if the wipe package matches expectation: @@ -865,7 +863,7 @@ static bool check_wipe_package(size_t wipe_package_size) { return false; } std::string metadata; - if (!read_metadata_from_package(&zip, &metadata)) { + if (!read_metadata_from_package(zip, &metadata)) { CloseArchive(zip); return false; } diff --git a/screen_ui.cpp b/screen_ui.cpp index 706877b4d..bb2772dd8 100644 --- a/screen_ui.cpp +++ b/screen_ui.cpp @@ -98,7 +98,7 @@ GRSurface* ScreenRecoveryUI::GetCurrentText() { } } -int ScreenRecoveryUI::PixelsFromDp(int dp) { +int ScreenRecoveryUI::PixelsFromDp(int dp) const { return dp * density_; } @@ -256,12 +256,12 @@ void ScreenRecoveryUI::DrawHorizontalRule(int* y) { *y += 4; } -void ScreenRecoveryUI::DrawTextLine(int x, int* y, const char* line, bool bold) { +void ScreenRecoveryUI::DrawTextLine(int x, int* y, const char* line, bool bold) const { gr_text(gr_sys_font(), x, *y, line, bold); *y += char_height_ + 4; } -void ScreenRecoveryUI::DrawTextLines(int x, int* y, const char* const* lines) { +void ScreenRecoveryUI::DrawTextLines(int x, int* y, const char* const* lines) const { for (size_t i = 0; lines != nullptr && lines[i] != nullptr; ++i) { DrawTextLine(x, y, lines[i], false); } diff --git a/screen_ui.h b/screen_ui.h index b2dcf4aeb..a2322c36c 100644 --- a/screen_ui.h +++ b/screen_ui.h @@ -160,14 +160,14 @@ class 
ScreenRecoveryUI : public RecoveryUI { void LoadBitmap(const char* filename, GRSurface** surface); void LoadLocalizedBitmap(const char* filename, GRSurface** surface); - int PixelsFromDp(int dp); + int PixelsFromDp(int dp) const; virtual int GetAnimationBaseline(); virtual int GetProgressBaseline(); virtual int GetTextBaseline(); void DrawHorizontalRule(int* y); - void DrawTextLine(int x, int* y, const char* line, bool bold); - void DrawTextLines(int x, int* y, const char* const* lines); + void DrawTextLine(int x, int* y, const char* line, bool bold) const; + void DrawTextLines(int x, int* y, const char* const* lines) const; }; #endif // RECOVERY_UI_H diff --git a/tests/component/applypatch_test.cpp b/tests/component/applypatch_test.cpp index d178303aa..5cba68f8a 100644 --- a/tests/component/applypatch_test.cpp +++ b/tests/component/applypatch_test.cpp @@ -280,66 +280,6 @@ TEST_F(ApplyPatchCacheTest, CheckCacheMissingFailure) { ASSERT_NE(0, applypatch_check(&old_file[0], sha1s)); } -TEST_F(ApplyPatchFullTest, ApplyInPlace) { - std::vector<std::string> sha1s = { bad_sha1_a, old_sha1 }; - ASSERT_EQ(0, applypatch(&old_file[0], "-", &new_sha1[0], new_size, sha1s, patches, nullptr)); - ASSERT_TRUE(file_cmp(old_file, new_file)); - - // reapply, applypatch is idempotent so it should succeed - ASSERT_EQ(0, applypatch(&old_file[0], "-", &new_sha1[0], new_size, sha1s, patches, nullptr)); - ASSERT_TRUE(file_cmp(old_file, new_file)); -} - -TEST_F(ApplyPatchFullTest, ApplyInNewLocation) { - std::vector<std::string> sha1s = { bad_sha1_a, old_sha1 }; - // Apply bsdiff patch to new location. - ASSERT_EQ( - 0, applypatch(&old_file[0], &output_loc[0], &new_sha1[0], new_size, sha1s, patches, nullptr)); - ASSERT_TRUE(file_cmp(output_loc, new_file)); - - // Reapply to the same location. - ASSERT_EQ( - 0, applypatch(&old_file[0], &output_loc[0], &new_sha1[0], new_size, sha1s, patches, nullptr)); - ASSERT_TRUE(file_cmp(output_loc, new_file)); -} - -TEST_F(ApplyPatchFullTest, ApplyCorruptedInNewLocation) { - std::vector<std::string> sha1s = { bad_sha1_a, old_sha1 }; - // Apply bsdiff patch to new location with corrupted source. - mangle_file(old_file); - ASSERT_EQ( - 0, applypatch(&old_file[0], &output_loc[0], &new_sha1[0], new_size, sha1s, patches, nullptr)); - ASSERT_TRUE(file_cmp(output_loc, new_file)); - - // Reapply bsdiff patch to new location with corrupted source. - ASSERT_EQ( - 0, applypatch(&old_file[0], &output_loc[0], &new_sha1[0], new_size, sha1s, patches, nullptr)); - ASSERT_TRUE(file_cmp(output_loc, new_file)); -} - -TEST_F(ApplyPatchDoubleCacheTest, ApplyDoubleCorruptedInNewLocation) { - std::vector<std::string> sha1s = { bad_sha1_a, old_sha1 }; - - // Apply bsdiff patch to new location with corrupted source and copy (no new file). - // Expected to fail. - mangle_file(old_file); - mangle_file(cache_file); - ASSERT_NE( - 0, applypatch(&old_file[0], &output_loc[0], &new_sha1[0], new_size, sha1s, patches, nullptr)); - ASSERT_FALSE(file_cmp(output_loc, new_file)); - - // Expected to fail again on retry. - ASSERT_NE( - 0, applypatch(&old_file[0], &output_loc[0], &new_sha1[0], new_size, sha1s, patches, nullptr)); - ASSERT_FALSE(file_cmp(output_loc, new_file)); - - // Expected to fail with incorrect new file. - mangle_file(output_loc); - ASSERT_NE( - 0, applypatch(&old_file[0], &output_loc[0], &new_sha1[0], new_size, sha1s, patches, nullptr)); - ASSERT_FALSE(file_cmp(output_loc, new_file)); -} - TEST(ApplyPatchModesTest, InvalidArgs) { // At least two args (including the filename). 
ASSERT_EQ(2, applypatch_modes(1, (const char* []){ "applypatch" })); @@ -348,70 +288,6 @@ TEST(ApplyPatchModesTest, InvalidArgs) { ASSERT_EQ(2, applypatch_modes(2, (const char* []){ "applypatch", "-x" })); } -TEST(ApplyPatchModesTest, PatchMode) { - std::string boot_img = from_testdata_base("boot.img"); - size_t boot_img_size; - std::string boot_img_sha1; - sha1sum(boot_img, &boot_img_sha1, &boot_img_size); - - std::string recovery_img = from_testdata_base("recovery.img"); - std::string recovery_img_sha1; - size_t size; - sha1sum(recovery_img, &recovery_img_sha1, &size); - std::string recovery_img_size = std::to_string(size); - std::string bonus_file = from_testdata_base("bonus.file"); - - // applypatch -b <bonus-file> <src-file> <tgt-file> <tgt-sha1> <tgt-size> <src-sha1>:<patch> - TemporaryFile tmp1; - std::string patch = boot_img_sha1 + ":" + from_testdata_base("recovery-from-boot.p"); - std::vector<const char*> args = { - "applypatch", - "-b", - bonus_file.c_str(), - boot_img.c_str(), - tmp1.path, - recovery_img_sha1.c_str(), - recovery_img_size.c_str(), - patch.c_str() - }; - ASSERT_EQ(0, applypatch_modes(args.size(), args.data())); - - // applypatch <src-file> <tgt-file> <tgt-sha1> <tgt-size> <src-sha1>:<patch> - TemporaryFile tmp2; - patch = boot_img_sha1 + ":" + from_testdata_base("recovery-from-boot-with-bonus.p"); - std::vector<const char*> args2 = { - "applypatch", - boot_img.c_str(), - tmp2.path, - recovery_img_sha1.c_str(), - recovery_img_size.c_str(), - patch.c_str() - }; - ASSERT_EQ(0, applypatch_modes(args2.size(), args2.data())); - - // applypatch -b <bonus-file> <src-file> <tgt-file> <tgt-sha1> <tgt-size> \ - // <src-sha1-fake>:<patch1> <src-sha1>:<patch2> - TemporaryFile tmp3; - std::string bad_sha1_a = android::base::StringPrintf("%040x", rand()); - std::string bad_sha1_b = android::base::StringPrintf("%040x", rand()); - std::string patch1 = bad_sha1_a + ":" + from_testdata_base("recovery-from-boot.p"); - std::string patch2 = boot_img_sha1 + ":" + from_testdata_base("recovery-from-boot.p"); - std::string patch3 = bad_sha1_b + ":" + from_testdata_base("recovery-from-boot.p"); - std::vector<const char*> args3 = { - "applypatch", - "-b", - bonus_file.c_str(), - boot_img.c_str(), - tmp3.path, - recovery_img_sha1.c_str(), - recovery_img_size.c_str(), - patch1.c_str(), - patch2.c_str(), - patch3.c_str() - }; - ASSERT_EQ(0, applypatch_modes(args3.size(), args3.data())); -} - TEST(ApplyPatchModesTest, PatchModeEmmcTarget) { std::string boot_img = from_testdata_base("boot.img"); size_t boot_img_size; @@ -240,7 +240,7 @@ void RecoveryUI::ProcessKey(int key_code, int updown) { } void* RecoveryUI::time_key_helper(void* cookie) { - key_timer_t* info = (key_timer_t*) cookie; + key_timer_t* info = static_cast<key_timer_t*>(cookie); info->ui->time_key(info->key_code, info->count); delete info; return nullptr; diff --git a/updater/blockimg.cpp b/updater/blockimg.cpp index 12ca151a1..c614ccc47 100644 --- a/updater/blockimg.cpp +++ b/updater/blockimg.cpp @@ -356,7 +356,7 @@ static bool receive_new_data(const uint8_t* data, size_t size, void* cookie) { } static void* unzip_new_data(void* cookie) { - NewThreadInfo* nti = (NewThreadInfo*) cookie; + NewThreadInfo* nti = static_cast<NewThreadInfo*>(cookie); ProcessZipEntryContents(nti->za, &nti->entry, receive_new_data, nti); return nullptr; } @@ -429,46 +429,11 @@ struct CommandParameters { uint8_t* patch_start; }; -// Do a source/target load for move/bsdiff/imgdiff in version 1. 
-// We expect to parse the remainder of the parameter tokens as: -// -// <src_range> <tgt_range> -// -// The source range is loaded into the provided buffer, reallocating -// it to make it larger if necessary. - -static int LoadSrcTgtVersion1(CommandParameters& params, RangeSet& tgt, size_t& src_blocks, - std::vector<uint8_t>& buffer, int fd) { - - if (params.cpos + 1 >= params.tokens.size()) { - LOG(ERROR) << "invalid parameters"; - return -1; - } - - // <src_range> - RangeSet src = parse_range(params.tokens[params.cpos++]); - - // <tgt_range> - tgt = parse_range(params.tokens[params.cpos++]); - - allocate(src.size * BLOCKSIZE, buffer); - int rc = ReadBlocks(src, buffer, fd); - src_blocks = src.size; - - return rc; -} - // Print the hash in hex for corrupted source blocks (excluding the stashed blocks which is // handled separately). static void PrintHashForCorruptedSourceBlocks(const CommandParameters& params, const std::vector<uint8_t>& buffer) { LOG(INFO) << "unexpected contents of source blocks in cmd:\n" << params.cmdline; - if (params.version < 3) { - // TODO handle version 1,2 - LOG(WARNING) << "version number " << params.version << " is not supported to print hashes"; - return; - } - CHECK(params.tokens[0] == "move" || params.tokens[0] == "bsdiff" || params.tokens[0] == "imgdiff"); @@ -651,8 +616,8 @@ static void DeleteStash(const std::string& base) { } } -static int LoadStash(CommandParameters& params, const std::string& base, const std::string& id, - bool verify, size_t* blocks, std::vector<uint8_t>& buffer, bool printnoent) { +static int LoadStash(CommandParameters& params, const std::string& id, bool verify, size_t* blocks, + std::vector<uint8_t>& buffer, bool printnoent) { // In verify mode, if source range_set was saved for the given hash, // check contents in the source blocks first. If the check fails, // search for the stashed files on /cache as usual. @@ -674,17 +639,13 @@ static int LoadStash(CommandParameters& params, const std::string& base, const s } } - if (base.empty()) { - return -1; - } - size_t blockcount = 0; if (!blocks) { blocks = &blockcount; } - std::string fn = GetStashFileName(base, id, ""); + std::string fn = GetStashFileName(params.stashbase, id, ""); struct stat sb; int res = stat(fn.c_str(), &sb); @@ -735,7 +696,7 @@ static int LoadStash(CommandParameters& params, const std::string& base, const s } static int WriteStash(const std::string& base, const std::string& id, int blocks, - std::vector<uint8_t>& buffer, bool checkspace, bool *exists) { + std::vector<uint8_t>& buffer, bool checkspace, bool *exists) { if (base.empty()) { return -1; } @@ -895,52 +856,6 @@ static int CreateStash(State* state, size_t maxblocks, const std::string& blockd return 0; // Using existing directory } -static int SaveStash(CommandParameters& params, const std::string& base, - std::vector<uint8_t>& buffer, int fd, bool usehash) { - - // <stash_id> <src_range> - if (params.cpos + 1 >= params.tokens.size()) { - LOG(ERROR) << "missing id and/or src range fields in stash command"; - return -1; - } - const std::string& id = params.tokens[params.cpos++]; - - size_t blocks = 0; - if (usehash && LoadStash(params, base, id, true, &blocks, buffer, false) == 0) { - // Stash file already exists and has expected contents. Do not - // read from source again, as the source may have been already - // overwritten during a previous attempt. 
- return 0; - } - - RangeSet src = parse_range(params.tokens[params.cpos++]); - - allocate(src.size * BLOCKSIZE, buffer); - if (ReadBlocks(src, buffer, fd) == -1) { - return -1; - } - blocks = src.size; - stash_map[id] = src; - - if (usehash && VerifyBlocks(id, buffer, blocks, true) != 0) { - // Source blocks have unexpected contents. If we actually need this - // data later, this is an unrecoverable error. However, the command - // that uses the data may have already completed previously, so the - // possible failure will occur during source block verification. - LOG(ERROR) << "failed to load source blocks for stash " << id; - return 0; - } - - // In verify mode, we don't need to stash any blocks. - if (!params.canwrite && usehash) { - return 0; - } - - LOG(INFO) << "stashing " << blocks << " blocks to " << id; - params.stashed += blocks; - return WriteStash(base, id, blocks, buffer, false, nullptr); -} - static int FreeStash(const std::string& base, const std::string& id) { if (base.empty() || id.empty()) { return -1; @@ -980,13 +895,12 @@ static void MoveRange(std::vector<uint8_t>& dest, const RangeSet& locs, // <tgt_range> <src_block_count> <src_range> <src_loc> <[stash_id:stash_range] ...> // (loads data from both source image and stashes) // -// On return, buffer is filled with the loaded source data (rearranged -// and combined with stashed data as necessary). buffer may be -// reallocated if needed to accommodate the source data. *tgt is the -// target RangeSet. Any stashes required are loaded using LoadStash. +// On return, params.buffer is filled with the loaded source data (rearranged and combined with +// stashed data as necessary). buffer may be reallocated if needed to accommodate the source data. +// *tgt is the target RangeSet. Any stashes required are loaded using LoadStash. static int LoadSrcTgtVersion2(CommandParameters& params, RangeSet& tgt, size_t& src_blocks, - std::vector<uint8_t>& buffer, int fd, const std::string& stashbase, bool* overlap) { + bool* overlap) { // At least it needs to provide three parameters: <tgt_range>, // <src_block_count> and "-"/<src_range>. 
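The <src_range>/<tgt_range> tokens handled here are comma-separated block lists in which the first number says how many block numbers follow and the rest form [begin, end) pairs, e.g. "2,544,545" for the single block 544. A small sketch of decoding such a token under that assumption; the helper name is illustrative:

#include <cstddef>
#include <cstdlib>
#include <string>
#include <utility>
#include <vector>

// Parse a range token such as "4,512,1024,2048,2176": the leading count says how
// many block numbers follow, and those numbers form [begin, end) pairs.
// Returns an empty vector (and total 0) on malformed input.
static std::vector<std::pair<size_t, size_t>> ParseRange(const std::string& token,
                                                         size_t* total_blocks) {
  std::vector<size_t> nums;
  size_t pos = 0;
  while (pos < token.size()) {
    size_t comma = token.find(',', pos);
    if (comma == std::string::npos) comma = token.size();
    nums.push_back(strtoul(token.substr(pos, comma - pos).c_str(), nullptr, 10));
    pos = comma + 1;
  }

  std::vector<std::pair<size_t, size_t>> ranges;
  *total_blocks = 0;
  if (nums.empty() || nums[0] % 2 != 0 || nums.size() != nums[0] + 1) {
    return ranges;  // malformed token
  }
  for (size_t i = 1; i + 1 < nums.size(); i += 2) {
    ranges.emplace_back(nums[i], nums[i + 1]);       // [begin, end)
    *total_blocks += nums[i + 1] - nums[i];
  }
  return ranges;
}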
@@ -1005,7 +919,7 @@ static int LoadSrcTgtVersion2(CommandParameters& params, RangeSet& tgt, size_t& return -1; } - allocate(src_blocks * BLOCKSIZE, buffer); + allocate(src_blocks * BLOCKSIZE, params.buffer); // "-" or <src_range> [<src_loc>] if (params.tokens[params.cpos] == "-") { @@ -1013,7 +927,7 @@ static int LoadSrcTgtVersion2(CommandParameters& params, RangeSet& tgt, size_t& params.cpos++; } else { RangeSet src = parse_range(params.tokens[params.cpos++]); - int res = ReadBlocks(src, buffer, fd); + int res = ReadBlocks(src, params.buffer, params.fd); if (overlap) { *overlap = range_overlaps(src, tgt); @@ -1029,7 +943,7 @@ static int LoadSrcTgtVersion2(CommandParameters& params, RangeSet& tgt, size_t& } RangeSet locs = parse_range(params.tokens[params.cpos++]); - MoveRange(buffer, locs, buffer); + MoveRange(params.buffer, locs, params.buffer); } // <[stash_id:stash_range]> @@ -1044,7 +958,7 @@ static int LoadSrcTgtVersion2(CommandParameters& params, RangeSet& tgt, size_t& } std::vector<uint8_t> stash; - int res = LoadStash(params, stashbase, tokens[0], false, nullptr, stash, true); + int res = LoadStash(params, tokens[0], false, nullptr, stash, true); if (res == -1) { // These source blocks will fail verification if used later, but we @@ -1055,32 +969,41 @@ static int LoadSrcTgtVersion2(CommandParameters& params, RangeSet& tgt, size_t& RangeSet locs = parse_range(tokens[1]); - MoveRange(buffer, locs, stash); + MoveRange(params.buffer, locs, stash); } return 0; } -// Do a source/target load for move/bsdiff/imgdiff in version 3. -// -// Parameters are the same as for LoadSrcTgtVersion2, except for 'onehash', which -// tells the function whether to expect separate source and targe block hashes, or -// if they are both the same and only one hash should be expected, and -// 'isunresumable', which receives a non-zero value if block verification fails in -// a way that the update cannot be resumed anymore. -// -// If the function is unable to load the necessary blocks or their contents don't -// match the hashes, the return value is -1 and the command should be aborted. -// -// If the return value is 1, the command has already been completed according to -// the contents of the target blocks, and should not be performed again. -// -// If the return value is 0, source blocks have expected content and the command -// can be performed. - +/** + * Do a source/target load for move/bsdiff/imgdiff in version 3. + * + * We expect to parse the remainder of the parameter tokens as one of: + * + * <tgt_range> <src_block_count> <src_range> + * (loads data from source image only) + * + * <tgt_range> <src_block_count> - <[stash_id:stash_range] ...> + * (loads data from stashes only) + * + * <tgt_range> <src_block_count> <src_range> <src_loc> <[stash_id:stash_range] ...> + * (loads data from both source image and stashes) + * + * Parameters are the same as for LoadSrcTgtVersion2, except for 'onehash', which tells the function + * whether to expect separate source and targe block hashes, or if they are both the same and only + * one hash should be expected, and 'isunresumable', which receives a non-zero value if block + * verification fails in a way that the update cannot be resumed anymore. + * + * If the function is unable to load the necessary blocks or their contents don't match the hashes, + * the return value is -1 and the command should be aborted. 
+ * + * If the return value is 1, the command has already been completed according to the contents of the + * target blocks, and should not be performed again. + * + * If the return value is 0, source blocks have expected content and the command can be performed. + */ static int LoadSrcTgtVersion3(CommandParameters& params, RangeSet& tgt, size_t& src_blocks, - bool onehash, bool& overlap) { - + bool onehash, bool& overlap) { if (params.cpos >= params.tokens.size()) { LOG(ERROR) << "missing source hash"; return -1; @@ -1099,8 +1022,7 @@ static int LoadSrcTgtVersion3(CommandParameters& params, RangeSet& tgt, size_t& tgthash = params.tokens[params.cpos++]; } - if (LoadSrcTgtVersion2(params, tgt, src_blocks, params.buffer, params.fd, - params.stashbase, &overlap) == -1) { + if (LoadSrcTgtVersion2(params, tgt, src_blocks, &overlap) == -1) { return -1; } @@ -1111,7 +1033,7 @@ static int LoadSrcTgtVersion3(CommandParameters& params, RangeSet& tgt, size_t& } if (VerifyBlocks(tgthash, tgtbuffer, tgt.size, false) == 0) { - // Target blocks already have expected content, command should be skipped + // Target blocks already have expected content, command should be skipped. return 1; } @@ -1130,25 +1052,24 @@ static int LoadSrcTgtVersion3(CommandParameters& params, RangeSet& tgt, size_t& } params.stashed += src_blocks; - // Can be deleted when the write has completed + // Can be deleted when the write has completed. if (!stash_exists) { params.freestash = srchash; } } - // Source blocks have expected content, command can proceed + // Source blocks have expected content, command can proceed. return 0; } - if (overlap && LoadStash(params, params.stashbase, srchash, true, nullptr, params.buffer, - true) == 0) { + if (overlap && LoadStash(params, srchash, true, nullptr, params.buffer, true) == 0) { // Overlapping source blocks were previously stashed, command can proceed. // We are recovering from an interrupted command, so we don't know if the // stash can safely be deleted after this command. return 0; } - // Valid source data not available, update cannot be resumed + // Valid source data not available, update cannot be resumed. 
LOG(ERROR) << "partition has unexpected contents"; PrintHashForCorruptedSourceBlocks(params, params.buffer); @@ -1158,75 +1079,101 @@ static int LoadSrcTgtVersion3(CommandParameters& params, RangeSet& tgt, size_t& } static int PerformCommandMove(CommandParameters& params) { - size_t blocks = 0; - bool overlap = false; - int status = 0; - RangeSet tgt; + size_t blocks = 0; + bool overlap = false; + RangeSet tgt; + int status = LoadSrcTgtVersion3(params, tgt, blocks, true, overlap); - if (params.version == 1) { - status = LoadSrcTgtVersion1(params, tgt, blocks, params.buffer, params.fd); - } else if (params.version == 2) { - status = LoadSrcTgtVersion2(params, tgt, blocks, params.buffer, params.fd, - params.stashbase, nullptr); - } else if (params.version >= 3) { - status = LoadSrcTgtVersion3(params, tgt, blocks, true, overlap); - } + if (status == -1) { + LOG(ERROR) << "failed to read blocks for move"; + return -1; + } - if (status == -1) { - LOG(ERROR) << "failed to read blocks for move"; - return -1; - } + if (status == 0) { + params.foundwrites = true; + } else if (params.foundwrites) { + LOG(WARNING) << "warning: commands executed out of order [" << params.cmdname << "]"; + } + if (params.canwrite) { if (status == 0) { - params.foundwrites = true; - } else if (params.foundwrites) { - LOG(WARNING) << "warning: commands executed out of order [" << params.cmdname << "]"; + LOG(INFO) << " moving " << blocks << " blocks"; + + if (WriteBlocks(tgt, params.buffer, params.fd) == -1) { + return -1; + } + } else { + LOG(INFO) << "skipping " << blocks << " already moved blocks"; } + } - if (params.canwrite) { - if (status == 0) { - LOG(INFO) << " moving " << blocks << " blocks"; + if (!params.freestash.empty()) { + FreeStash(params.stashbase, params.freestash); + params.freestash.clear(); + } - if (WriteBlocks(tgt, params.buffer, params.fd) == -1) { - return -1; - } - } else { - LOG(INFO) << "skipping " << blocks << " already moved blocks"; - } + params.written += tgt.size; - } + return 0; +} - if (!params.freestash.empty()) { - FreeStash(params.stashbase, params.freestash); - params.freestash.clear(); - } +static int PerformCommandStash(CommandParameters& params) { + // <stash_id> <src_range> + if (params.cpos + 1 >= params.tokens.size()) { + LOG(ERROR) << "missing id and/or src range fields in stash command"; + return -1; + } - params.written += tgt.size; + const std::string& id = params.tokens[params.cpos++]; + size_t blocks = 0; + if (LoadStash(params, id, true, &blocks, params.buffer, false) == 0) { + // Stash file already exists and has expected contents. Do not read from source again, as the + // source may have been already overwritten during a previous attempt. + return 0; + } + + RangeSet src = parse_range(params.tokens[params.cpos++]); + allocate(src.size * BLOCKSIZE, params.buffer); + if (ReadBlocks(src, params.buffer, params.fd) == -1) { + return -1; + } + blocks = src.size; + stash_map[id] = src; + + if (VerifyBlocks(id, params.buffer, blocks, true) != 0) { + // Source blocks have unexpected contents. If we actually need this data later, this is an + // unrecoverable error. However, the command that uses the data may have already completed + // previously, so the possible failure will occur during source block verification. 
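The verification above relies on stash ids being the SHA-1 of the stashed data in lowercase hex, so checking a loaded buffer amounts to hashing it and comparing strings. A sketch of that VerifyBlocks()-style check, assuming the 4096-byte BLOCKSIZE used by blockimg.cpp and OpenSSL's one-shot SHA1():

#include <openssl/sha.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string>
#include <vector>

static bool BlocksMatchId(const std::string& id, const std::vector<uint8_t>& buffer,
                          size_t blocks) {
  uint8_t digest[SHA_DIGEST_LENGTH];
  SHA1(buffer.data(), blocks * 4096, digest);

  // Render the digest as lowercase hex and compare with the id from the command.
  char hex[SHA_DIGEST_LENGTH * 2 + 1];
  for (int i = 0; i < SHA_DIGEST_LENGTH; ++i) {
    snprintf(hex + 2 * i, 3, "%02x", digest[i]);
  }
  return id == hex;
}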
+ LOG(ERROR) << "failed to load source blocks for stash " << id; return 0; -} + } -static int PerformCommandStash(CommandParameters& params) { - return SaveStash(params, params.stashbase, params.buffer, params.fd, - (params.version >= 3)); + // In verify mode, we don't need to stash any blocks. + if (!params.canwrite) { + return 0; + } + + LOG(INFO) << "stashing " << blocks << " blocks to " << id; + params.stashed += blocks; + return WriteStash(params.stashbase, id, blocks, params.buffer, false, nullptr); } static int PerformCommandFree(CommandParameters& params) { - // <stash_id> - if (params.cpos >= params.tokens.size()) { - LOG(ERROR) << "missing stash id in free command"; - return -1; - } - - const std::string& id = params.tokens[params.cpos++]; + // <stash_id> + if (params.cpos >= params.tokens.size()) { + LOG(ERROR) << "missing stash id in free command"; + return -1; + } - stash_map.erase(id); + const std::string& id = params.tokens[params.cpos++]; + stash_map.erase(id); - if (params.createdstash || params.canwrite) { - return FreeStash(params.stashbase, id); - } + if (params.createdstash || params.canwrite) { + return FreeStash(params.stashbase, id); + } - return 0; + return 0; } static int PerformCommandZero(CommandParameters& params) { @@ -1337,15 +1284,7 @@ static int PerformCommandDiff(CommandParameters& params) { RangeSet tgt; size_t blocks = 0; bool overlap = false; - int status = 0; - if (params.version == 1) { - status = LoadSrcTgtVersion1(params, tgt, blocks, params.buffer, params.fd); - } else if (params.version == 2) { - status = LoadSrcTgtVersion2(params, tgt, blocks, params.buffer, params.fd, - params.stashbase, nullptr); - } else if (params.version >= 3) { - status = LoadSrcTgtVersion3(params, tgt, blocks, false, overlap); - } + int status = LoadSrcTgtVersion3(params, tgt, blocks, false, overlap); if (status == -1) { LOG(ERROR) << "failed to read blocks for diff"; @@ -1471,301 +1410,281 @@ struct Command { static Value* PerformBlockImageUpdate(const char* name, State* state, const std::vector<std::unique_ptr<Expr>>& argv, const Command* commands, size_t cmdcount, bool dryrun) { - CommandParameters params = {}; - params.canwrite = !dryrun; + CommandParameters params = {}; + params.canwrite = !dryrun; - LOG(INFO) << "performing " << (dryrun ? "verification" : "update"); - if (state->is_retry) { - is_retry = true; - LOG(INFO) << "This update is a retry."; - } - if (argv.size() != 4) { - ErrorAbort(state, kArgsParsingFailure, "block_image_update expects 4 arguments, got %zu", - argv.size()); - return StringValue(""); - } + LOG(INFO) << "performing " << (dryrun ? 
"verification" : "update"); + if (state->is_retry) { + is_retry = true; + LOG(INFO) << "This update is a retry."; + } + if (argv.size() != 4) { + ErrorAbort(state, kArgsParsingFailure, "block_image_update expects 4 arguments, got %zu", + argv.size()); + return StringValue(""); + } - std::vector<std::unique_ptr<Value>> args; - if (!ReadValueArgs(state, argv, &args)) { - return nullptr; - } + std::vector<std::unique_ptr<Value>> args; + if (!ReadValueArgs(state, argv, &args)) { + return nullptr; + } - const Value* blockdev_filename = args[0].get(); - const Value* transfer_list_value = args[1].get(); - const Value* new_data_fn = args[2].get(); - const Value* patch_data_fn = args[3].get(); + const Value* blockdev_filename = args[0].get(); + const Value* transfer_list_value = args[1].get(); + const Value* new_data_fn = args[2].get(); + const Value* patch_data_fn = args[3].get(); - if (blockdev_filename->type != VAL_STRING) { - ErrorAbort(state, kArgsParsingFailure, "blockdev_filename argument to %s must be string", - name); - return StringValue(""); - } - if (transfer_list_value->type != VAL_BLOB) { - ErrorAbort(state, kArgsParsingFailure, "transfer_list argument to %s must be blob", name); - return StringValue(""); - } - if (new_data_fn->type != VAL_STRING) { - ErrorAbort(state, kArgsParsingFailure, "new_data_fn argument to %s must be string", name); - return StringValue(""); - } - if (patch_data_fn->type != VAL_STRING) { - ErrorAbort(state, kArgsParsingFailure, "patch_data_fn argument to %s must be string", - name); - return StringValue(""); - } + if (blockdev_filename->type != VAL_STRING) { + ErrorAbort(state, kArgsParsingFailure, "blockdev_filename argument to %s must be string", name); + return StringValue(""); + } + if (transfer_list_value->type != VAL_BLOB) { + ErrorAbort(state, kArgsParsingFailure, "transfer_list argument to %s must be blob", name); + return StringValue(""); + } + if (new_data_fn->type != VAL_STRING) { + ErrorAbort(state, kArgsParsingFailure, "new_data_fn argument to %s must be string", name); + return StringValue(""); + } + if (patch_data_fn->type != VAL_STRING) { + ErrorAbort(state, kArgsParsingFailure, "patch_data_fn argument to %s must be string", name); + return StringValue(""); + } - UpdaterInfo* ui = static_cast<UpdaterInfo*>(state->cookie); - if (ui == nullptr) { - return StringValue(""); - } + UpdaterInfo* ui = static_cast<UpdaterInfo*>(state->cookie); + if (ui == nullptr) { + return StringValue(""); + } - FILE* cmd_pipe = ui->cmd_pipe; - ZipArchiveHandle za = ui->package_zip; + FILE* cmd_pipe = ui->cmd_pipe; + ZipArchiveHandle za = ui->package_zip; - if (cmd_pipe == nullptr || za == nullptr) { - return StringValue(""); - } + if (cmd_pipe == nullptr || za == nullptr) { + return StringValue(""); + } - ZipString path_data(patch_data_fn->data.c_str()); - ZipEntry patch_entry; - if (FindEntry(za, path_data, &patch_entry) != 0) { - LOG(ERROR) << name << "(): no file \"" << patch_data_fn->data << "\" in package"; - return StringValue(""); - } + ZipString path_data(patch_data_fn->data.c_str()); + ZipEntry patch_entry; + if (FindEntry(za, path_data, &patch_entry) != 0) { + LOG(ERROR) << name << "(): no file \"" << patch_data_fn->data << "\" in package"; + return StringValue(""); + } - params.patch_start = ui->package_zip_addr + patch_entry.offset; - ZipString new_data(new_data_fn->data.c_str()); - ZipEntry new_entry; - if (FindEntry(za, new_data, &new_entry) != 0) { - LOG(ERROR) << name << "(): no file \"" << new_data_fn->data << "\" in package"; - return 
StringValue(""); - } + params.patch_start = ui->package_zip_addr + patch_entry.offset; + ZipString new_data(new_data_fn->data.c_str()); + ZipEntry new_entry; + if (FindEntry(za, new_data, &new_entry) != 0) { + LOG(ERROR) << name << "(): no file \"" << new_data_fn->data << "\" in package"; + return StringValue(""); + } - params.fd.reset(TEMP_FAILURE_RETRY(ota_open(blockdev_filename->data.c_str(), O_RDWR))); - if (params.fd == -1) { - PLOG(ERROR) << "open \"" << blockdev_filename->data << "\" failed"; - return StringValue(""); - } + params.fd.reset(TEMP_FAILURE_RETRY(ota_open(blockdev_filename->data.c_str(), O_RDWR))); + if (params.fd == -1) { + PLOG(ERROR) << "open \"" << blockdev_filename->data << "\" failed"; + return StringValue(""); + } - if (params.canwrite) { - params.nti.za = za; - params.nti.entry = new_entry; - - pthread_mutex_init(¶ms.nti.mu, nullptr); - pthread_cond_init(¶ms.nti.cv, nullptr); - pthread_attr_t attr; - pthread_attr_init(&attr); - pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); - - int error = pthread_create(¶ms.thread, &attr, unzip_new_data, ¶ms.nti); - if (error != 0) { - PLOG(ERROR) << "pthread_create failed"; - return StringValue(""); - } - } + if (params.canwrite) { + params.nti.za = za; + params.nti.entry = new_entry; - std::vector<std::string> lines = android::base::Split(transfer_list_value->data, "\n"); - if (lines.size() < 2) { - ErrorAbort(state, kArgsParsingFailure, "too few lines in the transfer list [%zd]\n", - lines.size()); - return StringValue(""); - } + pthread_mutex_init(¶ms.nti.mu, nullptr); + pthread_cond_init(¶ms.nti.cv, nullptr); + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); - // First line in transfer list is the version number - if (!android::base::ParseInt(lines[0], ¶ms.version, 1, 4)) { - LOG(ERROR) << "unexpected transfer list version [" << lines[0] << "]"; - return StringValue(""); + int error = pthread_create(¶ms.thread, &attr, unzip_new_data, ¶ms.nti); + if (error != 0) { + PLOG(ERROR) << "pthread_create failed"; + return StringValue(""); } + } - LOG(INFO) << "blockimg version is " << params.version; - - // Second line in transfer list is the total number of blocks we expect to write - size_t total_blocks; - if (!android::base::ParseUint(lines[1], &total_blocks)) { - ErrorAbort(state, kArgsParsingFailure, "unexpected block count [%s]\n", lines[1].c_str()); - return StringValue(""); - } + std::vector<std::string> lines = android::base::Split(transfer_list_value->data, "\n"); + if (lines.size() < 2) { + ErrorAbort(state, kArgsParsingFailure, "too few lines in the transfer list [%zd]\n", + lines.size()); + return StringValue(""); + } - if (total_blocks == 0) { - return StringValue("t"); - } + // First line in transfer list is the version number. + if (!android::base::ParseInt(lines[0], ¶ms.version, 3, 4)) { + LOG(ERROR) << "unexpected transfer list version [" << lines[0] << "]"; + return StringValue(""); + } - size_t start = 2; - if (params.version >= 2) { - if (lines.size() < 4) { - ErrorAbort(state, kArgsParsingFailure, "too few lines in the transfer list [%zu]\n", - lines.size()); - return StringValue(""); - } + LOG(INFO) << "blockimg version is " << params.version; - // Third line is how many stash entries are needed simultaneously - LOG(INFO) << "maximum stash entries " << lines[2]; + // Second line in transfer list is the total number of blocks we expect to write. 
+ size_t total_blocks; + if (!android::base::ParseUint(lines[1], &total_blocks)) { + ErrorAbort(state, kArgsParsingFailure, "unexpected block count [%s]\n", lines[1].c_str()); + return StringValue(""); + } - // Fourth line is the maximum number of blocks that will be stashed simultaneously - size_t stash_max_blocks; - if (!android::base::ParseUint(lines[3], &stash_max_blocks)) { - ErrorAbort(state, kArgsParsingFailure, "unexpected maximum stash blocks [%s]\n", - lines[3].c_str()); - return StringValue(""); - } + if (total_blocks == 0) { + return StringValue("t"); + } - int res = CreateStash(state, stash_max_blocks, blockdev_filename->data, params.stashbase); - if (res == -1) { - return StringValue(""); - } + size_t start = 2; + if (lines.size() < 4) { + ErrorAbort(state, kArgsParsingFailure, "too few lines in the transfer list [%zu]\n", + lines.size()); + return StringValue(""); + } - params.createdstash = res; + // Third line is how many stash entries are needed simultaneously. + LOG(INFO) << "maximum stash entries " << lines[2]; - start += 2; - } + // Fourth line is the maximum number of blocks that will be stashed simultaneously + size_t stash_max_blocks; + if (!android::base::ParseUint(lines[3], &stash_max_blocks)) { + ErrorAbort(state, kArgsParsingFailure, "unexpected maximum stash blocks [%s]\n", + lines[3].c_str()); + return StringValue(""); + } - // Build a map of the available commands - std::unordered_map<std::string, const Command*> cmd_map; - for (size_t i = 0; i < cmdcount; ++i) { - if (cmd_map.find(commands[i].name) != cmd_map.end()) { - LOG(ERROR) << "Error: command [" << commands[i].name - << "] already exists in the cmd map."; - return StringValue(strdup("")); - } - cmd_map[commands[i].name] = &commands[i]; - } + int res = CreateStash(state, stash_max_blocks, blockdev_filename->data, params.stashbase); + if (res == -1) { + return StringValue(""); + } - int rc = -1; + params.createdstash = res; - // Subsequent lines are all individual transfer commands - for (auto it = lines.cbegin() + start; it != lines.cend(); it++) { - const std::string& line(*it); - if (line.empty()) continue; + start += 2; - params.tokens = android::base::Split(line, " "); - params.cpos = 0; - params.cmdname = params.tokens[params.cpos++].c_str(); - params.cmdline = line.c_str(); + // Build a map of the available commands + std::unordered_map<std::string, const Command*> cmd_map; + for (size_t i = 0; i < cmdcount; ++i) { + if (cmd_map.find(commands[i].name) != cmd_map.end()) { + LOG(ERROR) << "Error: command [" << commands[i].name << "] already exists in the cmd map."; + return StringValue(strdup("")); + } + cmd_map[commands[i].name] = &commands[i]; + } - if (cmd_map.find(params.cmdname) == cmd_map.end()) { - LOG(ERROR) << "unexpected command [" << params.cmdname << "]"; - goto pbiudone; - } + int rc = -1; - const Command* cmd = cmd_map[params.cmdname]; + // Subsequent lines are all individual transfer commands + for (auto it = lines.cbegin() + start; it != lines.cend(); it++) { + const std::string& line(*it); + if (line.empty()) continue; - if (cmd->f != nullptr && cmd->f(params) == -1) { - LOG(ERROR) << "failed to execute command [" << line << "]"; - goto pbiudone; - } + params.tokens = android::base::Split(line, " "); + params.cpos = 0; + params.cmdname = params.tokens[params.cpos++].c_str(); + params.cmdline = line.c_str(); - if (params.canwrite) { - if (ota_fsync(params.fd) == -1) { - failure_type = kFsyncFailure; - PLOG(ERROR) << "fsync failed"; - goto pbiudone; - } - fprintf(cmd_pipe, 
"set_progress %.4f\n", - static_cast<double>(params.written) / total_blocks); - fflush(cmd_pipe); - } + if (cmd_map.find(params.cmdname) == cmd_map.end()) { + LOG(ERROR) << "unexpected command [" << params.cmdname << "]"; + goto pbiudone; } - if (params.canwrite) { - pthread_join(params.thread, nullptr); - - LOG(INFO) << "wrote " << params.written << " blocks; expected " << total_blocks; - LOG(INFO) << "stashed " << params.stashed << " blocks"; - LOG(INFO) << "max alloc needed was " << params.buffer.size(); - - const char* partition = strrchr(blockdev_filename->data.c_str(), '/'); - if (partition != nullptr && *(partition + 1) != 0) { - fprintf(cmd_pipe, "log bytes_written_%s: %zu\n", partition + 1, - params.written * BLOCKSIZE); - fprintf(cmd_pipe, "log bytes_stashed_%s: %zu\n", partition + 1, - params.stashed * BLOCKSIZE); - fflush(cmd_pipe); - } - // Delete stash only after successfully completing the update, as it - // may contain blocks needed to complete the update later. - DeleteStash(params.stashbase); - } else { - LOG(INFO) << "verified partition contents; update may be resumed"; - } + const Command* cmd = cmd_map[params.cmdname]; - rc = 0; + if (cmd->f != nullptr && cmd->f(params) == -1) { + LOG(ERROR) << "failed to execute command [" << line << "]"; + goto pbiudone; + } -pbiudone: - if (ota_fsync(params.fd) == -1) { + if (params.canwrite) { + if (ota_fsync(params.fd) == -1) { failure_type = kFsyncFailure; PLOG(ERROR) << "fsync failed"; + goto pbiudone; + } + fprintf(cmd_pipe, "set_progress %.4f\n", static_cast<double>(params.written) / total_blocks); + fflush(cmd_pipe); } - // params.fd will be automatically closed because it's a unique_fd. + } - // Only delete the stash if the update cannot be resumed, or it's - // a verification run and we created the stash. - if (params.isunresumable || (!params.canwrite && params.createdstash)) { - DeleteStash(params.stashbase); - } + if (params.canwrite) { + pthread_join(params.thread, nullptr); - if (failure_type != kNoCause && state->cause_code == kNoCause) { - state->cause_code = failure_type; + LOG(INFO) << "wrote " << params.written << " blocks; expected " << total_blocks; + LOG(INFO) << "stashed " << params.stashed << " blocks"; + LOG(INFO) << "max alloc needed was " << params.buffer.size(); + + const char* partition = strrchr(blockdev_filename->data.c_str(), '/'); + if (partition != nullptr && *(partition + 1) != 0) { + fprintf(cmd_pipe, "log bytes_written_%s: %zu\n", partition + 1, params.written * BLOCKSIZE); + fprintf(cmd_pipe, "log bytes_stashed_%s: %zu\n", partition + 1, params.stashed * BLOCKSIZE); + fflush(cmd_pipe); } + // Delete stash only after successfully completing the update, as it may contain blocks needed + // to complete the update later. + DeleteStash(params.stashbase); + } else { + LOG(INFO) << "verified partition contents; update may be resumed"; + } - return StringValue(rc == 0 ? "t" : ""); -} + rc = 0; -// The transfer list is a text file containing commands to -// transfer data from one place to another on the target -// partition. We parse it and execute the commands in order: -// -// zero [rangeset] -// - fill the indicated blocks with zeros -// -// new [rangeset] -// - fill the blocks with data read from the new_data file -// -// erase [rangeset] -// - mark the given blocks as empty -// -// move <...> -// bsdiff <patchstart> <patchlen> <...> -// imgdiff <patchstart> <patchlen> <...> -// - read the source blocks, apply a patch (or not in the -// case of move), write result to target blocks. 
bsdiff or -// imgdiff specifies the type of patch; move means no patch -// at all. -// -// The format of <...> differs between versions 1 and 2; -// see the LoadSrcTgtVersion{1,2}() functions for a -// description of what's expected. -// -// stash <stash_id> <src_range> -// - (version 2+ only) load the given source range and stash -// the data in the given slot of the stash table. -// -// free <stash_id> -// - (version 3+ only) free the given stash data. -// -// The creator of the transfer list will guarantee that no block -// is read (ie, used as the source for a patch or move) after it -// has been written. -// -// In version 2, the creator will guarantee that a given stash is -// loaded (with a stash command) before it's used in a -// move/bsdiff/imgdiff command. -// -// Within one command the source and target ranges may overlap so -// in general we need to read the entire source into memory before -// writing anything to the target blocks. -// -// All the patch data is concatenated into one patch_data file in -// the update package. It must be stored uncompressed because we -// memory-map it in directly from the archive. (Since patches are -// already compressed, we lose very little by not compressing -// their concatenation.) -// -// In version 3, commands that read data from the partition (i.e. -// move/bsdiff/imgdiff/stash) have one or more additional hashes -// before the range parameters, which are used to check if the -// command has already been completed and verify the integrity of -// the source data. +pbiudone: + if (ota_fsync(params.fd) == -1) { + failure_type = kFsyncFailure; + PLOG(ERROR) << "fsync failed"; + } + // params.fd will be automatically closed because it's a unique_fd. + + // Only delete the stash if the update cannot be resumed, or it's a verification run and we + // created the stash. + if (params.isunresumable || (!params.canwrite && params.createdstash)) { + DeleteStash(params.stashbase); + } + + if (failure_type != kNoCause && state->cause_code == kNoCause) { + state->cause_code = failure_type; + } + return StringValue(rc == 0 ? "t" : ""); +} + +/** + * The transfer list is a text file containing commands to transfer data from one place to another + * on the target partition. We parse it and execute the commands in order: + * + * zero [rangeset] + * - Fill the indicated blocks with zeros. + * + * new [rangeset] + * - Fill the blocks with data read from the new_data file. + * + * erase [rangeset] + * - Mark the given blocks as empty. + * + * move <...> + * bsdiff <patchstart> <patchlen> <...> + * imgdiff <patchstart> <patchlen> <...> + * - Read the source blocks, apply a patch (or not in the case of move), write result to target + * blocks. bsdiff or imgdiff specifies the type of patch; move means no patch at all. + * + * See the comments in LoadSrcTgtVersion3() for a description of the <...> format. + * + * stash <stash_id> <src_range> + * - Load the given source range and stash the data in the given slot of the stash table. + * + * free <stash_id> + * - Free the given stash data. + * + * The creator of the transfer list will guarantee that no block is read (ie, used as the source for + * a patch or move) after it has been written. + * + * The creator will guarantee that a given stash is loaded (with a stash command) before it's used + * in a move/bsdiff/imgdiff command. + * + * Within one command the source and target ranges may overlap so in general we need to read the + * entire source into memory before writing anything to the target blocks. 
+ * + * All the patch data is concatenated into one patch_data file in the update package. It must be + * stored uncompressed because we memory-map it in directly from the archive. (Since patches are + * already compressed, we lose very little by not compressing their concatenation.) + * + * Commands that read data from the partition (i.e. move/bsdiff/imgdiff/stash) have one or more + * additional hashes before the range parameters, which are used to check if the command has already + * been completed and verify the integrity of the source data. + */ Value* BlockImageVerifyFn(const char* name, State* state, const std::vector<std::unique_ptr<Expr>>& argv) { // Commands which are not tested are set to nullptr to skip them completely diff --git a/verifier.cpp b/verifier.cpp index e9d540cdb..23142c120 100644 --- a/verifier.cpp +++ b/verifier.cpp @@ -370,7 +370,7 @@ std::unique_ptr<RSA, RSADeleter> parse_rsa_key(FILE* file, uint32_t exponent) { } struct BNDeleter { - void operator()(BIGNUM* bn) { + void operator()(BIGNUM* bn) const { BN_free(bn); } }; diff --git a/verifier.h b/verifier.h index 6bee74947..6fa8f2b0a 100644 --- a/verifier.h +++ b/verifier.h @@ -26,13 +26,13 @@ #include <openssl/sha.h> struct RSADeleter { - void operator()(RSA* rsa) { + void operator()(RSA* rsa) const { RSA_free(rsa); } }; struct ECKEYDeleter { - void operator()(EC_KEY* ec_key) { + void operator()(EC_KEY* ec_key) const { EC_KEY_free(ec_key); } }; diff --git a/wear_touch.cpp b/wear_touch.cpp index cf33daa9f..e2ab44d2d 100644 --- a/wear_touch.cpp +++ b/wear_touch.cpp @@ -118,7 +118,7 @@ void WearSwipeDetector::run() { } void* WearSwipeDetector::touch_thread(void* cookie) { - ((WearSwipeDetector*)cookie)->run(); + (static_cast<WearSwipeDetector*>(cookie))->run(); return NULL; } |
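The transfer-list layout documented in the blockimg.cpp comment above is easier to follow with a concrete sample. The sketch below is illustrative only and is not part of this change: the sample list, its block counts, and the plain std::istringstream/std::stoul parsing are stand-ins for the android::base helpers and the real command table used by the updater.

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main() {
  // Hypothetical version-4 transfer list: four header lines, then one command per line.
  // Rangesets are "<num>,<start1>,<end1>,...", where <num> counts the integers that follow.
  const std::string transfer_list =
      "4\n"   // format version (3 or 4 accepted by the code above)
      "6\n"   // total blocks to be written
      "0\n"   // maximum simultaneous stash entries
      "0\n"   // maximum blocks stashed at once
      "zero 2,0,2\n"
      "new 2,2,6\n"
      "erase 2,6,8\n";

  std::vector<std::string> lines;
  std::istringstream in(transfer_list);
  for (std::string line; std::getline(in, line);) lines.push_back(line);

  if (lines.size() < 4) {
    std::cerr << "too few lines in the transfer list\n";
    return 1;
  }

  int version = std::stoi(lines[0]);
  if (version < 3 || version > 4) {
    std::cerr << "unexpected transfer list version [" << lines[0] << "]\n";
    return 1;
  }
  size_t total_blocks = std::stoul(lines[1]);
  size_t max_stash_entries = std::stoul(lines[2]);
  size_t max_stash_blocks = std::stoul(lines[3]);

  std::cout << "version " << version << ", " << total_blocks << " blocks to write, "
            << max_stash_entries << " stash entries, " << max_stash_blocks
            << " stashed blocks max\n";

  // Remaining lines are the individual commands (zero/new/erase/move/bsdiff/imgdiff/stash/free),
  // dispatched by their first token, much like the cmd_map lookup in the diff above.
  for (size_t i = 4; i < lines.size(); ++i) {
    std::cout << "command: " << lines[i] << "\n";
  }
  return 0;
}

Because the minimum supported version is now 3, the header is always four lines, which is why the refactored code above no longer guards the stash-related header lines behind a version check.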