-rw-r--r--  bootloader_message/bootloader_message.cpp |  10
-rw-r--r--  otafault/ota_io.cpp                        |   2
-rw-r--r--  otafault/ota_io.h                          |   2
-rw-r--r--  recovery.cpp                               |  82
-rw-r--r--  screen_ui.cpp                              |   6
-rw-r--r--  screen_ui.h                                |   6
-rw-r--r--  ui.cpp                                     |   2
-rw-r--r--  updater/blockimg.cpp                       | 795
-rw-r--r--  verifier.cpp                               |   2
-rw-r--r--  verifier.h                                 |   4
-rw-r--r--  wear_touch.cpp                             |   2
11 files changed, 419 insertions, 494 deletions
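The first hunk below switches get_misc_blk_device() to a boot-mode-aware fstab. As a reading aid, here is a minimal standalone sketch of that pattern, using only the fs_mgr calls that appear in the hunk (the helper name ReadFstabForCurrentBoot is hypothetical, not part of the patch):

#include <unistd.h>
#include <memory>

#include <fs_mgr.h>

// Pick the fstab source depending on whether we are running inside recovery.
// /sbin/recovery only exists on the recovery ramdisk, so its presence is used
// as the boot-mode check, exactly as in the hunk below.
static std::unique_ptr<fstab, decltype(&fs_mgr_free_fstab)> ReadFstabForCurrentBoot() {
  std::unique_ptr<fstab, decltype(&fs_mgr_free_fstab)> tab(nullptr, fs_mgr_free_fstab);
  if (access("/sbin/recovery", F_OK) == 0) {
    tab.reset(fs_mgr_read_fstab_with_dt("/etc/recovery.fstab"));
  } else {
    tab.reset(fs_mgr_read_fstab_default());
  }
  return tab;  // nullptr on failure; callers should check before use.
}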
diff --git a/bootloader_message/bootloader_message.cpp b/bootloader_message/bootloader_message.cpp index d8086be28..d17e055bb 100644 --- a/bootloader_message/bootloader_message.cpp +++ b/bootloader_message/bootloader_message.cpp @@ -19,6 +19,7 @@ #include <errno.h> #include <fcntl.h> #include <string.h> +#include <unistd.h> #include <string> #include <vector> @@ -30,8 +31,13 @@ #include <fs_mgr.h> static std::string get_misc_blk_device(std::string* err) { - std::unique_ptr<fstab, decltype(&fs_mgr_free_fstab)> fstab(fs_mgr_read_fstab_default(), - fs_mgr_free_fstab); + std::unique_ptr<fstab, decltype(&fs_mgr_free_fstab)> fstab(nullptr, fs_mgr_free_fstab); + // Use different fstab paths for normal boot and recovery boot, respectively + if (access("/sbin/recovery", F_OK) == 0) { + fstab.reset(fs_mgr_read_fstab_with_dt("/etc/recovery.fstab")); + } else { + fstab.reset(fs_mgr_read_fstab_default()); + } if (!fstab) { *err = "failed to read default fstab"; return ""; diff --git a/otafault/ota_io.cpp b/otafault/ota_io.cpp index f5b01136f..3a89bb5dd 100644 --- a/otafault/ota_io.cpp +++ b/otafault/ota_io.cpp @@ -89,7 +89,7 @@ static int __ota_fclose(FILE* fh) { return fclose(fh); } -void OtaFcloser::operator()(FILE* f) { +void OtaFcloser::operator()(FILE* f) const { __ota_fclose(f); }; diff --git a/otafault/ota_io.h b/otafault/ota_io.h index 395b4230e..9428f1b1f 100644 --- a/otafault/ota_io.h +++ b/otafault/ota_io.h @@ -59,7 +59,7 @@ using unique_fd = android::base::unique_fd_impl<OtaCloser>; int ota_close(unique_fd& fd); struct OtaFcloser { - void operator()(FILE*); + void operator()(FILE*) const; }; using unique_file = std::unique_ptr<FILE, OtaFcloser>; diff --git a/recovery.cpp b/recovery.cpp index ccb8e5d95..c2262161a 100644 --- a/recovery.cpp +++ b/recovery.cpp @@ -752,13 +752,15 @@ static bool wipe_data(Device* device) { static bool prompt_and_wipe_data(Device* device) { const char* const headers[] = { - "Boot halted, user data is corrupt", - "Wipe all user data to recover", + "Can't load Android system. Your data may be corrupt.", + "If you continue to get this message, you may need to", + "perform a factory data reset and erase all user data", + "stored on this device.", NULL }; const char* const items[] = { - "Retry boot", - "Wipe user data", + "Try again", + "Factory data reset", NULL }; for (;;) { @@ -791,47 +793,45 @@ static bool wipe_cache(bool should_confirm, Device* device) { return success; } -// Secure-wipe a given partition. It uses BLKSECDISCARD, if supported. -// Otherwise, it goes with BLKDISCARD (if device supports BLKDISCARDZEROES) or -// BLKZEROOUT. +// Secure-wipe a given partition. It uses BLKSECDISCARD, if supported. Otherwise, it goes with +// BLKDISCARD (if device supports BLKDISCARDZEROES) or BLKZEROOUT. 
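Before the hunk continues below with the reindented function body, a compact sketch of the ioctl fallback chain that secure_wipe_partition() implements (simplified: the open() of the partition and the PLOG/LOG calls are omitted, and SecureWipeSketch is not a name from the patch):

#include <linux/fs.h>
#include <sys/ioctl.h>
#include <cstdint>

// Try a secure discard of the whole device; fall back to a plain discard only
// if the device guarantees discarded blocks read back as zeroes, otherwise
// zero the blocks explicitly.
static bool SecureWipeSketch(int fd) {
  uint64_t range[2] = { 0, 0 };  // {offset, length}
  if (ioctl(fd, BLKGETSIZE64, &range[1]) == -1 || range[1] == 0) {
    return false;
  }
  if (ioctl(fd, BLKSECDISCARD, &range) == 0) {
    return true;
  }
  unsigned int zeroes = 0;
  if (ioctl(fd, BLKDISCARDZEROES, &zeroes) == 0 && zeroes != 0) {
    return ioctl(fd, BLKDISCARD, &range) == 0;
  }
  return ioctl(fd, BLKZEROOUT, &range) == 0;
}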
static bool secure_wipe_partition(const std::string& partition) { - android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(partition.c_str(), O_WRONLY))); - if (fd == -1) { - PLOG(ERROR) << "failed to open \"" << partition << "\""; - return false; - } + android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(partition.c_str(), O_WRONLY))); + if (fd == -1) { + PLOG(ERROR) << "Failed to open \"" << partition << "\""; + return false; + } - uint64_t range[2] = {0, 0}; - if (ioctl(fd, BLKGETSIZE64, &range[1]) == -1 || range[1] == 0) { - PLOG(ERROR) << "failed to get partition size"; + uint64_t range[2] = { 0, 0 }; + if (ioctl(fd, BLKGETSIZE64, &range[1]) == -1 || range[1] == 0) { + PLOG(ERROR) << "Failed to get partition size"; + return false; + } + LOG(INFO) << "Secure-wiping \"" << partition << "\" from " << range[0] << " to " << range[1]; + + LOG(INFO) << " Trying BLKSECDISCARD..."; + if (ioctl(fd, BLKSECDISCARD, &range) == -1) { + PLOG(WARNING) << " Failed"; + + // Use BLKDISCARD if it zeroes out blocks, otherwise use BLKZEROOUT. + unsigned int zeroes; + if (ioctl(fd, BLKDISCARDZEROES, &zeroes) == 0 && zeroes != 0) { + LOG(INFO) << " Trying BLKDISCARD..."; + if (ioctl(fd, BLKDISCARD, &range) == -1) { + PLOG(ERROR) << " Failed"; return false; + } + } else { + LOG(INFO) << " Trying BLKZEROOUT..."; + if (ioctl(fd, BLKZEROOUT, &range) == -1) { + PLOG(ERROR) << " Failed"; + return false; + } } - printf("Secure-wiping \"%s\" from %" PRIu64 " to %" PRIu64 ".\n", - partition.c_str(), range[0], range[1]); - - printf("Trying BLKSECDISCARD...\t"); - if (ioctl(fd, BLKSECDISCARD, &range) == -1) { - printf("failed: %s\n", strerror(errno)); - - // Use BLKDISCARD if it zeroes out blocks, otherwise use BLKZEROOUT. - unsigned int zeroes; - if (ioctl(fd, BLKDISCARDZEROES, &zeroes) == 0 && zeroes != 0) { - printf("Trying BLKDISCARD...\t"); - if (ioctl(fd, BLKDISCARD, &range) == -1) { - printf("failed: %s\n", strerror(errno)); - return false; - } - } else { - printf("Trying BLKZEROOUT...\t"); - if (ioctl(fd, BLKZEROOUT, &range) == -1) { - printf("failed: %s\n", strerror(errno)); - return false; - } - } - } + } - printf("done\n"); - return true; + LOG(INFO) << " Done"; + return true; } // Check if the wipe package matches expectation: @@ -863,7 +863,7 @@ static bool check_wipe_package(size_t wipe_package_size) { return false; } std::string metadata; - if (!read_metadata_from_package(&zip, &metadata)) { + if (!read_metadata_from_package(zip, &metadata)) { CloseArchive(zip); return false; } diff --git a/screen_ui.cpp b/screen_ui.cpp index 706877b4d..bb2772dd8 100644 --- a/screen_ui.cpp +++ b/screen_ui.cpp @@ -98,7 +98,7 @@ GRSurface* ScreenRecoveryUI::GetCurrentText() { } } -int ScreenRecoveryUI::PixelsFromDp(int dp) { +int ScreenRecoveryUI::PixelsFromDp(int dp) const { return dp * density_; } @@ -256,12 +256,12 @@ void ScreenRecoveryUI::DrawHorizontalRule(int* y) { *y += 4; } -void ScreenRecoveryUI::DrawTextLine(int x, int* y, const char* line, bool bold) { +void ScreenRecoveryUI::DrawTextLine(int x, int* y, const char* line, bool bold) const { gr_text(gr_sys_font(), x, *y, line, bold); *y += char_height_ + 4; } -void ScreenRecoveryUI::DrawTextLines(int x, int* y, const char* const* lines) { +void ScreenRecoveryUI::DrawTextLines(int x, int* y, const char* const* lines) const { for (size_t i = 0; lines != nullptr && lines[i] != nullptr; ++i) { DrawTextLine(x, y, lines[i], false); } diff --git a/screen_ui.h b/screen_ui.h index b2dcf4aeb..a2322c36c 100644 --- a/screen_ui.h +++ b/screen_ui.h @@ -160,14 +160,14 @@ class 
ScreenRecoveryUI : public RecoveryUI { void LoadBitmap(const char* filename, GRSurface** surface); void LoadLocalizedBitmap(const char* filename, GRSurface** surface); - int PixelsFromDp(int dp); + int PixelsFromDp(int dp) const; virtual int GetAnimationBaseline(); virtual int GetProgressBaseline(); virtual int GetTextBaseline(); void DrawHorizontalRule(int* y); - void DrawTextLine(int x, int* y, const char* line, bool bold); - void DrawTextLines(int x, int* y, const char* const* lines); + void DrawTextLine(int x, int* y, const char* line, bool bold) const; + void DrawTextLines(int x, int* y, const char* const* lines) const; }; #endif // RECOVERY_UI_H @@ -240,7 +240,7 @@ void RecoveryUI::ProcessKey(int key_code, int updown) { } void* RecoveryUI::time_key_helper(void* cookie) { - key_timer_t* info = (key_timer_t*) cookie; + key_timer_t* info = static_cast<key_timer_t*>(cookie); info->ui->time_key(info->key_code, info->count); delete info; return nullptr; diff --git a/updater/blockimg.cpp b/updater/blockimg.cpp index 12ca151a1..c614ccc47 100644 --- a/updater/blockimg.cpp +++ b/updater/blockimg.cpp @@ -356,7 +356,7 @@ static bool receive_new_data(const uint8_t* data, size_t size, void* cookie) { } static void* unzip_new_data(void* cookie) { - NewThreadInfo* nti = (NewThreadInfo*) cookie; + NewThreadInfo* nti = static_cast<NewThreadInfo*>(cookie); ProcessZipEntryContents(nti->za, &nti->entry, receive_new_data, nti); return nullptr; } @@ -429,46 +429,11 @@ struct CommandParameters { uint8_t* patch_start; }; -// Do a source/target load for move/bsdiff/imgdiff in version 1. -// We expect to parse the remainder of the parameter tokens as: -// -// <src_range> <tgt_range> -// -// The source range is loaded into the provided buffer, reallocating -// it to make it larger if necessary. - -static int LoadSrcTgtVersion1(CommandParameters& params, RangeSet& tgt, size_t& src_blocks, - std::vector<uint8_t>& buffer, int fd) { - - if (params.cpos + 1 >= params.tokens.size()) { - LOG(ERROR) << "invalid parameters"; - return -1; - } - - // <src_range> - RangeSet src = parse_range(params.tokens[params.cpos++]); - - // <tgt_range> - tgt = parse_range(params.tokens[params.cpos++]); - - allocate(src.size * BLOCKSIZE, buffer); - int rc = ReadBlocks(src, buffer, fd); - src_blocks = src.size; - - return rc; -} - // Print the hash in hex for corrupted source blocks (excluding the stashed blocks which is // handled separately). static void PrintHashForCorruptedSourceBlocks(const CommandParameters& params, const std::vector<uint8_t>& buffer) { LOG(INFO) << "unexpected contents of source blocks in cmd:\n" << params.cmdline; - if (params.version < 3) { - // TODO handle version 1,2 - LOG(WARNING) << "version number " << params.version << " is not supported to print hashes"; - return; - } - CHECK(params.tokens[0] == "move" || params.tokens[0] == "bsdiff" || params.tokens[0] == "imgdiff"); @@ -651,8 +616,8 @@ static void DeleteStash(const std::string& base) { } } -static int LoadStash(CommandParameters& params, const std::string& base, const std::string& id, - bool verify, size_t* blocks, std::vector<uint8_t>& buffer, bool printnoent) { +static int LoadStash(CommandParameters& params, const std::string& id, bool verify, size_t* blocks, + std::vector<uint8_t>& buffer, bool printnoent) { // In verify mode, if source range_set was saved for the given hash, // check contents in the source blocks first. If the check fails, // search for the stashed files on /cache as usual. 
@@ -674,17 +639,13 @@ static int LoadStash(CommandParameters& params, const std::string& base, const s } } - if (base.empty()) { - return -1; - } - size_t blockcount = 0; if (!blocks) { blocks = &blockcount; } - std::string fn = GetStashFileName(base, id, ""); + std::string fn = GetStashFileName(params.stashbase, id, ""); struct stat sb; int res = stat(fn.c_str(), &sb); @@ -735,7 +696,7 @@ static int LoadStash(CommandParameters& params, const std::string& base, const s } static int WriteStash(const std::string& base, const std::string& id, int blocks, - std::vector<uint8_t>& buffer, bool checkspace, bool *exists) { + std::vector<uint8_t>& buffer, bool checkspace, bool *exists) { if (base.empty()) { return -1; } @@ -895,52 +856,6 @@ static int CreateStash(State* state, size_t maxblocks, const std::string& blockd return 0; // Using existing directory } -static int SaveStash(CommandParameters& params, const std::string& base, - std::vector<uint8_t>& buffer, int fd, bool usehash) { - - // <stash_id> <src_range> - if (params.cpos + 1 >= params.tokens.size()) { - LOG(ERROR) << "missing id and/or src range fields in stash command"; - return -1; - } - const std::string& id = params.tokens[params.cpos++]; - - size_t blocks = 0; - if (usehash && LoadStash(params, base, id, true, &blocks, buffer, false) == 0) { - // Stash file already exists and has expected contents. Do not - // read from source again, as the source may have been already - // overwritten during a previous attempt. - return 0; - } - - RangeSet src = parse_range(params.tokens[params.cpos++]); - - allocate(src.size * BLOCKSIZE, buffer); - if (ReadBlocks(src, buffer, fd) == -1) { - return -1; - } - blocks = src.size; - stash_map[id] = src; - - if (usehash && VerifyBlocks(id, buffer, blocks, true) != 0) { - // Source blocks have unexpected contents. If we actually need this - // data later, this is an unrecoverable error. However, the command - // that uses the data may have already completed previously, so the - // possible failure will occur during source block verification. - LOG(ERROR) << "failed to load source blocks for stash " << id; - return 0; - } - - // In verify mode, we don't need to stash any blocks. - if (!params.canwrite && usehash) { - return 0; - } - - LOG(INFO) << "stashing " << blocks << " blocks to " << id; - params.stashed += blocks; - return WriteStash(base, id, blocks, buffer, false, nullptr); -} - static int FreeStash(const std::string& base, const std::string& id) { if (base.empty() || id.empty()) { return -1; @@ -980,13 +895,12 @@ static void MoveRange(std::vector<uint8_t>& dest, const RangeSet& locs, // <tgt_range> <src_block_count> <src_range> <src_loc> <[stash_id:stash_range] ...> // (loads data from both source image and stashes) // -// On return, buffer is filled with the loaded source data (rearranged -// and combined with stashed data as necessary). buffer may be -// reallocated if needed to accommodate the source data. *tgt is the -// target RangeSet. Any stashes required are loaded using LoadStash. +// On return, params.buffer is filled with the loaded source data (rearranged and combined with +// stashed data as necessary). buffer may be reallocated if needed to accommodate the source data. +// *tgt is the target RangeSet. Any stashes required are loaded using LoadStash. 
static int LoadSrcTgtVersion2(CommandParameters& params, RangeSet& tgt, size_t& src_blocks, - std::vector<uint8_t>& buffer, int fd, const std::string& stashbase, bool* overlap) { + bool* overlap) { // At least it needs to provide three parameters: <tgt_range>, // <src_block_count> and "-"/<src_range>. @@ -1005,7 +919,7 @@ static int LoadSrcTgtVersion2(CommandParameters& params, RangeSet& tgt, size_t& return -1; } - allocate(src_blocks * BLOCKSIZE, buffer); + allocate(src_blocks * BLOCKSIZE, params.buffer); // "-" or <src_range> [<src_loc>] if (params.tokens[params.cpos] == "-") { @@ -1013,7 +927,7 @@ static int LoadSrcTgtVersion2(CommandParameters& params, RangeSet& tgt, size_t& params.cpos++; } else { RangeSet src = parse_range(params.tokens[params.cpos++]); - int res = ReadBlocks(src, buffer, fd); + int res = ReadBlocks(src, params.buffer, params.fd); if (overlap) { *overlap = range_overlaps(src, tgt); @@ -1029,7 +943,7 @@ static int LoadSrcTgtVersion2(CommandParameters& params, RangeSet& tgt, size_t& } RangeSet locs = parse_range(params.tokens[params.cpos++]); - MoveRange(buffer, locs, buffer); + MoveRange(params.buffer, locs, params.buffer); } // <[stash_id:stash_range]> @@ -1044,7 +958,7 @@ static int LoadSrcTgtVersion2(CommandParameters& params, RangeSet& tgt, size_t& } std::vector<uint8_t> stash; - int res = LoadStash(params, stashbase, tokens[0], false, nullptr, stash, true); + int res = LoadStash(params, tokens[0], false, nullptr, stash, true); if (res == -1) { // These source blocks will fail verification if used later, but we @@ -1055,32 +969,41 @@ static int LoadSrcTgtVersion2(CommandParameters& params, RangeSet& tgt, size_t& RangeSet locs = parse_range(tokens[1]); - MoveRange(buffer, locs, stash); + MoveRange(params.buffer, locs, stash); } return 0; } -// Do a source/target load for move/bsdiff/imgdiff in version 3. -// -// Parameters are the same as for LoadSrcTgtVersion2, except for 'onehash', which -// tells the function whether to expect separate source and targe block hashes, or -// if they are both the same and only one hash should be expected, and -// 'isunresumable', which receives a non-zero value if block verification fails in -// a way that the update cannot be resumed anymore. -// -// If the function is unable to load the necessary blocks or their contents don't -// match the hashes, the return value is -1 and the command should be aborted. -// -// If the return value is 1, the command has already been completed according to -// the contents of the target blocks, and should not be performed again. -// -// If the return value is 0, source blocks have expected content and the command -// can be performed. - +/** + * Do a source/target load for move/bsdiff/imgdiff in version 3. + * + * We expect to parse the remainder of the parameter tokens as one of: + * + * <tgt_range> <src_block_count> <src_range> + * (loads data from source image only) + * + * <tgt_range> <src_block_count> - <[stash_id:stash_range] ...> + * (loads data from stashes only) + * + * <tgt_range> <src_block_count> <src_range> <src_loc> <[stash_id:stash_range] ...> + * (loads data from both source image and stashes) + * + * Parameters are the same as for LoadSrcTgtVersion2, except for 'onehash', which tells the function + * whether to expect separate source and targe block hashes, or if they are both the same and only + * one hash should be expected, and 'isunresumable', which receives a non-zero value if block + * verification fails in a way that the update cannot be resumed anymore. 
+ * + * If the function is unable to load the necessary blocks or their contents don't match the hashes, + * the return value is -1 and the command should be aborted. + * + * If the return value is 1, the command has already been completed according to the contents of the + * target blocks, and should not be performed again. + * + * If the return value is 0, source blocks have expected content and the command can be performed. + */ static int LoadSrcTgtVersion3(CommandParameters& params, RangeSet& tgt, size_t& src_blocks, - bool onehash, bool& overlap) { - + bool onehash, bool& overlap) { if (params.cpos >= params.tokens.size()) { LOG(ERROR) << "missing source hash"; return -1; @@ -1099,8 +1022,7 @@ static int LoadSrcTgtVersion3(CommandParameters& params, RangeSet& tgt, size_t& tgthash = params.tokens[params.cpos++]; } - if (LoadSrcTgtVersion2(params, tgt, src_blocks, params.buffer, params.fd, - params.stashbase, &overlap) == -1) { + if (LoadSrcTgtVersion2(params, tgt, src_blocks, &overlap) == -1) { return -1; } @@ -1111,7 +1033,7 @@ static int LoadSrcTgtVersion3(CommandParameters& params, RangeSet& tgt, size_t& } if (VerifyBlocks(tgthash, tgtbuffer, tgt.size, false) == 0) { - // Target blocks already have expected content, command should be skipped + // Target blocks already have expected content, command should be skipped. return 1; } @@ -1130,25 +1052,24 @@ static int LoadSrcTgtVersion3(CommandParameters& params, RangeSet& tgt, size_t& } params.stashed += src_blocks; - // Can be deleted when the write has completed + // Can be deleted when the write has completed. if (!stash_exists) { params.freestash = srchash; } } - // Source blocks have expected content, command can proceed + // Source blocks have expected content, command can proceed. return 0; } - if (overlap && LoadStash(params, params.stashbase, srchash, true, nullptr, params.buffer, - true) == 0) { + if (overlap && LoadStash(params, srchash, true, nullptr, params.buffer, true) == 0) { // Overlapping source blocks were previously stashed, command can proceed. // We are recovering from an interrupted command, so we don't know if the // stash can safely be deleted after this command. return 0; } - // Valid source data not available, update cannot be resumed + // Valid source data not available, update cannot be resumed. 
LOG(ERROR) << "partition has unexpected contents"; PrintHashForCorruptedSourceBlocks(params, params.buffer); @@ -1158,75 +1079,101 @@ static int LoadSrcTgtVersion3(CommandParameters& params, RangeSet& tgt, size_t& } static int PerformCommandMove(CommandParameters& params) { - size_t blocks = 0; - bool overlap = false; - int status = 0; - RangeSet tgt; + size_t blocks = 0; + bool overlap = false; + RangeSet tgt; + int status = LoadSrcTgtVersion3(params, tgt, blocks, true, overlap); - if (params.version == 1) { - status = LoadSrcTgtVersion1(params, tgt, blocks, params.buffer, params.fd); - } else if (params.version == 2) { - status = LoadSrcTgtVersion2(params, tgt, blocks, params.buffer, params.fd, - params.stashbase, nullptr); - } else if (params.version >= 3) { - status = LoadSrcTgtVersion3(params, tgt, blocks, true, overlap); - } + if (status == -1) { + LOG(ERROR) << "failed to read blocks for move"; + return -1; + } - if (status == -1) { - LOG(ERROR) << "failed to read blocks for move"; - return -1; - } + if (status == 0) { + params.foundwrites = true; + } else if (params.foundwrites) { + LOG(WARNING) << "warning: commands executed out of order [" << params.cmdname << "]"; + } + if (params.canwrite) { if (status == 0) { - params.foundwrites = true; - } else if (params.foundwrites) { - LOG(WARNING) << "warning: commands executed out of order [" << params.cmdname << "]"; + LOG(INFO) << " moving " << blocks << " blocks"; + + if (WriteBlocks(tgt, params.buffer, params.fd) == -1) { + return -1; + } + } else { + LOG(INFO) << "skipping " << blocks << " already moved blocks"; } + } - if (params.canwrite) { - if (status == 0) { - LOG(INFO) << " moving " << blocks << " blocks"; + if (!params.freestash.empty()) { + FreeStash(params.stashbase, params.freestash); + params.freestash.clear(); + } - if (WriteBlocks(tgt, params.buffer, params.fd) == -1) { - return -1; - } - } else { - LOG(INFO) << "skipping " << blocks << " already moved blocks"; - } + params.written += tgt.size; - } + return 0; +} - if (!params.freestash.empty()) { - FreeStash(params.stashbase, params.freestash); - params.freestash.clear(); - } +static int PerformCommandStash(CommandParameters& params) { + // <stash_id> <src_range> + if (params.cpos + 1 >= params.tokens.size()) { + LOG(ERROR) << "missing id and/or src range fields in stash command"; + return -1; + } - params.written += tgt.size; + const std::string& id = params.tokens[params.cpos++]; + size_t blocks = 0; + if (LoadStash(params, id, true, &blocks, params.buffer, false) == 0) { + // Stash file already exists and has expected contents. Do not read from source again, as the + // source may have been already overwritten during a previous attempt. + return 0; + } + + RangeSet src = parse_range(params.tokens[params.cpos++]); + allocate(src.size * BLOCKSIZE, params.buffer); + if (ReadBlocks(src, params.buffer, params.fd) == -1) { + return -1; + } + blocks = src.size; + stash_map[id] = src; + + if (VerifyBlocks(id, params.buffer, blocks, true) != 0) { + // Source blocks have unexpected contents. If we actually need this data later, this is an + // unrecoverable error. However, the command that uses the data may have already completed + // previously, so the possible failure will occur during source block verification. 
+ LOG(ERROR) << "failed to load source blocks for stash " << id; return 0; -} + } -static int PerformCommandStash(CommandParameters& params) { - return SaveStash(params, params.stashbase, params.buffer, params.fd, - (params.version >= 3)); + // In verify mode, we don't need to stash any blocks. + if (!params.canwrite) { + return 0; + } + + LOG(INFO) << "stashing " << blocks << " blocks to " << id; + params.stashed += blocks; + return WriteStash(params.stashbase, id, blocks, params.buffer, false, nullptr); } static int PerformCommandFree(CommandParameters& params) { - // <stash_id> - if (params.cpos >= params.tokens.size()) { - LOG(ERROR) << "missing stash id in free command"; - return -1; - } - - const std::string& id = params.tokens[params.cpos++]; + // <stash_id> + if (params.cpos >= params.tokens.size()) { + LOG(ERROR) << "missing stash id in free command"; + return -1; + } - stash_map.erase(id); + const std::string& id = params.tokens[params.cpos++]; + stash_map.erase(id); - if (params.createdstash || params.canwrite) { - return FreeStash(params.stashbase, id); - } + if (params.createdstash || params.canwrite) { + return FreeStash(params.stashbase, id); + } - return 0; + return 0; } static int PerformCommandZero(CommandParameters& params) { @@ -1337,15 +1284,7 @@ static int PerformCommandDiff(CommandParameters& params) { RangeSet tgt; size_t blocks = 0; bool overlap = false; - int status = 0; - if (params.version == 1) { - status = LoadSrcTgtVersion1(params, tgt, blocks, params.buffer, params.fd); - } else if (params.version == 2) { - status = LoadSrcTgtVersion2(params, tgt, blocks, params.buffer, params.fd, - params.stashbase, nullptr); - } else if (params.version >= 3) { - status = LoadSrcTgtVersion3(params, tgt, blocks, false, overlap); - } + int status = LoadSrcTgtVersion3(params, tgt, blocks, false, overlap); if (status == -1) { LOG(ERROR) << "failed to read blocks for diff"; @@ -1471,301 +1410,281 @@ struct Command { static Value* PerformBlockImageUpdate(const char* name, State* state, const std::vector<std::unique_ptr<Expr>>& argv, const Command* commands, size_t cmdcount, bool dryrun) { - CommandParameters params = {}; - params.canwrite = !dryrun; + CommandParameters params = {}; + params.canwrite = !dryrun; - LOG(INFO) << "performing " << (dryrun ? "verification" : "update"); - if (state->is_retry) { - is_retry = true; - LOG(INFO) << "This update is a retry."; - } - if (argv.size() != 4) { - ErrorAbort(state, kArgsParsingFailure, "block_image_update expects 4 arguments, got %zu", - argv.size()); - return StringValue(""); - } + LOG(INFO) << "performing " << (dryrun ? 
"verification" : "update"); + if (state->is_retry) { + is_retry = true; + LOG(INFO) << "This update is a retry."; + } + if (argv.size() != 4) { + ErrorAbort(state, kArgsParsingFailure, "block_image_update expects 4 arguments, got %zu", + argv.size()); + return StringValue(""); + } - std::vector<std::unique_ptr<Value>> args; - if (!ReadValueArgs(state, argv, &args)) { - return nullptr; - } + std::vector<std::unique_ptr<Value>> args; + if (!ReadValueArgs(state, argv, &args)) { + return nullptr; + } - const Value* blockdev_filename = args[0].get(); - const Value* transfer_list_value = args[1].get(); - const Value* new_data_fn = args[2].get(); - const Value* patch_data_fn = args[3].get(); + const Value* blockdev_filename = args[0].get(); + const Value* transfer_list_value = args[1].get(); + const Value* new_data_fn = args[2].get(); + const Value* patch_data_fn = args[3].get(); - if (blockdev_filename->type != VAL_STRING) { - ErrorAbort(state, kArgsParsingFailure, "blockdev_filename argument to %s must be string", - name); - return StringValue(""); - } - if (transfer_list_value->type != VAL_BLOB) { - ErrorAbort(state, kArgsParsingFailure, "transfer_list argument to %s must be blob", name); - return StringValue(""); - } - if (new_data_fn->type != VAL_STRING) { - ErrorAbort(state, kArgsParsingFailure, "new_data_fn argument to %s must be string", name); - return StringValue(""); - } - if (patch_data_fn->type != VAL_STRING) { - ErrorAbort(state, kArgsParsingFailure, "patch_data_fn argument to %s must be string", - name); - return StringValue(""); - } + if (blockdev_filename->type != VAL_STRING) { + ErrorAbort(state, kArgsParsingFailure, "blockdev_filename argument to %s must be string", name); + return StringValue(""); + } + if (transfer_list_value->type != VAL_BLOB) { + ErrorAbort(state, kArgsParsingFailure, "transfer_list argument to %s must be blob", name); + return StringValue(""); + } + if (new_data_fn->type != VAL_STRING) { + ErrorAbort(state, kArgsParsingFailure, "new_data_fn argument to %s must be string", name); + return StringValue(""); + } + if (patch_data_fn->type != VAL_STRING) { + ErrorAbort(state, kArgsParsingFailure, "patch_data_fn argument to %s must be string", name); + return StringValue(""); + } - UpdaterInfo* ui = static_cast<UpdaterInfo*>(state->cookie); - if (ui == nullptr) { - return StringValue(""); - } + UpdaterInfo* ui = static_cast<UpdaterInfo*>(state->cookie); + if (ui == nullptr) { + return StringValue(""); + } - FILE* cmd_pipe = ui->cmd_pipe; - ZipArchiveHandle za = ui->package_zip; + FILE* cmd_pipe = ui->cmd_pipe; + ZipArchiveHandle za = ui->package_zip; - if (cmd_pipe == nullptr || za == nullptr) { - return StringValue(""); - } + if (cmd_pipe == nullptr || za == nullptr) { + return StringValue(""); + } - ZipString path_data(patch_data_fn->data.c_str()); - ZipEntry patch_entry; - if (FindEntry(za, path_data, &patch_entry) != 0) { - LOG(ERROR) << name << "(): no file \"" << patch_data_fn->data << "\" in package"; - return StringValue(""); - } + ZipString path_data(patch_data_fn->data.c_str()); + ZipEntry patch_entry; + if (FindEntry(za, path_data, &patch_entry) != 0) { + LOG(ERROR) << name << "(): no file \"" << patch_data_fn->data << "\" in package"; + return StringValue(""); + } - params.patch_start = ui->package_zip_addr + patch_entry.offset; - ZipString new_data(new_data_fn->data.c_str()); - ZipEntry new_entry; - if (FindEntry(za, new_data, &new_entry) != 0) { - LOG(ERROR) << name << "(): no file \"" << new_data_fn->data << "\" in package"; - return 
StringValue(""); - } + params.patch_start = ui->package_zip_addr + patch_entry.offset; + ZipString new_data(new_data_fn->data.c_str()); + ZipEntry new_entry; + if (FindEntry(za, new_data, &new_entry) != 0) { + LOG(ERROR) << name << "(): no file \"" << new_data_fn->data << "\" in package"; + return StringValue(""); + } - params.fd.reset(TEMP_FAILURE_RETRY(ota_open(blockdev_filename->data.c_str(), O_RDWR))); - if (params.fd == -1) { - PLOG(ERROR) << "open \"" << blockdev_filename->data << "\" failed"; - return StringValue(""); - } + params.fd.reset(TEMP_FAILURE_RETRY(ota_open(blockdev_filename->data.c_str(), O_RDWR))); + if (params.fd == -1) { + PLOG(ERROR) << "open \"" << blockdev_filename->data << "\" failed"; + return StringValue(""); + } - if (params.canwrite) { - params.nti.za = za; - params.nti.entry = new_entry; - - pthread_mutex_init(¶ms.nti.mu, nullptr); - pthread_cond_init(¶ms.nti.cv, nullptr); - pthread_attr_t attr; - pthread_attr_init(&attr); - pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); - - int error = pthread_create(¶ms.thread, &attr, unzip_new_data, ¶ms.nti); - if (error != 0) { - PLOG(ERROR) << "pthread_create failed"; - return StringValue(""); - } - } + if (params.canwrite) { + params.nti.za = za; + params.nti.entry = new_entry; - std::vector<std::string> lines = android::base::Split(transfer_list_value->data, "\n"); - if (lines.size() < 2) { - ErrorAbort(state, kArgsParsingFailure, "too few lines in the transfer list [%zd]\n", - lines.size()); - return StringValue(""); - } + pthread_mutex_init(¶ms.nti.mu, nullptr); + pthread_cond_init(¶ms.nti.cv, nullptr); + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); - // First line in transfer list is the version number - if (!android::base::ParseInt(lines[0], ¶ms.version, 1, 4)) { - LOG(ERROR) << "unexpected transfer list version [" << lines[0] << "]"; - return StringValue(""); + int error = pthread_create(¶ms.thread, &attr, unzip_new_data, ¶ms.nti); + if (error != 0) { + PLOG(ERROR) << "pthread_create failed"; + return StringValue(""); } + } - LOG(INFO) << "blockimg version is " << params.version; - - // Second line in transfer list is the total number of blocks we expect to write - size_t total_blocks; - if (!android::base::ParseUint(lines[1], &total_blocks)) { - ErrorAbort(state, kArgsParsingFailure, "unexpected block count [%s]\n", lines[1].c_str()); - return StringValue(""); - } + std::vector<std::string> lines = android::base::Split(transfer_list_value->data, "\n"); + if (lines.size() < 2) { + ErrorAbort(state, kArgsParsingFailure, "too few lines in the transfer list [%zd]\n", + lines.size()); + return StringValue(""); + } - if (total_blocks == 0) { - return StringValue("t"); - } + // First line in transfer list is the version number. + if (!android::base::ParseInt(lines[0], ¶ms.version, 3, 4)) { + LOG(ERROR) << "unexpected transfer list version [" << lines[0] << "]"; + return StringValue(""); + } - size_t start = 2; - if (params.version >= 2) { - if (lines.size() < 4) { - ErrorAbort(state, kArgsParsingFailure, "too few lines in the transfer list [%zu]\n", - lines.size()); - return StringValue(""); - } + LOG(INFO) << "blockimg version is " << params.version; - // Third line is how many stash entries are needed simultaneously - LOG(INFO) << "maximum stash entries " << lines[2]; + // Second line in transfer list is the total number of blocks we expect to write. 
+ size_t total_blocks; + if (!android::base::ParseUint(lines[1], &total_blocks)) { + ErrorAbort(state, kArgsParsingFailure, "unexpected block count [%s]\n", lines[1].c_str()); + return StringValue(""); + } - // Fourth line is the maximum number of blocks that will be stashed simultaneously - size_t stash_max_blocks; - if (!android::base::ParseUint(lines[3], &stash_max_blocks)) { - ErrorAbort(state, kArgsParsingFailure, "unexpected maximum stash blocks [%s]\n", - lines[3].c_str()); - return StringValue(""); - } + if (total_blocks == 0) { + return StringValue("t"); + } - int res = CreateStash(state, stash_max_blocks, blockdev_filename->data, params.stashbase); - if (res == -1) { - return StringValue(""); - } + size_t start = 2; + if (lines.size() < 4) { + ErrorAbort(state, kArgsParsingFailure, "too few lines in the transfer list [%zu]\n", + lines.size()); + return StringValue(""); + } - params.createdstash = res; + // Third line is how many stash entries are needed simultaneously. + LOG(INFO) << "maximum stash entries " << lines[2]; - start += 2; - } + // Fourth line is the maximum number of blocks that will be stashed simultaneously + size_t stash_max_blocks; + if (!android::base::ParseUint(lines[3], &stash_max_blocks)) { + ErrorAbort(state, kArgsParsingFailure, "unexpected maximum stash blocks [%s]\n", + lines[3].c_str()); + return StringValue(""); + } - // Build a map of the available commands - std::unordered_map<std::string, const Command*> cmd_map; - for (size_t i = 0; i < cmdcount; ++i) { - if (cmd_map.find(commands[i].name) != cmd_map.end()) { - LOG(ERROR) << "Error: command [" << commands[i].name - << "] already exists in the cmd map."; - return StringValue(strdup("")); - } - cmd_map[commands[i].name] = &commands[i]; - } + int res = CreateStash(state, stash_max_blocks, blockdev_filename->data, params.stashbase); + if (res == -1) { + return StringValue(""); + } - int rc = -1; + params.createdstash = res; - // Subsequent lines are all individual transfer commands - for (auto it = lines.cbegin() + start; it != lines.cend(); it++) { - const std::string& line(*it); - if (line.empty()) continue; + start += 2; - params.tokens = android::base::Split(line, " "); - params.cpos = 0; - params.cmdname = params.tokens[params.cpos++].c_str(); - params.cmdline = line.c_str(); + // Build a map of the available commands + std::unordered_map<std::string, const Command*> cmd_map; + for (size_t i = 0; i < cmdcount; ++i) { + if (cmd_map.find(commands[i].name) != cmd_map.end()) { + LOG(ERROR) << "Error: command [" << commands[i].name << "] already exists in the cmd map."; + return StringValue(strdup("")); + } + cmd_map[commands[i].name] = &commands[i]; + } - if (cmd_map.find(params.cmdname) == cmd_map.end()) { - LOG(ERROR) << "unexpected command [" << params.cmdname << "]"; - goto pbiudone; - } + int rc = -1; - const Command* cmd = cmd_map[params.cmdname]; + // Subsequent lines are all individual transfer commands + for (auto it = lines.cbegin() + start; it != lines.cend(); it++) { + const std::string& line(*it); + if (line.empty()) continue; - if (cmd->f != nullptr && cmd->f(params) == -1) { - LOG(ERROR) << "failed to execute command [" << line << "]"; - goto pbiudone; - } + params.tokens = android::base::Split(line, " "); + params.cpos = 0; + params.cmdname = params.tokens[params.cpos++].c_str(); + params.cmdline = line.c_str(); - if (params.canwrite) { - if (ota_fsync(params.fd) == -1) { - failure_type = kFsyncFailure; - PLOG(ERROR) << "fsync failed"; - goto pbiudone; - } - fprintf(cmd_pipe, 
"set_progress %.4f\n", - static_cast<double>(params.written) / total_blocks); - fflush(cmd_pipe); - } + if (cmd_map.find(params.cmdname) == cmd_map.end()) { + LOG(ERROR) << "unexpected command [" << params.cmdname << "]"; + goto pbiudone; } - if (params.canwrite) { - pthread_join(params.thread, nullptr); - - LOG(INFO) << "wrote " << params.written << " blocks; expected " << total_blocks; - LOG(INFO) << "stashed " << params.stashed << " blocks"; - LOG(INFO) << "max alloc needed was " << params.buffer.size(); - - const char* partition = strrchr(blockdev_filename->data.c_str(), '/'); - if (partition != nullptr && *(partition + 1) != 0) { - fprintf(cmd_pipe, "log bytes_written_%s: %zu\n", partition + 1, - params.written * BLOCKSIZE); - fprintf(cmd_pipe, "log bytes_stashed_%s: %zu\n", partition + 1, - params.stashed * BLOCKSIZE); - fflush(cmd_pipe); - } - // Delete stash only after successfully completing the update, as it - // may contain blocks needed to complete the update later. - DeleteStash(params.stashbase); - } else { - LOG(INFO) << "verified partition contents; update may be resumed"; - } + const Command* cmd = cmd_map[params.cmdname]; - rc = 0; + if (cmd->f != nullptr && cmd->f(params) == -1) { + LOG(ERROR) << "failed to execute command [" << line << "]"; + goto pbiudone; + } -pbiudone: - if (ota_fsync(params.fd) == -1) { + if (params.canwrite) { + if (ota_fsync(params.fd) == -1) { failure_type = kFsyncFailure; PLOG(ERROR) << "fsync failed"; + goto pbiudone; + } + fprintf(cmd_pipe, "set_progress %.4f\n", static_cast<double>(params.written) / total_blocks); + fflush(cmd_pipe); } - // params.fd will be automatically closed because it's a unique_fd. + } - // Only delete the stash if the update cannot be resumed, or it's - // a verification run and we created the stash. - if (params.isunresumable || (!params.canwrite && params.createdstash)) { - DeleteStash(params.stashbase); - } + if (params.canwrite) { + pthread_join(params.thread, nullptr); - if (failure_type != kNoCause && state->cause_code == kNoCause) { - state->cause_code = failure_type; + LOG(INFO) << "wrote " << params.written << " blocks; expected " << total_blocks; + LOG(INFO) << "stashed " << params.stashed << " blocks"; + LOG(INFO) << "max alloc needed was " << params.buffer.size(); + + const char* partition = strrchr(blockdev_filename->data.c_str(), '/'); + if (partition != nullptr && *(partition + 1) != 0) { + fprintf(cmd_pipe, "log bytes_written_%s: %zu\n", partition + 1, params.written * BLOCKSIZE); + fprintf(cmd_pipe, "log bytes_stashed_%s: %zu\n", partition + 1, params.stashed * BLOCKSIZE); + fflush(cmd_pipe); } + // Delete stash only after successfully completing the update, as it may contain blocks needed + // to complete the update later. + DeleteStash(params.stashbase); + } else { + LOG(INFO) << "verified partition contents; update may be resumed"; + } - return StringValue(rc == 0 ? "t" : ""); -} + rc = 0; -// The transfer list is a text file containing commands to -// transfer data from one place to another on the target -// partition. We parse it and execute the commands in order: -// -// zero [rangeset] -// - fill the indicated blocks with zeros -// -// new [rangeset] -// - fill the blocks with data read from the new_data file -// -// erase [rangeset] -// - mark the given blocks as empty -// -// move <...> -// bsdiff <patchstart> <patchlen> <...> -// imgdiff <patchstart> <patchlen> <...> -// - read the source blocks, apply a patch (or not in the -// case of move), write result to target blocks. 
bsdiff or -// imgdiff specifies the type of patch; move means no patch -// at all. -// -// The format of <...> differs between versions 1 and 2; -// see the LoadSrcTgtVersion{1,2}() functions for a -// description of what's expected. -// -// stash <stash_id> <src_range> -// - (version 2+ only) load the given source range and stash -// the data in the given slot of the stash table. -// -// free <stash_id> -// - (version 3+ only) free the given stash data. -// -// The creator of the transfer list will guarantee that no block -// is read (ie, used as the source for a patch or move) after it -// has been written. -// -// In version 2, the creator will guarantee that a given stash is -// loaded (with a stash command) before it's used in a -// move/bsdiff/imgdiff command. -// -// Within one command the source and target ranges may overlap so -// in general we need to read the entire source into memory before -// writing anything to the target blocks. -// -// All the patch data is concatenated into one patch_data file in -// the update package. It must be stored uncompressed because we -// memory-map it in directly from the archive. (Since patches are -// already compressed, we lose very little by not compressing -// their concatenation.) -// -// In version 3, commands that read data from the partition (i.e. -// move/bsdiff/imgdiff/stash) have one or more additional hashes -// before the range parameters, which are used to check if the -// command has already been completed and verify the integrity of -// the source data. +pbiudone: + if (ota_fsync(params.fd) == -1) { + failure_type = kFsyncFailure; + PLOG(ERROR) << "fsync failed"; + } + // params.fd will be automatically closed because it's a unique_fd. + + // Only delete the stash if the update cannot be resumed, or it's a verification run and we + // created the stash. + if (params.isunresumable || (!params.canwrite && params.createdstash)) { + DeleteStash(params.stashbase); + } + + if (failure_type != kNoCause && state->cause_code == kNoCause) { + state->cause_code = failure_type; + } + return StringValue(rc == 0 ? "t" : ""); +} + +/** + * The transfer list is a text file containing commands to transfer data from one place to another + * on the target partition. We parse it and execute the commands in order: + * + * zero [rangeset] + * - Fill the indicated blocks with zeros. + * + * new [rangeset] + * - Fill the blocks with data read from the new_data file. + * + * erase [rangeset] + * - Mark the given blocks as empty. + * + * move <...> + * bsdiff <patchstart> <patchlen> <...> + * imgdiff <patchstart> <patchlen> <...> + * - Read the source blocks, apply a patch (or not in the case of move), write result to target + * blocks. bsdiff or imgdiff specifies the type of patch; move means no patch at all. + * + * See the comments in LoadSrcTgtVersion3() for a description of the <...> format. + * + * stash <stash_id> <src_range> + * - Load the given source range and stash the data in the given slot of the stash table. + * + * free <stash_id> + * - Free the given stash data. + * + * The creator of the transfer list will guarantee that no block is read (ie, used as the source for + * a patch or move) after it has been written. + * + * The creator will guarantee that a given stash is loaded (with a stash command) before it's used + * in a move/bsdiff/imgdiff command. + * + * Within one command the source and target ranges may overlap so in general we need to read the + * entire source into memory before writing anything to the target blocks. 
+ * + * All the patch data is concatenated into one patch_data file in the update package. It must be + * stored uncompressed because we memory-map it in directly from the archive. (Since patches are + * already compressed, we lose very little by not compressing their concatenation.) + * + * Commands that read data from the partition (i.e. move/bsdiff/imgdiff/stash) have one or more + * additional hashes before the range parameters, which are used to check if the command has already + * been completed and verify the integrity of the source data. + */ Value* BlockImageVerifyFn(const char* name, State* state, const std::vector<std::unique_ptr<Expr>>& argv) { // Commands which are not tested are set to nullptr to skip them completely diff --git a/verifier.cpp b/verifier.cpp index e9d540cdb..23142c120 100644 --- a/verifier.cpp +++ b/verifier.cpp @@ -370,7 +370,7 @@ std::unique_ptr<RSA, RSADeleter> parse_rsa_key(FILE* file, uint32_t exponent) { } struct BNDeleter { - void operator()(BIGNUM* bn) { + void operator()(BIGNUM* bn) const { BN_free(bn); } }; diff --git a/verifier.h b/verifier.h index 6bee74947..6fa8f2b0a 100644 --- a/verifier.h +++ b/verifier.h @@ -26,13 +26,13 @@ #include <openssl/sha.h> struct RSADeleter { - void operator()(RSA* rsa) { + void operator()(RSA* rsa) const { RSA_free(rsa); } }; struct ECKEYDeleter { - void operator()(EC_KEY* ec_key) { + void operator()(EC_KEY* ec_key) const { EC_KEY_free(ec_key); } }; diff --git a/wear_touch.cpp b/wear_touch.cpp index cf33daa9f..e2ab44d2d 100644 --- a/wear_touch.cpp +++ b/wear_touch.cpp @@ -118,7 +118,7 @@ void WearSwipeDetector::run() { } void* WearSwipeDetector::touch_thread(void* cookie) { - ((WearSwipeDetector*)cookie)->run(); + (static_cast<WearSwipeDetector*>(cookie))->run(); return NULL; } |
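As a footnote to the transfer-list documentation above: after this change only versions 3 and 4 are accepted, and the four header lines are read before any command is executed. A small sketch of just that header parsing, using the same android::base helpers as the patch (the struct and function names here are illustrative, not part of the code):

#include <string>
#include <vector>

#include <android-base/parseint.h>
#include <android-base/strings.h>

struct TransferListHeader {
  int version;               // line 1: must be 3 or 4 after this change
  size_t total_blocks;       // line 2: total number of blocks to be written
  size_t max_stash_entries;  // line 3: stash entries needed simultaneously
  size_t max_stash_blocks;   // line 4: maximum blocks stashed at any one time
};

static bool ParseTransferListHeader(const std::string& transfer_list, TransferListHeader* hdr) {
  std::vector<std::string> lines = android::base::Split(transfer_list, "\n");
  if (lines.size() < 4) return false;
  return android::base::ParseInt(lines[0], &hdr->version, 3, 4) &&
         android::base::ParseUint(lines[1], &hdr->total_blocks) &&
         android::base::ParseUint(lines[2], &hdr->max_stash_entries) &&
         android::base::ParseUint(lines[3], &hdr->max_stash_blocks);
}

A header such as "4\n20451\n4\n4096\n" (version 4, 20451 blocks to write, at most 4 stash entries and 4096 stashed blocks at once) would pass this check; everything after line 4 is one command per line, as documented above. Note that PerformBlockImageUpdate() itself only logs line 3 without parsing it; parsing it here is purely for illustration.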