def __init__(self, data, trim=False, pad=False):
  """Wraps a raw byte string as a block image.

  Args:
    data: the image contents, as a string of raw bytes.
    trim: if True, drop a trailing partial block instead of failing.
    pad: if True, zero-pad a trailing partial block instead of failing.

  Raises:
    ValueError: if len(data) is not a multiple of the block size and
      neither trim nor pad is specified.
  """
  self.data = data
  self.blocksize = 4096

  assert not (trim and pad)

  partial = len(self.data) % self.blocksize
  padded = False
  if partial > 0:
    if trim:
      self.data = self.data[:-partial]
    elif pad:
      self.data += '\0' * (self.blocksize - partial)
      padded = True
    else:
      raise ValueError(("data for DataImage must be multiple of %d bytes "
                        "unless trim or pad is specified") %
                       (self.blocksize,))

  assert len(self.data) % self.blocksize == 0

  # Use floor division so total_blocks stays an int under Python 3; true
  # division would yield a float and break the range() call below.
  self.total_blocks = len(self.data) // self.blocksize
  self.care_map = RangeSet(data=(0, self.total_blocks))

  # When the last block is padded, we always write the whole block even for
  # incremental OTAs. Because otherwise the last block may get skipped if
  # unchanged for an incremental, but would fail the post-install
  # verification if it has non-zero contents in the padding bytes.
  # Bug: 23828506
  if padded:
    clobbered_blocks = [self.total_blocks - 1, self.total_blocks]
  else:
    clobbered_blocks = []
  self.clobbered_blocks = clobbered_blocks
  self.extended = RangeSet()

  # Classify every (non-padded) block as all-zero or not, recording each as
  # a [start, end) pair for RangeSet construction.
  zero_blocks = []
  nonzero_blocks = []
  reference = '\0' * self.blocksize

  for i in range(self.total_blocks - 1 if padded else self.total_blocks):
    d = self.data[i * self.blocksize: (i + 1) * self.blocksize]
    if d == reference:
      zero_blocks.append(i)
      zero_blocks.append(i + 1)
    else:
      nonzero_blocks.append(i)
      nonzero_blocks.append(i + 1)

  assert zero_blocks or nonzero_blocks or clobbered_blocks

  self.file_map = dict()
  if zero_blocks:
    self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
  if nonzero_blocks:
    self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
  if clobbered_blocks:
    self.file_map["__COPY"] = RangeSet(data=clobbered_blocks)
def test_next_item(self):
  """next_item() yields the individual block numbers in ascending order."""
  cases = [
      ("0-9", list(range(10))),
      ("10-19 3-5", [3, 4, 5] + list(range(10, 20))),
      ("10-19 3 5 7", [3, 5, 7] + list(range(10, 20))),
  ]
  for text, expected in cases:
    self.assertEqual(expected, list(RangeSet(text).next_item()))
def test_ValidateFileConsistency_incompleteRange(self):
  """Validation passes when files carry an 'incomplete' (shared) block range.

  Builds a small system image, then rewrites its file map so every file
  claims one extra shared block; ValidateFileConsistency() is expected to
  skip such files rather than fail.
  """
  input_tmp = common.MakeTempDir()
  os.mkdir(os.path.join(input_tmp, 'IMAGES'))
  system_image = os.path.join(input_tmp, 'IMAGES', 'system.img')
  system_root = os.path.join(input_tmp, "SYSTEM")
  os.mkdir(system_root)

  # Write test files that contain multiple blocks of zeros; these zero
  # blocks will be omitted by the kernel, and each test file will occupy
  # one block range in the final system image.
  with open(os.path.join(system_root, 'a'), 'w') as f:
    f.write("aaa")
    f.write('\0' * 4096 * 3)
  with open(os.path.join(system_root, 'b'), 'w') as f:
    f.write("bbb")
    f.write('\0' * 4096 * 3)

  raw_file_map = os.path.join(input_tmp, 'IMAGES', 'raw_system.map')
  self._generate_system_image(system_image, system_root, raw_file_map)

  # Parse the generated file map and update the block ranges for each file.
  file_map_list = {}
  image_ranges = RangeSet()
  with open(raw_file_map, 'r') as f:
    for line in f.readlines():
      info = line.split()
      self.assertEqual(2, len(info))
      image_ranges = image_ranges.union(RangeSet(info[1]))
      file_map_list[info[0]] = RangeSet(info[1])

  # Add one unoccupied block as the shared block for all test files.
  mock_shared_block = RangeSet("10-20").subtract(image_ranges).first(1)
  with open(os.path.join(input_tmp, 'IMAGES', 'system.map'), 'w') as f:
    for key in sorted(file_map_list.keys()):
      line = "{} {}\n".format(
          key, file_map_list[key].union(mock_shared_block))
      f.write(line)

  # Prepare the target zip file with the image, map and source files.
  input_file = common.MakeTempFile()
  all_entries = [
      'SYSTEM/', 'SYSTEM/b', 'SYSTEM/a', 'IMAGES/', 'IMAGES/system.map',
      'IMAGES/system.img'
  ]
  with zipfile.ZipFile(input_file, 'w') as input_zip:
    for name in all_entries:
      input_zip.write(os.path.join(input_tmp, name), arcname=name)

  # NOTE(review): this handle is never explicitly closed — presumably fine
  # for a test process, but verify against the test harness conventions.
  input_zip = zipfile.ZipFile(input_file, 'r')
  info_dict = {'extfs_sparse_flag': '-s'}

  # Expect the validation to pass and both files are skipped due to
  # 'incomplete' block range.
  ValidateFileConsistency(input_zip, input_tmp, info_dict)
def test_Generate(self):
  """Checks the hashtree info that Generate() extracts from the image."""
  image_file = sparse_img.SparseImage(self._generate_image())

  generator = CreateHashtreeInfoGenerator('system', 4096, self.prop_dict)
  info = generator.Generate(image_file)

  # Use floor division so the block counts stay ints under Python 3,
  # matching the integer-based ranges used elsewhere in this file.
  self.assertEqual(RangeSet(data=[0, 991232 // 4096]), info.filesystem_range)
  self.assertEqual(RangeSet(data=[991232 // 4096, (991232 + 12288) // 4096]),
                   info.hashtree_range)
  self.assertEqual(self.hash_algorithm, info.hash_algorithm)
  self.assertEqual(self.fixed_salt, info.salt)
  self.assertEqual(self.expected_root_hash, info.root_hash)
def test_AddCareMapTxtForAbOta(self):
  """The generated care_map.txt lists both partitions with their ranges."""
  image_paths = self._test_AddCareMapTxtForAbOta()

  AddCareMapTxtForAbOta(None, ['system', 'vendor'], image_paths)

  care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.txt')
  expected = []
  for partition, ranges in (('system', "0-5 10-15"), ('vendor', "0-9")):
    expected += [partition, RangeSet(ranges).to_string_raw()]
  self._verifyCareMap(expected, care_map_file)
def test_CanUseImgdiff_ineligible(self):
  """CanUseImgdiff() must reject files that don't qualify for imgdiff."""
  # Disabled by caller.
  diff = BlockImageDiff(EmptyImage(), EmptyImage(), disable_imgdiff=True)
  self.assertFalse(
      diff.CanUseImgdiff(
          "/system/app/app1.apk", RangeSet("10-15"), RangeSet("0-5")))

  # Unsupported file type.
  diff = BlockImageDiff(EmptyImage(), EmptyImage())
  self.assertFalse(
      diff.CanUseImgdiff(
          "/system/bin/gzip", RangeSet("10-15"), RangeSet("0-5")))

  # At least one of the ranges is in non-monotonic order.
  self.assertFalse(
      diff.CanUseImgdiff(
          "/system/app/app2.apk", RangeSet("10-15"),
          RangeSet("15-20 30 10-14")))

  # At least one of the ranges has been modified (trimmed).
  trimmed = RangeSet("0-5")
  trimmed.extra['trimmed'] = True
  self.assertFalse(
      diff.CanUseImgdiff(
          "/vendor/app/app3.apk", RangeSet("10-15"), trimmed))
def test_ValidateHashtree_failure(self):
  """ValidateHashtree() fails when the root hash doesn't match the image."""
  generator = VerifiedBootVersion1HashtreeInfoGenerator(
      self.partition_size, 4096, True)
  generator.image = sparse_img.SparseImage(self._GenerateImage())

  info = HashtreeInfo()
  generator.hashtree_info = info
  info.filesystem_range = RangeSet(data=[0, 991232 // 4096])
  info.hashtree_range = RangeSet(
      data=[991232 // 4096, (991232 + 12288) // 4096])
  info.hash_algorithm = self.hash_algorithm
  info.salt = self.fixed_salt
  # Corrupt the first character of the expected root hash.
  info.root_hash = "a" + self.expected_root_hash[1:]

  self.assertFalse(generator.ValidateHashtree())
def test_AddCareMapForAbOta(self):
  """care_map.pb carries ranges plus fingerprint prop/value per partition."""
  image_paths = self._test_AddCareMapForAbOta()

  AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)

  care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
  expected = []
  for partition, ranges, prop, fingerprint in (
      ('system', "0-5 10-15", "ro.system.build.fingerprint",
       "google/sailfish/12345:user/dev-keys"),
      ('vendor', "0-9", "ro.vendor.build.fingerprint",
       "google/sailfish/678:user/dev-keys")):
    expected += [partition, RangeSet(ranges).to_string_raw(), prop,
                 fingerprint]
  self._verifyCareMap(expected, care_map_file)
def test_CanUseImgdiff_ineligible(self):
  """CanUseImgdiff() rejects ineligible files and records the skip reason."""
  # Disabled by caller.
  diff = BlockImageDiff(EmptyImage(), EmptyImage(), disable_imgdiff=True)
  self.assertFalse(
      diff.CanUseImgdiff(
          "/system/app/app1.apk", RangeSet("10-15"), RangeSet("0-5")))

  # Unsupported file type.
  diff = BlockImageDiff(EmptyImage(), EmptyImage())
  self.assertFalse(
      diff.CanUseImgdiff(
          "/system/bin/gzip", RangeSet("10-15"), RangeSet("0-5")))

  # At least one of the ranges is in non-monotonic order.
  self.assertFalse(
      diff.CanUseImgdiff(
          "/system/app/app2.apk", RangeSet("10-15"),
          RangeSet("15-20 30 10-14")))

  # At least one of the ranges is incomplete.
  incomplete = RangeSet("0-5")
  incomplete.extra['incomplete'] = True
  self.assertFalse(
      diff.CanUseImgdiff(
          "/vendor/app/app4.apk", RangeSet("10-15"), incomplete))

  # The skip reasons must have been logged into the stats.
  self.assertDictEqual(
      {
          ImgdiffStats.SKIPPED_NONMONOTONIC: {'/system/app/app2.apk'},
          ImgdiffStats.SKIPPED_INCOMPLETE: {'/vendor/app/app4.apk'},
      },
      diff.imgdiff_stats.stats)
class EmptyImage(Image):
  """A zero-length image."""

  total_blocks = 0
  blocksize = 4096
  file_map = {}
  care_map = RangeSet()
  clobbered_blocks = RangeSet()
  extended = RangeSet()

  def ReadRangeSet(self, ranges):
    """Returns no data; an empty image has no blocks to read."""
    return ()

  def TotalSha1(self, include_clobbered_blocks=False):
    """Returns the SHA-1 of zero bytes of content.

    include_clobbered_blocks is accepted for interface compatibility but
    ignored, since an empty image always has empty clobbered_blocks.
    """
    assert self.clobbered_blocks.size() == 0
    return sha1().hexdigest()
def test_AddCareMapTxtForAbOta_withNonCareMapPartitions(self):
  """Partitions without care_map should be ignored."""
  image_paths = self._test_AddCareMapTxtForAbOta()

  # 'boot' and 'vbmeta' carry no care map and must not appear in the output.
  AddCareMapTxtForAbOta(
      None, ['boot', 'system', 'vendor', 'vbmeta'], image_paths)

  care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.txt')
  expected = []
  for partition, ranges in (('system', "0-5 10-15"), ('vendor', "0-9")):
    expected += [partition, RangeSet(ranges).to_string_raw()]
  self._verifyCareMap(expected, care_map_file)
def test_AddCareMapTxtForAbOta(self):
  """care_map.txt alternates partition names and their raw range strings."""
  image_paths = self._test_AddCareMapTxtForAbOta()

  AddCareMapTxtForAbOta(None, ['system', 'vendor'], image_paths)

  care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.txt')
  with open(care_map_file, 'r') as verify_fp:
    lines = verify_fp.read().split('\n')

  expected = [
      'system', RangeSet("0-5 10-15").to_string_raw(),
      'vendor', RangeSet("0-9").to_string_raw(),
  ]
  self.assertEqual(expected, lines)
def test_CanUseImgdiff(self):
  """Eligible files are accepted and recorded in the imgdiff stats."""
  diff = BlockImageDiff(EmptyImage(), EmptyImage())

  self.assertTrue(
      diff.CanUseImgdiff(
          "/system/app/app1.apk", RangeSet("10-15"), RangeSet("0-5")))
  # Large-APK variant, selected via the trailing flag.
  self.assertTrue(
      diff.CanUseImgdiff(
          "/vendor/app/app2.apk", RangeSet("20 25"), RangeSet("30-31"),
          True))

  expected_stats = {
      ImgdiffStats.USED_IMGDIFF: {"/system/app/app1.apk"},
      ImgdiffStats.USED_IMGDIFF_LARGE_APK: {"/vendor/app/app2.apk"},
  }
  self.assertDictEqual(expected_stats, diff.imgdiff_stats.stats)
def test_AddCareMapForAbOta_noFingerprint(self):
  """Tests the case for partitions without fingerprint."""
  image_paths = self._test_AddCareMapForAbOta()
  # Keep only the verity block devices; no build fingerprint props.
  OPTIONS.info_dict = {
      'system_verity_block_device': '/dev/block/system',
      'vendor_verity_block_device': '/dev/block/vendor',
  }

  AddCareMapForAbOta(None, ['system', 'vendor'], image_paths)

  care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
  # Without fingerprint props both fields fall back to "unknown".
  expected = []
  for partition, ranges in (('system', "0-5 10-15"), ('vendor', "0-9")):
    expected += [partition, RangeSet(ranges).to_string_raw(),
                 "unknown", "unknown"]
  self._verifyCareMap(expected, care_map_file)
def test_AddCareMapForAbOta_withNonCareMapPartitions(self):
  """Partitions without care_map should be ignored."""
  image_paths = self._test_AddCareMapForAbOta()

  # 'boot' and 'vbmeta' have no care map; only system/vendor show up.
  AddCareMapForAbOta(
      None, ['boot', 'system', 'vendor', 'vbmeta'], image_paths)

  care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
  expected = []
  for partition, ranges, prop, fingerprint in (
      ('system', "0-5 10-15", "ro.system.build.fingerprint",
       "google/sailfish/12345:user/dev-keys"),
      ('vendor', "0-9", "ro.vendor.build.fingerprint",
       "google/sailfish/678:user/dev-keys")):
    expected += [partition, RangeSet(ranges).to_string_raw(), prop,
                 fingerprint]
  self._verifyCareMap(expected, care_map_file)
def _ParseHashtreeMetadata(self):
  """Parses the hash_algorithm, root_hash, salt from the metadata block."""

  # The metadata block sits directly after the filesystem and the hashtree.
  metadata_start = self.filesystem_size + self.hashtree_size
  metadata_range = RangeSet(
      data=[metadata_start // self.block_size,
            (metadata_start + self.metadata_size) // self.block_size])
  meta_data = b''.join(self.image.ReadRangeSet(metadata_range))

  # More info about the metadata structure available in:
  # system/extras/verity/build_verity_metadata.py
  META_HEADER_SIZE = 268
  header_bin = meta_data[0:META_HEADER_SIZE]
  # Fixed-layout header: u32 magic, u32 version, 256-byte signature,
  # u32 table length.
  header = struct.unpack("II256sI", header_bin)

  # header: magic_number, version, signature, table_len
  assert header[0] == 0xb001b001, header[0]
  table_len = header[3]
  verity_table = meta_data[META_HEADER_SIZE:
                           META_HEADER_SIZE + table_len]
  table_entries = verity_table.rstrip().split()

  # Expected verity table format: "1 block_device block_device block_size
  # block_size data_blocks data_blocks hash_algorithm root_hash salt"
  assert len(table_entries) == 10, "Unexpected verity table size {}".format(
      len(table_entries))
  # The block sizes and data-block counts in the table must agree with the
  # sizes this generator was constructed with.
  assert (int(table_entries[3]) == self.block_size and
          int(table_entries[4]) == self.block_size)
  assert (int(table_entries[5]) * self.block_size == self.filesystem_size and
          int(table_entries[6]) * self.block_size == self.filesystem_size)

  # Entries come back as bytes (the image is read in binary mode); decode
  # them into str for the hashtree_info fields.
  self.hashtree_info.hash_algorithm = table_entries[7].decode()
  self.hashtree_info.root_hash = table_entries[8].decode()
  self.hashtree_info.salt = table_entries[9].decode()
def __init__(self, tgt, src=None, threads=None, version=4,
             disable_imgdiff=False):
  """Sets up a block-level diff between a source and a target image.

  Args:
    tgt: the target image; must use 4096-byte blocks.
    src: the source image, or None to diff against an empty image.
    threads: worker-thread count; defaults to half the CPUs, minimum 1.
    version: transfer-list format version, one of 1..4.
    disable_imgdiff: if True, imgdiff is never used for patch generation.
  """
  if threads is None:
    # Half the available cores, but never fewer than one thread.
    threads = multiprocessing.cpu_count() // 2 or 1

  self.threads = threads
  self.version = version
  self.transfers = []
  self.src_basenames = {}
  self.src_numpatterns = {}
  self._max_stashed_size = 0
  self.touched_src_ranges = RangeSet()
  self.touched_src_sha1 = None
  self.disable_imgdiff = disable_imgdiff

  assert version in (1, 2, 3, 4)

  if src is None:
    src = EmptyImage()
  self.tgt = tgt
  self.src = src

  # The updater code that installs the patch always uses 4k blocks.
  assert tgt.blocksize == 4096
  assert src.blocksize == 4096

  # The range sets in each filemap should comprise a partition of
  # the care map.
  self.AssertPartition(src.care_map, src.file_map.values())
  self.AssertPartition(tgt.care_map, tgt.file_map.values())
def AssertSequenceGood(self):
  """Checks that the transfer sequence is executable.

  Simulates running the transfers in order and asserts that no block is
  read after it has been written, and that every block in the target care
  map is written exactly once.
  """
  # Start with no blocks having been touched yet.
  written = RangeSet()

  for xf in self.transfers:
    # Blocks sourced from stashes were captured earlier, so exclude them
    # from the "not yet written" check (stashing exists from version 2 on).
    src = xf.src_ranges
    if self.version >= 2:
      for _, stashed in xf.use_stash:
        src = src.subtract(stashed)
    assert not written.overlaps(src)

    # The transfer's output blocks must not have been written before either.
    assert not written.overlaps(xf.tgt_ranges)
    written = written.union(xf.tgt_ranges)

  # Every target block we care about must have been written.
  assert written == self.tgt.care_map
def AssertPartition(total, seq):
  """Asserts that the RangeSets in 'seq' form a partition of 'total'.

  The ranges must be pairwise disjoint and their union must equal the
  'total' RangeSet.
  """
  accumulated = RangeSet()
  for ranges in seq:
    assert not accumulated.overlaps(ranges)
    accumulated = accumulated.union(ranges)
  assert accumulated == total
def test_AddCareMapTxtForAbOta_zipOutput(self):
  """Tests the case with ZIP output."""
  image_paths = self._test_AddCareMapTxtForAbOta()

  output_file = common.MakeTempFile(suffix='.zip')
  with zipfile.ZipFile(output_file, 'w') as output_zip:
    AddCareMapTxtForAbOta(output_zip, ['system', 'vendor'], image_paths)

  with zipfile.ZipFile(output_file, 'r') as verify_zip:
    lines = verify_zip.read('META/care_map.txt').decode('ascii').split('\n')

  expected = [
      'system', RangeSet("0-5 10-15").to_string_raw(),
      'vendor', RangeSet("0-9").to_string_raw(),
  ]
  self.assertEqual(expected, lines)
def test_AddCareMapTxtForAbOta_withAvb(self):
  """Tests the case for device using AVB."""
  image_paths = self._test_AddCareMapTxtForAbOta()
  OPTIONS.info_dict = {
      'avb_system_hashtree_enable': 'true',
      'avb_vendor_hashtree_enable': 'true',
  }

  AddCareMapTxtForAbOta(None, ['system', 'vendor'], image_paths)

  care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.txt')
  expected = []
  for partition, ranges in (('system', "0-5 10-15"), ('vendor', "0-9")):
    expected += [partition, RangeSet(ranges).to_string_raw()]
  self._verifyCareMap(expected, care_map_file)
def test_GetCareMap_nonSparseImage(self):
  """GetCareMap() derives the range from the image size for raw images."""
  OPTIONS.info_dict = {'system_image_size': 53248}

  # 'foo' is the image filename, which is expected to be unused by
  # GetCareMap() for a non-sparse image.
  name, care_map = GetCareMap('system', 'foo')

  self.assertEqual('system', name)
  self.assertEqual(RangeSet("0-12").to_string_raw(), care_map)
def __init__(self, path, hashtree_info_generator=None):
  """Opens an image file and builds its block-level metadata.

  Args:
    path: path to the raw image file; its size must be a multiple of the
      4096-byte block size.
    hashtree_info_generator: optional generator whose Generate() provides
      hashtree info for the image.

  Raises:
    ValueError: if the file size is not a multiple of the block size.
  """
  self.path = path
  self.blocksize = 4096
  self._file_size = os.path.getsize(self.path)
  self._file = open(self.path, 'rb')

  if self._file_size % self.blocksize != 0:
    # Bug fix: the format arguments must be a tuple applied to the format
    # string. Previously they were passed as extra ValueError arguments,
    # so the '%' had one arg for three placeholders and raised TypeError
    # instead of the intended message.
    raise ValueError(
        "Size of file %s must be multiple of %d bytes, but is %d" %
        (self.path, self.blocksize, self._file_size))

  self.total_blocks = self._file_size // self.blocksize
  self.care_map = RangeSet(data=(0, self.total_blocks))
  self.clobbered_blocks = RangeSet()
  self.extended = RangeSet()

  self.generator_lock = threading.Lock()

  self.hashtree_info = None
  if hashtree_info_generator:
    self.hashtree_info = hashtree_info_generator.Generate(self)

  # Classify each block as all-zero or not, recording [start, end) pairs.
  zero_blocks = []
  nonzero_blocks = []
  # Bug fix: the file is opened in binary mode, so each read returns bytes;
  # the reference must be bytes too — a str of NULs never compares equal
  # to bytes on Python 3, which would misclassify every zero block.
  reference = b'\0' * self.blocksize

  for i in range(self.total_blocks):
    d = self._file.read(self.blocksize)
    if d == reference:
      zero_blocks.append(i)
      zero_blocks.append(i + 1)
    else:
      nonzero_blocks.append(i)
      nonzero_blocks.append(i + 1)

  assert zero_blocks or nonzero_blocks

  self.file_map = {}
  if zero_blocks:
    self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
  if nonzero_blocks:
    self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
  if self.hashtree_info:
    self.file_map["__HASHTREE"] = self.hashtree_info.hashtree_range
def test_GetCareMap(self):
  """GetCareMap() honors the adjusted partition size for sparse images."""
  chunks = [(0xCAC1, 6), (0xCAC3, 4), (0xCAC1, 6)]
  sparse_image = test_utils.construct_sparse_image(chunks)
  OPTIONS.info_dict = {'system_adjusted_partition_size': 12}

  name, care_map = GetCareMap('system', sparse_image)

  self.assertEqual('system', name)
  self.assertEqual(RangeSet("0-5 10-12").to_string_raw(), care_map)
class EmptyImage(Image):
  """A zero-length image."""

  total_blocks = 0
  blocksize = 4096
  file_map = {}
  care_map = RangeSet()

  def ReadRangeSet(self, ranges):
    # Nothing to read from an empty image.
    return ()

  def TotalSha1(self):
    # The SHA-1 of zero bytes of content.
    return sha1().hexdigest()
def test_AddCareMapForAbOta_zipOutput(self):
  """Tests the case with ZIP output."""
  image_paths = self._test_AddCareMapForAbOta()

  output_file = common.MakeTempFile(suffix='.zip')
  with zipfile.ZipFile(output_file, 'w') as output_zip:
    AddCareMapForAbOta(output_zip, ['system', 'vendor'], image_paths)

  care_map_name = "META/care_map.pb"
  temp_dir = common.MakeTempDir()
  with zipfile.ZipFile(output_file, 'r') as verify_zip:
    self.assertIn(care_map_name, verify_zip.namelist())
    verify_zip.extract(care_map_name, path=temp_dir)

  expected = []
  for partition, ranges in (('system', "0-5 10-15"), ('vendor', "0-9")):
    expected += [partition, RangeSet(ranges).to_string_raw()]
  self._verifyCareMap(expected, os.path.join(temp_dir, care_map_name))
def test_AddCareMapTxtForAbOta_withAvb(self):
  """Tests the case for device using AVB."""
  image_paths = self._test_AddCareMapTxtForAbOta()
  OPTIONS.info_dict = {
      'avb_system_hashtree_enable': 'true',
      'avb_vendor_hashtree_enable': 'true',
  }

  AddCareMapTxtForAbOta(None, ['system', 'vendor'], image_paths)

  care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.txt')
  with open(care_map_file, 'r') as verify_fp:
    lines = verify_fp.read().split('\n')

  expected = [
      'system', RangeSet("0-5 10-15").to_string_raw(),
      'vendor', RangeSet("0-9").to_string_raw(),
  ]
  self.assertEqual(expected, lines)
def test_GetCareMap(self):
  """GetCareMap() computes the ranges from the declared image size."""
  chunks = [(0xCAC1, 6), (0xCAC3, 4), (0xCAC1, 6)]
  sparse_image = test_utils.construct_sparse_image(chunks)
  OPTIONS.info_dict = {
      'extfs_sparse_flag': '-s',
      'system_image_size': 53248,
  }

  name, care_map = GetCareMap('system', sparse_image)

  self.assertEqual('system', name)
  self.assertEqual(RangeSet("0-5 10-12").to_string_raw(), care_map)
def test_GetSparseImage_sharedBlocks_allowed(self):
  """Tests the case for target using BOARD_EXT4_SHARE_DUP_BLOCKS := true."""
  target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
  with zipfile.ZipFile(target_files, 'w') as target_files_zip:
    # Construct an image with a care_map of "0-5 9-12".
    target_files_zip.write(
        test_utils.construct_sparse_image([(0xCAC2, 16)]),
        arcname='IMAGES/system.img')
    # Block 10 is shared between two files.
    target_files_zip.writestr(
        'IMAGES/system.map',
        '\n'.join([
            '/system/file1 1-5 9-10',
            '/system/file2 10-12']))
    target_files_zip.writestr('SYSTEM/file1', os.urandom(4096 * 7))
    target_files_zip.writestr('SYSTEM/file2', os.urandom(4096 * 3))

  tempdir = common.UnzipTemp(target_files)
  with zipfile.ZipFile(target_files, 'r') as input_zip:
    # The final True enables the shared-blocks handling in GetSparseImage().
    sparse_image = common.GetSparseImage('system', tempdir, input_zip, True)

  # The shared block 10 stays with '/system/file1'; '/system/file2' is left
  # with the remainder "11-12", per the expected map below.
  self.assertDictEqual(
      {
          '__COPY': RangeSet("0"),
          '__NONZERO-0': RangeSet("6-8 13-15"),
          '/system/file1': RangeSet("1-5 9-10"),
          '/system/file2': RangeSet("11-12"),
      },
      sparse_image.file_map)

  # '/system/file2' should be marked with 'uses_shared_blocks', but not with
  # 'incomplete'.
  self.assertTrue(
      sparse_image.file_map['/system/file2'].extra['uses_shared_blocks'])
  self.assertNotIn(
      'incomplete', sparse_image.file_map['/system/file2'].extra)

  # All other entries should look normal without any tags.
  self.assertFalse(sparse_image.file_map['__COPY'].extra)
  self.assertFalse(sparse_image.file_map['__NONZERO-0'].extra)
  self.assertFalse(sparse_image.file_map['/system/file1'].extra)
def __init__(self, data, trim=False, pad=False):
  """Wraps a raw byte string as a block image (no clobbered-block handling).

  Args:
    data: the image contents, as a string of raw bytes.
    trim: if True, drop a trailing partial block instead of failing.
    pad: if True, zero-pad a trailing partial block instead of failing.

  Raises:
    ValueError: if len(data) is not a multiple of the block size and
      neither trim nor pad is specified.
  """
  self.data = data
  self.blocksize = 4096

  assert not (trim and pad)

  partial = len(self.data) % self.blocksize
  if partial > 0:
    if trim:
      self.data = self.data[:-partial]
    elif pad:
      self.data += '\0' * (self.blocksize - partial)
    else:
      raise ValueError(
          ("data for DataImage must be multiple of %d bytes "
           "unless trim or pad is specified") % (self.blocksize, ))

  assert len(self.data) % self.blocksize == 0

  # Use floor division so total_blocks stays an int under Python 3; true
  # division would yield a float and break the range() call below.
  self.total_blocks = len(self.data) // self.blocksize
  self.care_map = RangeSet(data=(0, self.total_blocks))
  self.clobbered_blocks = RangeSet()
  self.extended = RangeSet()

  # Classify every block as all-zero or not, recording [start, end) pairs.
  zero_blocks = []
  nonzero_blocks = []
  reference = '\0' * self.blocksize

  for i in range(self.total_blocks):
    d = self.data[i * self.blocksize:(i + 1) * self.blocksize]
    if d == reference:
      zero_blocks.append(i)
      zero_blocks.append(i + 1)
    else:
      nonzero_blocks.append(i)
      nonzero_blocks.append(i + 1)

  self.file_map = {
      "__ZERO": RangeSet(zero_blocks),
      "__NONZERO": RangeSet(nonzero_blocks)
  }