def ExtractApexPayloadAndSignApks(self, apk_entries, apk_keys):
  """Extracts the payload image and signs the containing apk files.

  Args:
    apk_entries: A list of apk file paths, relative to the payload root.
    apk_keys: A dict mapping apk basenames to their signing key names.

  Returns:
    A tuple of (payload_dir, has_signed_apk): the temp directory the payload
    was extracted into, and whether at least one apk was re-signed.

  Raises:
    ApexSigningError: If debugfs_static cannot be located.
  """
  if not os.path.exists(self.debugfs_path):
    raise ApexSigningError(
        "Couldn't find location of debugfs_static: " +
        # BUG FIX: this format arg referenced the undefined local name
        # `debugfs_path`, so building the message raised a NameError instead
        # of the intended ApexSigningError. Use the instance attribute.
        "Path {} does not exist. ".format(self.debugfs_path) +
        "Make sure bin/debugfs_static can be found in -p <path>")

  # Extract the payload image out of the apex with deapexer.
  payload_dir = common.MakeTempDir()
  extract_cmd = [
      'deapexer', '--debugfs_path', self.debugfs_path, 'extract',
      self.apex_path, payload_dir
  ]
  common.RunAndCheckOutput(extract_cmd)

  has_signed_apk = False
  for entry in apk_entries:
    apk_path = os.path.join(payload_dir, entry)
    assert os.path.exists(self.apex_path)

    key_name = apk_keys.get(os.path.basename(entry))
    # Special cert strings (e.g. PRESIGNED) mean the apk must not be re-signed.
    if key_name in common.SPECIAL_CERT_STRINGS:
      logger.info('Not signing: %s due to special cert string', apk_path)
      continue

    logger.info('Signing apk file %s in apex %s', apk_path, self.apex_path)
    # Rename the unsigned apk and overwrite the original apk path with the
    # signed apk file.
    unsigned_apk = common.MakeTempFile()
    os.rename(apk_path, unsigned_apk)
    common.SignFile(
        unsigned_apk, apk_path, key_name, self.key_passwords,
        codename_to_api_level_map=self.codename_to_api_level_map)
    has_signed_apk = True
  return payload_dir, has_signed_apk
def AddCache(output_zip):
  """Create an empty cache image and store it in output_zip."""
  img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "cache.img")
  # BUG FIX: check the prebuilt image path (input_name), not the output path
  # (name). When an output zip is given, img.name is a freshly created temp
  # file that always exists, so the old check made this function return early
  # every time and cache.img was never built. This also matches the prebuilt
  # check in AddUserdata.
  if os.path.exists(img.input_name):
    logger.info("cache.img already exists; no need to rebuild...")
    return

  image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict, "cache")
  # The build system has to explicitly request for cache.img.
  if "fs_type" not in image_props:
    return

  logger.info("creating cache.img...")

  # Use a fixed timestamp so rebuilds are reproducible.
  image_props["timestamp"] = FIXED_FILE_TIMESTAMP

  # cache.img is always empty; build it from an empty staging directory.
  user_dir = common.MakeTempDir()
  build_image.BuildImage(user_dir, image_props, img.name)

  common.CheckSize(img.name, "cache.img", OPTIONS.info_dict)
  img.Write()
def test_AddCareMapForAbOta_zipOutput(self):
  """Tests the case with ZIP output."""
  image_paths = self._test_AddCareMapForAbOta()

  # Write the care map into a fresh zip archive.
  zip_path = common.MakeTempFile(suffix='.zip')
  with zipfile.ZipFile(zip_path, 'w') as out_zip:
    AddCareMapForAbOta(out_zip, ['system', 'vendor'], image_paths)

  # Re-open the archive, confirm the entry is present and extract it.
  entry_name = "META/care_map.pb"
  extract_dir = common.MakeTempDir()
  with zipfile.ZipFile(zip_path, 'r') as check_zip:
    self.assertIn(entry_name, check_zip.namelist())
    check_zip.extract(entry_name, path=extract_dir)

  expected = [
      'system',
      RangeSet("0-5 10-15").to_string_raw(),
      "ro.system.build.fingerprint",
      "google/sailfish/12345:user/dev-keys",
      'vendor',
      RangeSet("0-9").to_string_raw(),
      "ro.vendor.build.fingerprint",
      "google/sailfish/678:user/dev-keys",
  ]
  self._verifyCareMap(expected, os.path.join(extract_dir, entry_name))
def test_ValidateVerifiedBootImages_bootImage_corrupted(self):
  # Tests that a boot image whose content has been tampered with after
  # signing fails validation.
  input_tmp = common.MakeTempDir()
  os.mkdir(os.path.join(input_tmp, 'IMAGES'))
  boot_image = os.path.join(input_tmp, 'IMAGES', 'boot.img')
  self._generate_boot_image(boot_image)

  # Corrupt the last byte of the image by flipping its bits (255 - value),
  # which guarantees the byte changes regardless of its original value.
  with open(boot_image, 'r+b') as boot_fp:
    boot_fp.seek(-1, os.SEEK_END)
    last_byte = boot_fp.read(1)
    last_byte = bytes([255 - ord(last_byte)])
    boot_fp.seek(-1, os.SEEK_END)
    boot_fp.write(last_byte)

  info_dict = {
      'boot_signer' : 'true',
  }
  options = {
      'verity_key' : os.path.join(self.testdata_dir, 'testkey.x509.pem'),
  }
  # Validation of the corrupted image is expected to raise.
  self.assertRaises(
      AssertionError, ValidateVerifiedBootImages, input_tmp, info_dict,
      options)
def AddUserdata(output_zip):
  """Builds userdata.img and stores it in output_zip.

  By default an empty userdata.img is built. The invoker can instead
  request an image populated with the real data from the target files
  by setting "userdata_img_with_data=true" in OPTIONS.info_dict.
  """
  img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "userdata.img")
  if os.path.exists(img.input_name):
    print("userdata.img already exists; no need to rebuild...")
    return

  image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict, "data")
  # No partition size means the build doesn't want a userdata image.
  if not image_props.get("partition_size"):
    return

  print("creating userdata.img...")

  image_props["timestamp"] = FIXED_FILE_TIMESTAMP

  # Pick the staging directory: real DATA/ contents, or an empty temp dir.
  if OPTIONS.info_dict.get("userdata_img_with_data") == "true":
    user_dir = os.path.join(OPTIONS.input_tmp, "DATA")
  else:
    user_dir = common.MakeTempDir()

  fstab = OPTIONS.info_dict["fstab"]
  if fstab:
    image_props["fs_type"] = fstab["/data"].fs_type

  build_ok = build_image.BuildImage(user_dir, image_props, img.name)
  assert build_ok, "build userdata.img image failed"

  common.CheckSize(img.name, "userdata.img", OPTIONS.info_dict)
  img.Write()
def test_GetFilesystemCharacteristics(self):
  """Builds a small ext4 image, sparsifies it, and parses its tune2fs stats."""
  src_dir = common.MakeTempDir()
  raw_image = common.MakeTempFile(suffix='.img')
  mkfs_cmd = [
      'mkuserimg_mke2fs', src_dir, raw_image, 'ext4', '/system', '409600',
      '-j', '0'
  ]
  mkfs_proc = common.Run(mkfs_cmd)
  mkfs_proc.communicate()
  self.assertEqual(0, mkfs_proc.returncode)

  # Convert the raw image to sparse format before probing it.
  sparse_image = common.MakeTempFile(suffix='.img')
  convert_proc = common.Run(["img2simg", raw_image, sparse_image])
  convert_proc.communicate()
  self.assertEqual(0, convert_proc.returncode)

  fs_dict = GetFilesystemCharacteristics(sparse_image)
  self.assertEqual(int(fs_dict['Block size']), 4096)
  self.assertGreaterEqual(int(fs_dict['Free blocks']), 0)  # expect ~88
  self.assertGreater(int(fs_dict['Inode count']), 0)       # expect ~64
  self.assertGreaterEqual(int(fs_dict['Free inodes']), 0)  # expect ~53
  self.assertGreater(int(fs_dict['Inode count']),
                     int(fs_dict['Free inodes']))
def test_CheckHeadroom_WithMke2fsOutput(self):
  """Tests the result parsing from actual call to mke2fs."""
  src_dir = common.MakeTempDir()
  image_file = common.MakeTempFile(suffix='.img')
  mkfs_cmd = ['mkuserimg_mke2fs.sh', src_dir, image_file, 'ext4',
              '/system', '409600', '-j', '0']
  ext4fs_output, exit_code = RunCommand(mkfs_cmd)
  self.assertEqual(0, exit_code)

  # A small headroom requirement should be satisfiable.
  self.assertTrue(CheckHeadroom(ext4fs_output, {
      'fs_type' : 'ext4',
      'partition_headroom' : '40960',
      'mount_point' : 'system',
  }))

  # A headroom requirement nearly as large as the partition should fail.
  self.assertFalse(CheckHeadroom(ext4fs_output, {
      'fs_type' : 'ext4',
      'partition_headroom' : '413696',
      'mount_point' : 'system',
  }))

  common.Cleanup()
def SignUncompressedApex(avbtool, apex_file, payload_key, container_key,
                         container_pw, apk_keys, codename_to_api_level_map,
                         no_hashtree, signing_args=None):
  """Signs the current uncompressed APEX with the given payload/container keys.

  Args:
    avbtool: The path to the avbtool binary.
    apex_file: Uncompressed APEX file.
    payload_key: The path to payload signing key (w/ extension).
    container_key: The path to container signing key (w/o extension).
    container_pw: The matching password of the container_key, or None.
    apk_keys: A dict that holds the signing keys for apk files.
    codename_to_api_level_map: A dict that maps from codename to API level.
    no_hashtree: Don't include hashtree in the signed APEX. If None, the
        choice is derived from the existing payload's "Tree Size".
    signing_args: Additional args to be passed to the payload signer.

  Returns:
    The path to the signed APEX file.
  """
  # 1. Extract the apex payload image and sign the containing apk files. Repack
  # the apex file after signing.
  apk_signer = ApexApkSigner(apex_file, container_pw,
                             codename_to_api_level_map)
  apex_file = apk_signer.ProcessApexFile(apk_keys, payload_key, signing_args)

  # 2a. Extract and sign the APEX_PAYLOAD_IMAGE entry with the given
  # payload_key.
  payload_dir = common.MakeTempDir(prefix='apex-payload-')
  with zipfile.ZipFile(apex_file) as apex_fd:
    payload_file = apex_fd.extract(APEX_PAYLOAD_IMAGE, payload_dir)
    # Remember the original entry list, so we know whether the pubkey entry
    # needs to be deleted before re-adding it below.
    zip_items = apex_fd.namelist()

  payload_info = ParseApexPayloadInfo(avbtool, payload_file)
  if no_hashtree is None:
    # No explicit choice given: keep whatever the original payload had
    # (a zero tree size means it was built without a hashtree).
    no_hashtree = payload_info.get("Tree Size", 0) == 0
  SignApexPayload(
      avbtool,
      payload_file,
      payload_key,
      payload_info['apex.key'],
      payload_info['Algorithm'],
      payload_info['Salt'],
      payload_info['Hash Algorithm'],
      no_hashtree,
      signing_args)

  # 2b. Update the embedded payload public key.
  payload_public_key = common.ExtractAvbPublicKey(avbtool, payload_key)

  # Replace the payload image (and pubkey, if present) with the re-signed
  # versions inside the apex zip.
  common.ZipDelete(apex_file, APEX_PAYLOAD_IMAGE)
  if APEX_PUBKEY in zip_items:
    common.ZipDelete(apex_file, APEX_PUBKEY)
  apex_zip = zipfile.ZipFile(apex_file, 'a', allowZip64=True)
  common.ZipWrite(apex_zip, payload_file, arcname=APEX_PAYLOAD_IMAGE)
  common.ZipWrite(apex_zip, payload_public_key, arcname=APEX_PUBKEY)
  common.ZipClose(apex_zip)

  # 3. Sign the APEX container with container_key.
  signed_apex = common.MakeTempFile(prefix='apex-container-', suffix='.apex')

  # Specify the 4K alignment when calling SignApk.
  extra_signapk_args = OPTIONS.extra_signapk_args[:]
  extra_signapk_args.extend(['-a', '4096', '--align-file-size'])

  password = container_pw.get(container_key) if container_pw else None
  common.SignFile(
      apex_file, signed_apex, container_key, password,
      codename_to_api_level_map=codename_to_api_level_map,
      extra_signapk_args=extra_signapk_args)

  return signed_apex
def setUp(self):
  # Give each test its own fresh temporary input directory.
  OPTIONS.input_tmp = common.MakeTempDir()
def SignApex(apex_data, payload_key, container_key, container_pw,
             codename_to_api_level_map, signing_args=None):
  """Signs the current APEX with the given payload/container keys.

  Args:
    apex_data: Raw APEX data.
    payload_key: The path to payload signing key (w/ extension).
    container_key: The path to container signing key (w/o extension).
    container_pw: The matching password of the container_key, or None.
    codename_to_api_level_map: A dict that maps from codename to API level.
    signing_args: Additional args to be passed to the payload signer.

  Returns:
    The path to the signed APEX file.
  """
  # Materialize the raw bytes to a temp file so it can be treated as a zip.
  apex_file = common.MakeTempFile(prefix='apex-', suffix='.apex')
  with open(apex_file, 'wb') as apex_fp:
    apex_fp.write(apex_data)

  # Well-known entry names inside an APEX container.
  APEX_PAYLOAD_IMAGE = 'apex_payload.img'
  APEX_PUBKEY = 'apex_pubkey'

  # 1a. Extract and sign the APEX_PAYLOAD_IMAGE entry with the given
  # payload_key.
  payload_dir = common.MakeTempDir(prefix='apex-payload-')
  with zipfile.ZipFile(apex_file) as apex_fd:
    payload_file = apex_fd.extract(APEX_PAYLOAD_IMAGE, payload_dir)
    # Remember the original entry list, to know whether the pubkey entry
    # must be deleted before re-adding it below.
    zip_items = apex_fd.namelist()

  payload_info = ParseApexPayloadInfo(payload_file)
  SignApexPayload(
      payload_file,
      payload_key,
      payload_info['apex.key'],
      payload_info['Algorithm'],
      payload_info['Salt'],
      signing_args)

  # 1b. Update the embedded payload public key.
  payload_public_key = common.ExtractAvbPublicKey(payload_key)

  common.ZipDelete(apex_file, APEX_PAYLOAD_IMAGE)
  if APEX_PUBKEY in zip_items:
    common.ZipDelete(apex_file, APEX_PUBKEY)
  apex_zip = zipfile.ZipFile(apex_file, 'a')
  common.ZipWrite(apex_zip, payload_file, arcname=APEX_PAYLOAD_IMAGE)
  common.ZipWrite(apex_zip, payload_public_key, arcname=APEX_PUBKEY)
  common.ZipClose(apex_zip)

  # 2. Align the files at page boundary (same as in apexer).
  aligned_apex = common.MakeTempFile(prefix='apex-container-', suffix='.apex')
  common.RunAndCheckOutput(
      ['zipalign', '-f', '4096', apex_file, aligned_apex])

  # 3. Sign the APEX container with container_key.
  signed_apex = common.MakeTempFile(prefix='apex-container-', suffix='.apex')

  # Specify the 4K alignment when calling SignApk.
  extra_signapk_args = OPTIONS.extra_signapk_args[:]
  extra_signapk_args.extend(['-a', '4096'])
  common.SignFile(
      aligned_apex,
      signed_apex,
      container_key,
      container_pw,
      codename_to_api_level_map=codename_to_api_level_map,
      extra_signapk_args=extra_signapk_args)

  return signed_apex
def BuildImage(in_dir, prop_dict, out_file, target_out=None):
  """Build an image to out_file from in_dir with property prop_dict.

  Args:
    in_dir: path of input directory.
    prop_dict: property dictionary.
    out_file: path of the output image file.
    target_out: path of the product out directory to read device specific FS
        config files.

  Returns:
    True iff the image is built successfully.
  """
  # system_root_image=true: build a system.img that combines the contents of
  # /system and the ramdisk, and can be mounted at the root of the file system.
  origin_in = in_dir
  fs_config = prop_dict.get("fs_config")
  if (prop_dict.get("system_root_image") == "true" and
      prop_dict["mount_point"] == "system"):
    in_dir = common.MakeTempDir()
    # Change the mount point to "/".
    prop_dict["mount_point"] = "/"
    if fs_config:
      # We need to merge the fs_config files of system and ramdisk.
      merged_fs_config = common.MakeTempFile(prefix="root_fs_config",
                                             suffix=".txt")
      with open(merged_fs_config, "w") as fw:
        if "ramdisk_fs_config" in prop_dict:
          with open(prop_dict["ramdisk_fs_config"]) as fr:
            fw.writelines(fr.readlines())
        with open(fs_config) as fr:
          fw.writelines(fr.readlines())
      fs_config = merged_fs_config

  build_command = []
  fs_type = prop_dict.get("fs_type", "")
  run_e2fsck = False

  # Squashfs images may legitimately be smaller than the partition; other
  # filesystems are expected to span the whole partition.
  fs_spans_partition = True
  if fs_type.startswith("squash"):
    fs_spans_partition = False

  is_verity_partition = "verity_block_device" in prop_dict
  verity_supported = prop_dict.get("verity") == "true"
  verity_fec_supported = prop_dict.get("verity_fec") == "true"

  # Adjust the partition size to make room for the hashes if this is to be
  # verified.
  if verity_supported and is_verity_partition:
    partition_size = int(prop_dict.get("partition_size"))
    (adjusted_size, verity_size) = AdjustPartitionSizeForVerity(
        partition_size, verity_fec_supported)
    if not adjusted_size:
      return False
    prop_dict["partition_size"] = str(adjusted_size)
    prop_dict["original_partition_size"] = str(partition_size)
    prop_dict["verity_size"] = str(verity_size)

  # Adjust partition size for AVB hash footer or AVB hashtree footer.
  avb_footer_type = ''
  if prop_dict.get("avb_hash_enable") == "true":
    avb_footer_type = 'hash'
  elif prop_dict.get("avb_hashtree_enable") == "true":
    avb_footer_type = 'hashtree'

  if avb_footer_type:
    avbtool = prop_dict["avb_avbtool"]
    partition_size = prop_dict["partition_size"]
    # avb_add_hash_footer_args or avb_add_hashtree_footer_args.
    additional_args = prop_dict["avb_add_" + avb_footer_type + "_footer_args"]
    max_image_size = AVBCalcMaxImageSize(avbtool, avb_footer_type,
                                         partition_size, additional_args)
    if max_image_size == 0:
      return False
    prop_dict["partition_size"] = str(max_image_size)
    prop_dict["original_partition_size"] = partition_size

  # Assemble the mkfs command line for the requested filesystem type.
  if fs_type.startswith("ext"):
    build_command = [prop_dict["ext_mkuserimg"]]
    if "extfs_sparse_flag" in prop_dict:
      build_command.append(prop_dict["extfs_sparse_flag"])
      # Sparse ext images get inflated and checked with e2fsck afterwards.
      run_e2fsck = True
    build_command.extend([in_dir, out_file, fs_type,
                          prop_dict["mount_point"]])
    build_command.append(prop_dict["partition_size"])
    if "journal_size" in prop_dict:
      build_command.extend(["-j", prop_dict["journal_size"]])
    if "timestamp" in prop_dict:
      build_command.extend(["-T", str(prop_dict["timestamp"])])
    if fs_config:
      build_command.extend(["-C", fs_config])
    if target_out:
      build_command.extend(["-D", target_out])
    if "block_list" in prop_dict:
      build_command.extend(["-B", prop_dict["block_list"]])
    if "base_fs_file" in prop_dict:
      base_fs_file = ConvertBlockMapToBaseFs(prop_dict["base_fs_file"])
      if base_fs_file is None:
        return False
      build_command.extend(["-d", base_fs_file])
    build_command.extend(["-L", prop_dict["mount_point"]])
    if "extfs_inode_count" in prop_dict:
      build_command.extend(["-i", prop_dict["extfs_inode_count"]])
    if "flash_erase_block_size" in prop_dict:
      build_command.extend(["-e", prop_dict["flash_erase_block_size"]])
    if "flash_logical_block_size" in prop_dict:
      build_command.extend(["-o", prop_dict["flash_logical_block_size"]])
    # Specify UUID and hash_seed if using mke2fs.
    if prop_dict["ext_mkuserimg"] == "mkuserimg_mke2fs.sh":
      if "uuid" in prop_dict:
        build_command.extend(["-U", prop_dict["uuid"]])
      if "hash_seed" in prop_dict:
        build_command.extend(["-S", prop_dict["hash_seed"]])
    if "ext4_share_dup_blocks" in prop_dict:
      build_command.append("-c")
    if "selinux_fc" in prop_dict:
      build_command.append(prop_dict["selinux_fc"])
  elif fs_type.startswith("squash"):
    build_command = ["mksquashfsimage.sh"]
    build_command.extend([in_dir, out_file])
    if "squashfs_sparse_flag" in prop_dict:
      build_command.extend([prop_dict["squashfs_sparse_flag"]])
    build_command.extend(["-m", prop_dict["mount_point"]])
    if target_out:
      build_command.extend(["-d", target_out])
    if fs_config:
      build_command.extend(["-C", fs_config])
    if "selinux_fc" in prop_dict:
      build_command.extend(["-c", prop_dict["selinux_fc"]])
    if "block_list" in prop_dict:
      build_command.extend(["-B", prop_dict["block_list"]])
    if "squashfs_block_size" in prop_dict:
      build_command.extend(["-b", prop_dict["squashfs_block_size"]])
    if "squashfs_compressor" in prop_dict:
      build_command.extend(["-z", prop_dict["squashfs_compressor"]])
    if "squashfs_compressor_opt" in prop_dict:
      build_command.extend(["-zo", prop_dict["squashfs_compressor_opt"]])
    if prop_dict.get("squashfs_disable_4k_align") == "true":
      build_command.extend(["-a"])
  elif fs_type.startswith("f2fs"):
    build_command = ["mkf2fsuserimg.sh"]
    build_command.extend([out_file, prop_dict["partition_size"]])
    if fs_config:
      build_command.extend(["-C", fs_config])
    build_command.extend(["-f", in_dir])
    if target_out:
      build_command.extend(["-D", target_out])
    if "selinux_fc" in prop_dict:
      build_command.extend(["-s", prop_dict["selinux_fc"]])
    build_command.extend(["-t", prop_dict["mount_point"]])
    if "timestamp" in prop_dict:
      build_command.extend(["-T", str(prop_dict["timestamp"])])
    build_command.extend(["-L", prop_dict["mount_point"]])
  else:
    print("Error: unknown filesystem type '%s'" % (fs_type))
    return False

  # system_root_image case: in_dir was swapped for a temp dir above, so stage
  # the real root filesystem (ramdisk contents + /system) into it now.
  if in_dir != origin_in:
    # Construct a staging directory of the root file system.
    ramdisk_dir = prop_dict.get("ramdisk_dir")
    if ramdisk_dir:
      shutil.rmtree(in_dir)
      shutil.copytree(ramdisk_dir, in_dir, symlinks=True)
    staging_system = os.path.join(in_dir, "system")
    shutil.rmtree(staging_system, ignore_errors=True)
    shutil.copytree(origin_in, staging_system, symlinks=True)

  (mkfs_output, exit_code) = RunCommand(build_command)
  if exit_code != 0:
    print("Error: '%s' failed with exit code %d:\n%s" % (
        build_command, exit_code, mkfs_output))
    return False

  # Check if there's enough headroom space available for ext4 image.
  if "partition_headroom" in prop_dict and fs_type.startswith("ext4"):
    if not CheckHeadroom(mkfs_output, prop_dict):
      return False

  if not fs_spans_partition:
    mount_point = prop_dict.get("mount_point")
    partition_size = int(prop_dict.get("partition_size"))
    image_size = GetSimgSize(out_file)
    if image_size > partition_size:
      print("Error: %s image size of %d is larger than partition size of "
            "%d" % (mount_point, image_size, partition_size))
      return False
    # Pad the image to the full partition size before appending verity data.
    if verity_supported and is_verity_partition:
      ZeroPadSimg(out_file, partition_size - image_size)

  # Create the verified image if this is to be verified.
  if verity_supported and is_verity_partition:
    if not MakeVerityEnabledImage(out_file, verity_fec_supported, prop_dict):
      return False

  # Add AVB HASH or HASHTREE footer (metadata).
  if avb_footer_type:
    avbtool = prop_dict["avb_avbtool"]
    original_partition_size = prop_dict["original_partition_size"]
    partition_name = prop_dict["partition_name"]
    # key_path and algorithm are only available when chain partition is used.
    key_path = prop_dict.get("avb_key_path")
    algorithm = prop_dict.get("avb_algorithm")
    salt = prop_dict.get("avb_salt")
    # avb_add_hash_footer_args or avb_add_hashtree_footer_args
    additional_args = prop_dict["avb_add_" + avb_footer_type + "_footer_args"]
    if not AVBAddFooter(out_file, avbtool, avb_footer_type,
                        original_partition_size, partition_name, key_path,
                        algorithm, salt, additional_args):
      return False

  if run_e2fsck and prop_dict.get("skip_fsck") != "true":
    success, unsparse_image = UnsparseImage(out_file, replace=False)
    if not success:
      return False

    # Run e2fsck on the inflated image file
    e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image]
    (e2fsck_output, exit_code) = RunCommand(e2fsck_command)

    os.remove(unsparse_image)

    if exit_code != 0:
      print("Error: '%s' failed with exit code %d:\n%s" % (
          e2fsck_command, exit_code, e2fsck_output))
      return False

  return True
def setUp(self):
  # Fresh temporary directory for each test case.
  self.tempdir = common.MakeTempDir()
def RepackApexPayload(self, payload_dir, payload_key, payload_public_key,
                      signing_args=None):
  """Rebuilds the apex file with the updated payload directory.

  Args:
    payload_dir: The directory holding the new payload contents.
    payload_key: The path to the payload signing key.
    payload_public_key: The path to the payload public key.
    signing_args: Additional args to be passed to the payload signer.

  Returns:
    The path to the repacked apex file.
  """
  apex_dir = common.MakeTempDir()
  # Extract the apex file and reuse its meta files as repack parameters.
  common.UnzipToDir(self.apex_path, apex_dir)
  android_jar_path = common.OPTIONS.android_jar_path
  if not android_jar_path:
    # Fall back to the prebuilt android.jar in the source tree.
    android_jar_path = os.path.join(
        os.environ.get('ANDROID_BUILD_TOP', ''), 'prebuilts', 'sdk',
        'current', 'public', 'android.jar')
    logger.warning('android_jar_path not found in options, falling back to'
                   ' use %s', android_jar_path)

  # Inputs required by apexer; all of them must exist in the extracted apex.
  arguments_dict = {
      'manifest': os.path.join(apex_dir, 'apex_manifest.pb'),
      'build_info': os.path.join(apex_dir, 'apex_build_info.pb'),
      'android_jar_path': android_jar_path,
      'key': payload_key,
      'pubkey': payload_public_key,
  }
  for filename in arguments_dict.values():
    assert os.path.exists(filename), 'file {} not found'.format(filename)

  # The repack process will add back these files later in the payload image.
  for name in ['apex_manifest.pb', 'apex_manifest.json', 'lost+found']:
    path = os.path.join(payload_dir, name)
    if os.path.isfile(path):
      os.remove(path)
    elif os.path.isdir(path):
      shutil.rmtree(path)

  repacked_apex = common.MakeTempFile(suffix='.apex')
  repack_cmd = [
      'apexer', '--force', '--include_build_info', '--do_not_check_keyname',
      '--apexer_tool_path', os.getenv('PATH')
  ]
  for key, val in arguments_dict.items():
    repack_cmd.extend(['--' + key, val])
  # Add quote to the signing_args as we will pass
  # --signing_args "--signing_helper_with_files=%path" to apexer
  if signing_args:
    repack_cmd.extend(['--signing_args', '"{}"'.format(signing_args)])
  # optional arguments for apex repacking
  manifest_json = os.path.join(apex_dir, 'apex_manifest.json')
  if os.path.exists(manifest_json):
    repack_cmd.extend(['--manifest_json', manifest_json])
  assets_dir = os.path.join(apex_dir, 'assets')
  if os.path.isdir(assets_dir):
    repack_cmd.extend(['--assets_dir', assets_dir])
  repack_cmd.extend([payload_dir, repacked_apex])
  if OPTIONS.verbose:
    repack_cmd.append('-v')
  common.RunAndCheckOutput(repack_cmd)
  return repacked_apex
def AddVBMeta(output_zip, partitions):
  """Creates a VBMeta image and store it in output_zip.

  Args:
    output_zip: The output zip file, which needs to be already open.
    partitions: A dict that's keyed by partition names with image paths as
        values. Only valid partition names are accepted, which include
        'boot', 'recovery', 'system', 'vendor', 'dtbo'.
  """
  import shutil

  img = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vbmeta.img")
  targetFile = os.path.join(OPTIONS.input_tmp, "vbmeta.img")
  if os.path.exists(img.input_name):
    print("vbmeta.img already exists; not rebuilding...")
    return img.input_name
  avbtool = os.getenv('AVBTOOL') or OPTIONS.info_dict["avb_avbtool"]
  cmd = [avbtool, "make_vbmeta_image", "--output", img.name]
  common.AppendAVBSigningArgs(cmd, "vbmeta")

  public_key_dir = common.MakeTempDir(prefix="avbpubkey-")
  for partition, path in partitions.items():
    assert partition in common.AVB_PARTITIONS, 'Unknown partition: %s' % (
        partition,)
    assert os.path.exists(path), 'Failed to find %s for partition %s' % (
        path, partition)
    AppendVBMetaArgsForPartition(cmd, partition, path, public_key_dir)

  args = OPTIONS.info_dict.get("avb_vbmeta_args")
  if args and args.strip():
    split_args = shlex.split(args)
    for index, arg in enumerate(split_args[:-1]):
      # Sanity check that the image file exists. Some images might be defined
      # as a path relative to source tree, which may not be available at the
      # same location when running this script (we have the input target_files
      # zip only). For such cases, we additionally scan other locations (e.g.
      # IMAGES/, RADIO/, etc) before bailing out.
      if arg == '--include_descriptors_from_image':
        image_path = split_args[index + 1]
        if os.path.exists(image_path):
          continue
        found = False
        for dir_name in ['IMAGES', 'RADIO', 'VENDOR_IMAGES',
                         'PREBUILT_IMAGES']:
          alt_path = os.path.join(OPTIONS.input_tmp, dir_name,
                                  os.path.basename(image_path))
          if os.path.exists(alt_path):
            split_args[index + 1] = alt_path
            found = True
            break
        assert found, 'failed to find %s' % (image_path,)
    cmd.extend(split_args)

  p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "avbtool make_vbmeta_image failed"
  img.Write()

  # Also publish copies of the image next to the input tmp dir and under
  # $OUT. BUG FIX: the old code shelled out via os.system("cp %s %s"),
  # which breaks on paths containing spaces/shell metacharacters and
  # silently discards the exit status. Use shutil.copyfile instead, while
  # keeping the original best-effort semantics (a failed copy is reported
  # but does not abort the build).
  # NOTE(review): the copy source is img.input_name, as in the original
  # code; when an output zip is in use the built image lives at img.name —
  # confirm which path is intended here.
  out_path = os.environ["OUT"] + "/vbmeta.img"
  for dest in (targetFile, out_path):
    try:
      shutil.copyfile(img.input_name, dest)
    except OSError as e:
      print("Warning: failed to copy vbmeta.img to %s: %s" % (dest, e))
def GenerateFSVerityMetadata(in_dir, fsverity_path, apk_key_path,
                             apk_manifest_path, apk_out_path):
  """Generates fsverity metadata files.

  By setting PRODUCT_SYSTEM_FSVERITY_GENERATE_METADATA := true, fsverity
  metadata files will be generated. For the input files, see `patterns` below.
  One metadata file per one input file will be generated with the suffix
  .fsv_meta. e.g. system/framework/foo.jar -> system/framework/foo.jar.fsv_meta
  Also a mapping file containing fsverity digests will be generated to
  system/etc/security/fsverity/BuildManifest.apk.

  Args:
    in_dir: temporary working directory (same as BuildImage)
    fsverity_path: path to host tool fsverity
    apk_key_path: path to key (e.g. build/make/target/product/security/platform)
    apk_manifest_path: path to AndroidManifest.xml for APK
    apk_out_path: path to the output APK

  Returns:
    None. The files are generated directly under in_dir.
  """
  # File patterns (relative to in_dir) that receive fsverity metadata.
  patterns = [
      "system/framework/*.jar",
      "system/framework/oat/*/*.oat",
      "system/framework/oat/*/*.vdex",
      "system/framework/oat/*/*.art",
      "system/etc/boot-image.prof",
      "system/etc/dirty-image-objects",
  ]
  files = []
  for pattern in patterns:
    files += glob.glob(os.path.join(in_dir, pattern))
  # Deduplicate and sort for a deterministic digest list.
  files = sorted(set(files))

  generator = FSVerityMetadataGenerator(fsverity_path)
  generator.set_hash_alg("sha256")

  digests = FSVerityDigests()
  for f in files:
    generator.generate(f)
    # f is a full path for now; make it relative so it starts with
    # {mount_point}/
    digest = digests.digests[os.path.relpath(f, in_dir)]
    digest.digest = generator.digest(f)
    digest.hash_alg = "sha256"

  # Serialize the digest mapping into an assets/ dir so it can be packed
  # into the manifest APK below.
  temp_dir = common.MakeTempDir()
  os.mkdir(os.path.join(temp_dir, "assets"))
  metadata_path = os.path.join(temp_dir, "assets", "build_manifest")
  with open(metadata_path, "wb") as f:
    f.write(digests.SerializeToString())

  apk_path = os.path.join(in_dir, apk_out_path)

  # Build the APK with aapt2, then sign it with apksigner.
  common.RunAndCheckOutput([
      "aapt2", "link",
      "-A", os.path.join(temp_dir, "assets"),
      "-o", apk_path,
      "--manifest", apk_manifest_path
  ])
  common.RunAndCheckOutput([
      "apksigner", "sign",
      "--in", apk_path,
      "--cert", apk_key_path + ".x509.pem",
      "--key", apk_key_path + ".pk8"
  ])