def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info):
  """Replaces the OTA package verification keys in a target-files zip.

  Reads the key list from META/otakeys.txt in the input zip, maps each key
  through OPTIONS.key_map, and writes otacerts.zip bundles into the output
  zip for both recovery and the system image.

  Args:
    input_tf_zip: Input target_files ZipFile (read side).
    output_tf_zip: Output target_files ZipFile (write side).
    misc_info: The META/misc_info.txt dict; may be mutated (the
        default_system_dev_certificate entry is updated when remapped).

  Raises:
    common.ExternalError: if META/otakeys.txt is missing or malformed.
  """
  try:
    keylist = input_tf_zip.read("META/otakeys.txt").split()
  except KeyError:
    raise common.ExternalError("can't read META/otakeys.txt from input")
  extra_recovery_keys = misc_info.get("extra_recovery_keys")
  if extra_recovery_keys:
    extra_recovery_keys = [OPTIONS.key_map.get(k, k) + ".x509.pem"
                           for k in extra_recovery_keys.split()]
    if extra_recovery_keys:
      print("extra recovery-only key(s): " + ", ".join(extra_recovery_keys))
  else:
    extra_recovery_keys = []
  mapped_keys = []
  for k in keylist:
    # Each entry must be a <name>.x509.pem path; map <name> through key_map.
    m = re.match(r"^(.*)\.x509\.pem$", k)
    if not m:
      raise common.ExternalError(
          "can't parse \"%s\" from META/otakeys.txt" % (k,))
    k = m.group(1)
    mapped_keys.append(OPTIONS.key_map.get(k, k) + ".x509.pem")
  if mapped_keys:
    print("using:\n ", "\n ".join(mapped_keys))
    print("for OTA package verification")
  else:
    # No keys listed: fall back to the (possibly remapped) default dev key.
    devkey = misc_info.get("default_system_dev_certificate",
                           "build/make/target/product/security/testkey")
    mapped_devkey = OPTIONS.key_map.get(devkey, devkey)
    if mapped_devkey != devkey:
      misc_info["default_system_dev_certificate"] = mapped_devkey
    mapped_keys.append(mapped_devkey + ".x509.pem")
    print("META/otakeys.txt has no keys; using %s for OTA package"
          " verification." % (mapped_keys[0],))
  # recovery now uses the same x509.pem version of the keys.
  # extra_recovery_keys are used only in recovery.
  if misc_info.get("recovery_as_boot") == "true":
    recovery_keys_location = "BOOT/RAMDISK/system/etc/security/otacerts.zip"
  else:
    recovery_keys_location = "RECOVERY/RAMDISK/system/etc/security/otacerts.zip"
  WriteOtacerts(output_tf_zip, recovery_keys_location,
                mapped_keys + extra_recovery_keys)
  # SystemUpdateActivity uses the x509.pem version of the keys, but
  # put into a zipfile system/etc/security/otacerts.zip.
  # We DO NOT include the extra_recovery_keys (if any) here.
  WriteOtacerts(output_tf_zip, "SYSTEM/etc/security/otacerts.zip", mapped_keys)
def __init__(self, mount_point, file_name=None, target_ver_dir=None,
             source_ver_dir=None, extract=False, verbatim=False,
             mount_point2=None, mount_point3=None, nv_merge=None,
             spl_merge=False):
  """Initializes a partition updater for one mount point.

  Args:
    mount_point: Primary mount point (fstab key) of the partition.
    file_name: Image file name for this partition, if any.
    target_ver_dir: Target-version directory; None means no image expected.
    source_ver_dir: Source-version directory (unused here; kept for callers).
    extract: If True, extract the image to the filesystem instead of
        writing it raw to a block device.
    verbatim: If True, ship the image verbatim (no patching).
    mount_point2/mount_point3: Alternate mount points to look up in fstab.
    nv_merge: Modem NV tag; non-None enables on-device NV merging.
    spl_merge: If True, the SPL image is merged on-device.

  Raises:
    common.ExternalError: if fstab is missing, or an image file is required
        for this mount point but none was provided.
  """
  # script/options are class-level state shared by all updater instances.
  self.script = PartitionUpdater.script
  self.options = PartitionUpdater.options
  self.mount_point = mount_point
  self.file_name = file_name
  self.extract = extract
  self.verbatim = verbatim
  self.nv_merge = nv_merge
  self.spl_merge = spl_merge
  self.update_flag = False
  self.inc_flag = False
  self.mount_point2 = mount_point2
  self.mount_point3 = mount_point3
  self.need_extract = None
  if nv_merge or spl_merge:
    # NV/SPL merging happens on-device, so the image must first be
    # extracted verbatim into the cache path.
    self.extract = True
    self.verbatim = True
    self.need_extract = OPTIONS.cache_path
  fstab = self.options.info_dict["fstab"]
  if fstab is None:
    raise common.ExternalError("no fstab")
  self.partition = fstab.get(mount_point, None)
  if self.partition is None and mount_point2 is not None:
    self.partition = fstab.get(mount_point2, None)
  if self.partition is None:
    # No fstab entry found: synthesize a placeholder partition that is
    # extracted into the cache path rather than written to a device.
    print("[Warning:] no patition in fstab for mount point %s" % mount_point)
    self.partition = Partition()
    self.partition.mount_point = mount_point
    self.partition.extract = OPTIONS.cache_path
    self.partition.fs_type = "yaffs2"
    self.partition.secureboot = None
    print("[Warning:] auto create patition %s" % self.partition)
    self.extract = True
    self.verbatim = True
  if target_ver_dir is None and PartitionUpdater.IfNeedImageFile(
      mount_point):
    raise common.ExternalError("init PartitionUpdater error")
def __init__(self, partition, file_name, input_dir, bootable=False,
             subdir=None):
  """Loads (and optionally secure-boot signs) one partition image file.

  Args:
    partition: Partition descriptor (provides mount_point / secureboot).
    file_name: Image file name, or None if no image is provided.
    input_dir: Directory containing the image, or None.
    bootable: Unused here; kept for caller compatibility.
    subdir: Unused here; kept for caller compatibility.

  Raises:
    common.ExternalError: if the image is required for this mount point
        but file_name/input_dir is missing.
  """
  self.partition = partition
  self.file_name = file_name
  if file_name is None or input_dir is None:
    # No image supplied; only an error if this mount point requires one.
    if PartitionUpdater.IfNeedImageFile(partition.mount_point):
      raise common.ExternalError("init PartitionFile error")
    return
  self.full_name = os.path.join(input_dir, file_name)
  if os.path.exists(self.full_name):
    # NOTE(review): file opened in text mode and never explicitly closed;
    # kept as-is since this is a behavior-preserving documentation pass.
    file_data = open(self.full_name).read()
    if OPTIONS.secure_boot:
      if partition.secureboot:
        # Sign the raw image data for secure boot before packaging.
        file_data = common.DoSprdSign(partition.secureboot, file_name,
                                      file_data, partition.mount_point)
    self.bin = common.File(file_name, file_data)
    self.size = len(file_data)
  else:
    # Missing image is tolerated: record an empty placeholder.
    print("[Warning:] no image file %s" % (self.full_name))
    self.bin = None
    self.size = 0
def GetBuildProp(cls, key):
  """Returns the value of *key* from the cached build.prop dictionary.

  The dictionary is lazily loaded from ``cls.script.info`` on first use and
  cached on the class for subsequent lookups.

  Args:
    cls: The class carrying the shared ``properties`` cache and ``script``.
    key: Property name to look up.

  Raises:
    common.ExternalError: if *key* is not present in build.prop.
  """
  # Fix: use identity comparison with None (PEP 8) instead of '== None'.
  if cls.properties is None:
    cls.properties = cls.script.info.get("build.prop", {})
  try:
    return cls.properties[key]
  except KeyError:
    raise common.ExternalError("couldn't find %s in build.prop" % (key,))
def GetBuildProp(prop, info_dict):
  """Returns the value of a build property from a target-files info_dict.

  Args:
    prop: The property name to look up (e.g. "ro.build.fingerprint").
    info_dict: The info dict containing a "build.prop" sub-dict.

  Raises:
    common.ExternalError: if the property is missing.
  """
  try:
    return info_dict.get("build.prop", {})[prop]
  except KeyError:
    # Bug fix: the message previously interpolated the 'property' builtin
    # instead of the 'prop' argument, producing a useless error like
    # "couldn't find <class 'property'> in build.prop".
    raise common.ExternalError("couldn't find %s in build.prop" % (prop,))
def __init__(self, options, single_key):
  """Initializes the secure-boot signing configuration.

  Args:
    options: Options object; must carry a non-None secure_boot_tool path.
    single_key: If True, every partition is signed with key1; otherwise a
        chained key scheme is used (see below).

  Raises:
    common.ExternalError: if no secure_boot_tool is configured.
  """
  self.options = options
  if options.secure_boot_tool is None:
    raise common.ExternalError(
        "no secure_boot_tool defined in \"META/misc_info.txt\" of target file"
    )
  self.secure_boot_tool = options.secure_boot_tool
  # Key material is loaded lazily; None until populated.
  self.key1 = None
  self.key2 = None
  self.key3 = None
  self.debug = True
  self.editor = os.getenv("EDITOR", None)
  # Key config path can be overridden via environment variable.
  self.cfgfile = os.getenv("SPRD_SECURE_BOOT_SIGN_CONFIG",
                           "vendor/projects/secure_boot_tools/key.txt"
                           )  # changed for security boot by fangjun.xu
  self.single_key = single_key
  self.sign_key_config = {}
  if single_key is True:
    self.sign_key_config["/spl"] = PartitionSignInfo("key1", "key1")
    self.sign_key_config["/uboot"] = PartitionSignInfo("key1", "key1")
    self.sign_key_config["other"] = PartitionSignInfo("key1", "key1")
  else:
    # NOTE(review): appears to be a key chain (spl: key1/key2,
    # uboot: key2/key3, others: key3) — confirm PartitionSignInfo's
    # argument semantics before relying on this.
    self.sign_key_config["/spl"] = PartitionSignInfo("key1", "key2")
    self.sign_key_config["/uboot"] = PartitionSignInfo("key2", "key3")
    self.sign_key_config["other"] = PartitionSignInfo("key3", None)
def GetFixNvSize(self):
  """Returns the fixed-NV size build property for this NV image.

  Returns:
    The value of "ro.modem.<nv_merge>.fixnv_size" from build.prop, or
    "0x00" for the empty/"wcn" tags which have no such property.

  Raises:
    common.ExternalError: if nv_merge was never set on this updater.
  """
  # Fix: identity comparison with None (PEP 8) instead of '== None'.
  if self.nv_merge is None:
    raise common.ExternalError(
        "internal error: no nv_merge given in GetFixNvSize()")
  # These pseudo-modems carry no fixnv_size property.
  if self.nv_merge in ("", "wcn"):
    return "0x00"
  prop_key = "ro.modem.%s.fixnv_size" % (self.nv_merge)
  return PartitionUpdater.GetBuildProp(prop_key)
def FullUpdateToPartition(self):
  """Emits updater-script commands for a full (non-incremental) update
  of this partition: either extracting the image into a filesystem or
  writing it raw, with optional on-device NV merge and SPL merge steps.

  Raises:
    common.ExternalError: if no usable fstab entry can be resolved.
  """
  mount_point = self.partition.mount_point
  extract = self.IsExtract()
  if extract:
    # When the image must be staged (e.g. for NV/SPL merge), extract to
    # the cache path instead of the real mount point.
    if self.need_extract is not None:
      mount_point = self.need_extract
    self.script.Print("extract " + self.target.file_name + " to " +
                      mount_point + " ....")
  else:
    self.script.Print("write " + self.target.file_name + " to partition " +
                      mount_point + " ....")
    # NOTE(review): formatting is assumed to apply only on the raw-write
    # path — confirm against the original (unmangled) source.
    self.FormatPartition()
  if not extract or self.need_extract is None:
    # Strip the leading '/' for CheckSize's partition-name lookup.
    mount_point_temp = mount_point[1:]
    common.CheckSize(self.target.bin.data, mount_point_temp,
                     self.options.info_dict)
  p = self.options.info_dict["fstab"].get(self.mount_point, None)
  if p is not None:
    p1 = None
    pt_dev1 = None
  else:
    # Fall back to the alternate mount points (dual-slot NV partitions).
    p = self.options.info_dict["fstab"][self.mount_point2]
    p1 = self.options.info_dict["fstab"][self.mount_point3]
    pt_dev1 = p1.device
  if p is None:
    raise common.ExternalError("no partion %s in fstab" % (self.mount_point2))
  if p1 is None:
    print("no partion %s in fstab" % (self.mount_point3))
  pt_dev = p.device
  if extract:
    self.script_ext.UnpackPackageFile(
        self.target.file_name,
        os.path.join(mount_point, self.target.file_name))
  else:
    self.script.WriteRawImage(mount_point, self.target.file_name)
  if self.nv_merge:
    # Merge the new NV image with the on-device NV data using the
    # nvmerge tool shipped in the OTA package, then write the merged
    # result to both NV slots (if a second slot exists).
    nvmerge_exe = os.path.join(OPTIONS.tmp_path, "nvmerge")
    nvmerge_cfg = os.path.join(OPTIONS.tmp_path, "nvmerge.cfg")
    new_nv = os.path.join(OPTIONS.cache_path, self.target.file_name)
    merged_nv = os.path.join(OPTIONS.cache_path,
                             "merged_" + self.target.file_name)
    #self.script_ext.Run_program(nvmerge_exe, nvmerge_cfg, self.GetRealDevicePath(p, pt_dev), new_nv, merged_nv, self.GetFixNvSize())
    self.script_ext.Run_program(nvmerge_exe, nvmerge_cfg,
                                self.GetRealDevicePath(p, pt_dev),
                                self.GetRealDevicePath(p, pt_dev1),
                                new_nv, merged_nv, self.GetFixNvSize())
    self.script_ext.WritePartitionImage(p, merged_nv, pt_dev)
    if p1 is not None:
      self.script_ext.WritePartitionImage(p1, merged_nv, pt_dev1)
    # Clean up the staged images from /cache.
    self.script.DeleteFiles([new_nv, merged_nv])
  if self.spl_merge:
    # SPL is merged on-device from the staged copy, then removed.
    new_spl = os.path.join(OPTIONS.cache_path, self.target.file_name)
    self.script_ext.MergeSpl(p, new_spl, pt_dev)
    self.script.DeleteFiles([new_spl])
def GenerateNonAbOtaPackage(target_file, output_file, source_file=None):
  """Generates a non-A/B OTA package.

  Args:
    target_file: Path to the target target_files zip (or extracted dir).
    output_file: Path where the OTA package is written.
    source_file: Optional source target_files zip; when given, an
        incremental (block-based) OTA is generated instead of a full one.

  Raises:
    common.ExternalError: if the target build has no recovery partition.
  """
  # Check the loaded info dicts first.
  if OPTIONS.info_dict.get("no_recovery") == "true":
    raise common.ExternalError(
        "--- target build has specified no recovery ---")
  # Non-A/B OTAs rely on /cache partition to store temporary files.
  cache_size = OPTIONS.info_dict.get("cache_size")
  if cache_size is None:
    logger.warning("--- can't determine the cache partition size ---")
  OPTIONS.cache_size = cache_size
  if OPTIONS.extra_script is not None:
    # Replace the path with the script's contents for later embedding.
    with open(OPTIONS.extra_script) as fp:
      OPTIONS.extra_script = fp.read()
  if OPTIONS.extracted_input is not None:
    OPTIONS.input_tmp = OPTIONS.extracted_input
  else:
    logger.info("unzipping target target-files...")
    OPTIONS.input_tmp = common.UnzipTemp(target_file, UNZIP_PATTERN)
  OPTIONS.target_tmp = OPTIONS.input_tmp
  # If the caller explicitly specified the device-specific extensions path via
  # -s / --device_specific, use that. Otherwise, use META/releasetools.py if it
  # is present in the target target_files. Otherwise, take the path of the file
  # from 'tool_extensions' in the info dict and look for that in the local
  # filesystem, relative to the current directory.
  if OPTIONS.device_specific is None:
    from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
    if os.path.exists(from_input):
      logger.info("(using device-specific extensions from target_files)")
      OPTIONS.device_specific = from_input
    else:
      OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")
  if OPTIONS.device_specific is not None:
    OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific)
  # Generate a full OTA.
  if source_file is None:
    with zipfile.ZipFile(target_file) as input_zip:
      WriteFullOTAPackage(
          input_zip,
          output_file)
  # Generate an incremental OTA.
  else:
    logger.info("unzipping source target-files...")
    OPTIONS.source_tmp = common.UnzipTemp(
        OPTIONS.incremental_source, UNZIP_PATTERN)
    with zipfile.ZipFile(target_file) as input_zip, \
        zipfile.ZipFile(source_file) as source_zip:
      WriteBlockIncrementalOTAPackage(
          input_zip,
          source_zip,
          output_file)
def CheckVintfFromExtractedTargetFiles(input_tmp, info_dict=None):
  """
  Checks VINTF metadata of an extracted target files directory.

  Args:
    input_tmp: path to the directory that contains the extracted target files
    archive.
    info_dict: The build-time info dict. If None, it will be loaded from
    input_tmp.

  Returns:
    True if VINTF check is skipped or compatible, False if incompatible. Raise
    a RuntimeError if any error occurs.
  """
  if info_dict is None:
    info_dict = common.LoadInfoDict(input_tmp)
  if info_dict.get('vintf_enforce') != 'true':
    # VINTF enforcement is opt-in; without it there is nothing to check.
    logger.warning(
        'PRODUCT_ENFORCE_VINTF_MANIFEST is not set, skipping checks')
    return True
  dirmap = GetDirmap(input_tmp)
  args_for_skus = GetArgsForSkus(info_dict)
  shipping_api_level_args = GetArgsForShippingApiLevel(info_dict)
  kernel_args = GetArgsForKernel(input_tmp)
  # Base command shared by every per-SKU invocation of checkvintf.
  common_command = [
      'checkvintf',
      '--check-compat',
  ]
  for device_path, real_path in dirmap.items():
    common_command += ['--dirmap', '{}:{}'.format(device_path, real_path)]
  common_command += kernel_args
  common_command += shipping_api_level_args
  success = True
  for sku_args in args_for_skus:
    command = common_command + sku_args
    proc = common.Run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode == 0:
      logger.info("Command `%s` returns 'compatible'", ' '.join(command))
    elif out.strip() == "INCOMPATIBLE":
      # A clean "incompatible" verdict: keep checking remaining SKUs so
      # all failures are reported, but remember the overall failure.
      logger.info("Command `%s` returns 'incompatible'", ' '.join(command))
      success = False
    else:
      # Any other non-zero exit is a tool failure, not a verdict.
      raise common.ExternalError(
          "Failed to run command '{}' (exit code {}):\nstdout:{}\nstderr:{}"
          .format(' '.join(command), proc.returncode, out, err))
    logger.info("stdout: %s", out)
    logger.info("stderr: %s", err)
  return success
def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info):
  """Replaces the OTA verification keys in a target-files zip.

  This variant rewrites every existing ``*/otacerts.zip`` entry found in
  the input zip with the mapped key set.

  Args:
    input_tf_zip: Input target_files ZipFile (read side).
    output_tf_zip: Output target_files ZipFile (write side).
    misc_info: The META/misc_info.txt dict; may be mutated (the
        default_system_dev_certificate entry is updated when remapped).

  Raises:
    common.ExternalError: if META/otakeys.txt is missing or malformed.
  """
  try:
    keylist = input_tf_zip.read("META/otakeys.txt").split()
  except KeyError:
    raise common.ExternalError("can't read META/otakeys.txt from input")
  extra_recovery_keys = misc_info.get("extra_recovery_keys")
  if extra_recovery_keys:
    extra_recovery_keys = [OPTIONS.key_map.get(k, k) + ".x509.pem"
                           for k in extra_recovery_keys.split()]
    if extra_recovery_keys:
      print("extra recovery-only key(s): " + ", ".join(extra_recovery_keys))
  else:
    extra_recovery_keys = []
  mapped_keys = []
  for k in keylist:
    # Each entry must be a <name>.x509.pem path; map <name> through key_map.
    m = re.match(r"^(.*)\.x509\.pem$", k)
    if not m:
      raise common.ExternalError(
          "can't parse \"%s\" from META/otakeys.txt" % (k,))
    k = m.group(1)
    mapped_keys.append(OPTIONS.key_map.get(k, k) + ".x509.pem")
  if mapped_keys:
    print("using:\n ", "\n ".join(mapped_keys))
    print("for OTA package verification")
  else:
    # No keys listed: fall back to the (possibly remapped) default dev key.
    devkey = misc_info.get("default_system_dev_certificate",
                           "build/make/target/product/security/testkey")
    mapped_devkey = OPTIONS.key_map.get(devkey, devkey)
    if mapped_devkey != devkey:
      misc_info["default_system_dev_certificate"] = mapped_devkey
    mapped_keys.append(mapped_devkey + ".x509.pem")
    print("META/otakeys.txt has no keys; using %s for OTA package"
          " verification." % (mapped_keys[0],))
  # Rewrite every otacerts.zip found anywhere in the input target_files
  # (system, recovery ramdisk, vendor, ...) with the mapped key set.
  otacerts = [info
              for info in input_tf_zip.infolist()
              if info.filename.endswith("/otacerts.zip")]
  for info in otacerts:
    print("Rewriting OTA key:", info.filename, mapped_keys)
    WriteOtacerts(output_tf_zip, info.filename, mapped_keys)
def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info):
  # NOTE(review): this Python 2 variant is truncated in the source; the
  # remainder of the function (key mapping and certificate writing) is not
  # visible here.
  try:
    keylist = input_tf_zip.read("META/otakeys.txt").split()
  except KeyError:
    raise common.ExternalError("can't read META/otakeys.txt from input")
  extra_recovery_keys = misc_info.get("extra_recovery_keys", None)
  if extra_recovery_keys:
    extra_recovery_keys = [OPTIONS.key_map.get(k, k) + ".x509.pem"
                           for k in extra_recovery_keys.split()]
    if extra_recovery_keys:
      print "extra recovery-only key(s): " + ", ".join(extra_recovery_keys)
def get_section(data, name):
  """Locates a named section in an ELF image and returns its file extent.

  NOTE(review): the offsets (section header table pointer at 0x20, section
  count / string-table index at 0x30, 10 x 32-bit section header fields)
  match the 32-bit little-endian ELF layout — confirm callers only pass
  ELF32 images.

  Args:
    data: Raw ELF file bytes.
    name: Section name to find (compared byte-for-byte).

  Returns:
    (section_offset, section_size) tuple for the first matching section.

  Raises:
    common.ExternalError: if no section with that name exists.
  """
  section_table_offset = struct.unpack_from("<I", data, 0x20)[0]
  num_sections, str_table_idx = struct.unpack_from("<HH", data, 0x30)
  str_table_offset = section_table_offset + (str_table_idx * section_entry_sz)
  # Resolve the section-name string table via its own header entry.
  _, _, _, _, str_section_offset, str_section_size, _, _, _, _ = \
      struct.unpack_from("<10I", data, str_table_offset)
  for i in range(num_sections):
    section_offset = section_table_offset + (i * section_entry_sz)
    section_table_data = struct.unpack_from("<10I", data, section_offset)
    section_name_idx, _, _, _, section_offset, section_size, _, _, _, _ = \
        section_table_data
    # Compare only len(name) bytes of the stored, NUL-terminated name.
    section_name = data[str_section_offset + section_name_idx:
                        str_section_offset + section_name_idx + len(name)]
    if section_name != name:
      continue
    print "Found", section_name, "at offset", hex(section_offset)
    return (section_offset, section_size)
  raise common.ExternalError("Section not found")
def GetFileDestination(fn, filesmap):
  """Resolves the flash destination (and optional backup destination) for a
  radio image file via the filesmap.

  Args:
    fn: Image file name (a trailing ".enc" extension is ignored).
    filesmap: Mapping of file names / "<stem>.*" tokens to destinations.

  Returns:
    (destination, backup) tuple; destination is None when the file has no
    filesmap entry.

  Raises:
    common.ExternalError: if a vbmeta/dtbo image has no filesmap entry.
  """
  # if file is encoded disregard the .enc extention
  if fn.endswith('.enc'):
    fn = fn[:-4]
  # get backup destination as well if present
  backup = None
  if fn + ".bak" in filesmap:
    backup = filesmap[fn + ".bak"]
  # If full filename is not specified in filesmap get only the name part
  # and look for this token
  if fn not in filesmap:
    fn = fn.split(".")[0] + ".*"
    if fn not in filesmap:
      # Critical images must always be mapped; everything else is optional.
      if ("vbmeta" in fn or "dtbo" in fn):
        raise common.ExternalError(
            "Filesmap entry for vbmeta or dtbo missing !!")
      print "warning radio-update: '%s' not found in filesmap" % (fn)
      return None, backup
  return filesmap[fn], backup
def GetFileDestination(fn, filesmap):
  """Resolves the flash destination (and optional backup destination) for a
  radio image file via the filesmap.

  This variant hard-fails for any image listed in target_files_IMAGES_list
  (critical images such as vbmeta/dtbo) that lacks a filesmap entry.

  Args:
    fn: Image file name (a trailing ".enc" extension is ignored).
    filesmap: Mapping of file names / "<stem>.*" tokens to destinations.

  Returns:
    (destination, backup) tuple; destination is None when the file has no
    filesmap entry.

  Raises:
    common.ExternalError: if a critical image has no filesmap entry.
  """
  # if file is encoded disregard the .enc extention
  if fn.endswith('.enc'):
    fn = fn[:-4]
  # get backup destination as well if present
  backup = None
  if fn + ".bak" in filesmap:
    backup = filesmap[fn + ".bak"]
  # Assert if an image belonging to target_files_IMAGES_list is not found in
  # filesmap but found in IMAGES/ as these are critical images like
  # vbmeta/dtbo etc.
  if fn in target_files_IMAGES_list and fn not in filesmap:
    raise common.ExternalError("Filesmap entry for " + fn + " missing !!")
  # If full filename is not specified in filesmap get only the name part
  # and look for this token
  if fn not in filesmap:
    fn = fn.split(".")[0] + ".*"
    if fn not in filesmap:
      print "warning radio-update: '%s' not found in filesmap" % (fn)
      return None, backup
  return filesmap[fn], backup
def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info):
  """Replaces the OTA verification keys in a target-files zip (Python 2
  variant using dumpkey.jar for the recovery key format).

  Args:
    input_tf_zip: Input target_files ZipFile (read side).
    output_tf_zip: Output target_files ZipFile (write side).
    misc_info: The META/misc_info.txt dict.

  Returns:
    The predigested recovery keys blob written into the output zip.

  Raises:
    common.ExternalError: if META/otakeys.txt is missing/malformed or
        dumpkey.jar fails.
  """
  try:
    keylist = input_tf_zip.read("META/otakeys.txt").split()
  except KeyError:
    raise common.ExternalError("can't read META/otakeys.txt from input")
  extra_recovery_keys = misc_info.get("extra_recovery_keys", None)
  if extra_recovery_keys:
    extra_recovery_keys = [OPTIONS.key_map.get(k, k) + ".x509.pem"
                           for k in extra_recovery_keys.split()]
    if extra_recovery_keys:
      print "extra recovery-only key(s): " + ", ".join(extra_recovery_keys)
  else:
    extra_recovery_keys = []
  mapped_keys = []
  for k in keylist:
    # Each entry must be a <name>.x509.pem path; map <name> through key_map.
    m = re.match(r"^(.*)\.x509\.pem$", k)
    if not m:
      raise common.ExternalError(
          "can't parse \"%s\" from META/otakeys.txt" % (k,))
    k = m.group(1)
    mapped_keys.append(OPTIONS.key_map.get(k, k) + ".x509.pem")
  if mapped_keys:
    print "using:\n ", "\n ".join(mapped_keys)
    print "for OTA package verification"
  else:
    # No keys listed: fall back to the default dev key.
    devkey = misc_info.get("default_system_dev_certificate",
                           "build/target/product/security/testkey")
    mapped_keys.append(
        OPTIONS.key_map.get(devkey, devkey) + ".x509.pem")
    print("META/otakeys.txt has no keys; using %s for OTA package"
          " verification." % (mapped_keys[0],))
  # recovery uses a version of the key that has been slightly
  # predigested (by DumpPublicKey.java) and put in res/keys.
  # extra_recovery_keys are used only in recovery.
  p = common.Run(["java", "-jar",
                  os.path.join(OPTIONS.search_path, "framework",
                               "dumpkey.jar")]
                 + mapped_keys + extra_recovery_keys,
                 stdout=subprocess.PIPE)
  new_recovery_keys, _ = p.communicate()
  if p.returncode != 0:
    raise common.ExternalError("failed to run dumpkeys")
  # system_root_image puts the recovery keys at BOOT/RAMDISK.
  if misc_info.get("system_root_image") == "true":
    recovery_keys_location = "BOOT/RAMDISK/res/keys"
  else:
    recovery_keys_location = "RECOVERY/RAMDISK/res/keys"
  common.ZipWriteStr(output_tf_zip, recovery_keys_location,
                     new_recovery_keys)
  # SystemUpdateActivity uses the x509.pem version of the keys, but
  # put into a zipfile system/etc/security/otacerts.zip.
  # We DO NOT include the extra_recovery_keys (if any) here.
  temp_file = cStringIO.StringIO()
  certs_zip = zipfile.ZipFile(temp_file, "w")
  for k in mapped_keys:
    common.ZipWrite(certs_zip, k)
  common.ZipClose(certs_zip)
  common.ZipWriteStr(output_tf_zip, "SYSTEM/etc/security/otacerts.zip",
                     temp_file.getvalue())
  # For A/B devices, update the payload verification key.
  if misc_info.get("ab_update") == "true":
    # Unlike otacerts.zip that may contain multiple keys, we can only specify
    # ONE payload verification key.
    if len(mapped_keys) > 1:
      print("\n WARNING: Found more than one OTA keys; Using the first one"
            " as payload verification key.\n\n")
    print "Using %s for payload verification." % (mapped_keys[0],)
    # Extract the public key in PEM form for update_engine.
    cmd = common.Run(
        ["openssl", "x509", "-pubkey", "-noout", "-in", mapped_keys[0]],
        stdout=subprocess.PIPE)
    pubkey, _ = cmd.communicate()
    common.ZipWriteStr(
        output_tf_zip,
        "SYSTEM/etc/update_engine/update-payload-key.pub.pem",
        pubkey)
    common.ZipWriteStr(
        output_tf_zip,
        "BOOT/RAMDISK/etc/update_engine/update-payload-key.pub.pem",
        pubkey)
  return new_recovery_keys
def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info):
  """Replaces the OTA verification keys in a target-files zip (older
  Python 2 variant: recovery keys always land in RECOVERY/RAMDISK).

  Args:
    input_tf_zip: Input target_files ZipFile (read side).
    output_tf_zip: Output target_files ZipFile (write side).
    misc_info: The META/misc_info.txt dict.

  Returns:
    The predigested recovery keys blob written into the output zip.

  Raises:
    common.ExternalError: if META/otakeys.txt is missing/malformed or
        dumpkey.jar fails.
  """
  try:
    keylist = input_tf_zip.read("META/otakeys.txt").split()
  except KeyError:
    raise common.ExternalError("can't read META/otakeys.txt from input")
  extra_recovery_keys = misc_info.get("extra_recovery_keys", None)
  if extra_recovery_keys:
    extra_recovery_keys = [
        OPTIONS.key_map.get(k, k) + ".x509.pem"
        for k in extra_recovery_keys.split()
    ]
    if extra_recovery_keys:
      print "extra recovery-only key(s): " + ", ".join(
          extra_recovery_keys)
  else:
    extra_recovery_keys = []
  mapped_keys = []
  for k in keylist:
    # Each entry must be a <name>.x509.pem path; map <name> through key_map.
    m = re.match(r"^(.*)\.x509\.pem$", k)
    if not m:
      raise common.ExternalError(
          "can't parse \"%s\" from META/otakeys.txt" % (k, ))
    k = m.group(1)
    mapped_keys.append(OPTIONS.key_map.get(k, k) + ".x509.pem")
  if mapped_keys:
    print "using:\n ", "\n ".join(mapped_keys)
    print "for OTA package verification"
  else:
    # No keys listed: fall back to the default dev key.
    devkey = misc_info.get("default_system_dev_certificate",
                           "build/target/product/security/testkey")
    mapped_keys.append(OPTIONS.key_map.get(devkey, devkey) + ".x509.pem")
    print "META/otakeys.txt has no keys; using", mapped_keys[0]
  # recovery uses a version of the key that has been slightly
  # predigested (by DumpPublicKey.java) and put in res/keys.
  # extra_recovery_keys are used only in recovery.
  p = common.Run([
      "java", "-jar",
      os.path.join(OPTIONS.search_path, "framework", "dumpkey.jar")
  ] + mapped_keys + extra_recovery_keys,
                 stdout=subprocess.PIPE)
  new_recovery_keys, _ = p.communicate()
  if p.returncode != 0:
    raise common.ExternalError("failed to run dumpkeys")
  common.ZipWriteStr(output_tf_zip, "RECOVERY/RAMDISK/res/keys",
                     new_recovery_keys)
  # SystemUpdateActivity uses the x509.pem version of the keys, but
  # put into a zipfile system/etc/security/otacerts.zip.
  # We DO NOT include the extra_recovery_keys (if any) here.
  temp_file = cStringIO.StringIO()
  certs_zip = zipfile.ZipFile(temp_file, "w")
  for k in mapped_keys:
    certs_zip.write(k)
  certs_zip.close()
  common.ZipWriteStr(output_tf_zip, "SYSTEM/etc/security/otacerts.zip",
                     temp_file.getvalue())
  return new_recovery_keys
def process_misc_info_txt(system_target_files_temp_dir,
                          other_target_files_temp_dir,
                          output_target_files_temp_dir,
                          system_misc_info_keys):
  """Merge the system and other META/misc_info.txt into the output dir.

  Most key/value pairs are taken from the "other" (non-system) instance;
  the keys listed in system_misc_info_keys are overridden with the values
  from the system instance. When both sides enable dynamic partitions,
  the dynamic partition lists are concatenated (system first). The merged
  result, sorted by key, is written to <output>/META/misc_info.txt.

  Args:
    system_target_files_temp_dir: Directory with the extracted system
        target files.
    other_target_files_temp_dir: Directory with the extracted other
        target files.
    output_target_files_temp_dir: Directory where the merged
        META/misc_info.txt is written.
    system_misc_info_keys: Keys whose values come from the system
        instance; all remaining keys come from the other instance.
  """

  def load_misc_info(base_dir):
    # Read and parse META/misc_info.txt under base_dir into a dict.
    misc_info_txt = os.path.join(base_dir, 'META', 'misc_info.txt')
    with open(misc_info_txt) as f:
      lines = list(f.read().splitlines())
    return common.LoadDictionaryFromLines(lines)

  system_info_dict = load_misc_info(system_target_files_temp_dir)
  # Most of the merged content starts out as the "other" instance.
  merged_info_dict = load_misc_info(other_target_files_temp_dir)

  # Override the selected keys with the system-side values.
  for key in system_misc_info_keys:
    merged_info_dict[key] = system_info_dict[key]

  # Merge misc info keys used for Dynamic Partitions.
  both_dynamic = (
      merged_info_dict.get('use_dynamic_partitions') == 'true'
      and system_info_dict.get('use_dynamic_partitions') == 'true')
  if both_dynamic:
    merged_info_dict['dynamic_partition_list'] = '%s %s' % (
        system_info_dict.get('dynamic_partition_list', ''),
        merged_info_dict.get('dynamic_partition_list', ''))
    # Partition groups and group sizes are defined by the other (non-system)
    # misc info file because these values may vary for each board that uses
    # a shared system image.
    for partition_group in (
        merged_info_dict['super_partition_groups'].split(' ')):
      if ('super_%s_group_size' % partition_group) not in merged_info_dict:
        raise common.ExternalError(
            'Other META/misc_info.txt does not contain required key '
            'super_%s_group_size.' % partition_group)
      key = 'super_%s_partition_list' % partition_group
      merged_info_dict[key] = '%s %s' % (
          system_info_dict.get(key, ''), merged_info_dict.get(key, ''))

  output_misc_info_txt = os.path.join(output_target_files_temp_dir, 'META',
                                      'misc_info.txt')
  with open(output_misc_info_txt, 'w') as output:
    for key in sorted(merged_info_dict.keys()):
      output.write('{}={}\n'.format(key, merged_info_dict[key]))
def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info):
  """Replaces the OTA verification keys in a target-files zip, including
  the A/B update_engine payload verification key when applicable.

  Args:
    input_tf_zip: Input target_files ZipFile (read side).
    output_tf_zip: Output target_files ZipFile (write side).
    misc_info: The META/misc_info.txt dict; may be mutated (the
        default_system_dev_certificate entry is updated when remapped).

  Raises:
    common.ExternalError: if META/otakeys.txt is missing or malformed.
  """
  try:
    keylist = input_tf_zip.read("META/otakeys.txt").split()
  except KeyError:
    raise common.ExternalError("can't read META/otakeys.txt from input")
  extra_recovery_keys = misc_info.get("extra_recovery_keys")
  if extra_recovery_keys:
    extra_recovery_keys = [OPTIONS.key_map.get(k, k) + ".x509.pem"
                           for k in extra_recovery_keys.split()]
    if extra_recovery_keys:
      print("extra recovery-only key(s): " + ", ".join(extra_recovery_keys))
  else:
    extra_recovery_keys = []
  mapped_keys = []
  for k in keylist:
    # Each entry must be a <name>.x509.pem path; map <name> through key_map.
    m = re.match(r"^(.*)\.x509\.pem$", k)
    if not m:
      raise common.ExternalError(
          "can't parse \"%s\" from META/otakeys.txt" % (k,))
    k = m.group(1)
    mapped_keys.append(OPTIONS.key_map.get(k, k) + ".x509.pem")
  if mapped_keys:
    print("using:\n ", "\n ".join(mapped_keys))
    print("for OTA package verification")
  else:
    # No keys listed: fall back to the (possibly remapped) default dev key.
    devkey = misc_info.get("default_system_dev_certificate",
                           "build/target/product/security/testkey")
    mapped_devkey = OPTIONS.key_map.get(devkey, devkey)
    if mapped_devkey != devkey:
      misc_info["default_system_dev_certificate"] = mapped_devkey
    mapped_keys.append(mapped_devkey + ".x509.pem")
    print("META/otakeys.txt has no keys; using %s for OTA package"
          " verification." % (mapped_keys[0],))
  # recovery now uses the same x509.pem version of the keys.
  # extra_recovery_keys are used only in recovery.
  if misc_info.get("recovery_as_boot") == "true":
    recovery_keys_location = "BOOT/RAMDISK/system/etc/security/otacerts.zip"
  else:
    recovery_keys_location = "RECOVERY/RAMDISK/system/etc/security/otacerts.zip"
  WriteOtacerts(output_tf_zip, recovery_keys_location,
                mapped_keys + extra_recovery_keys)
  # SystemUpdateActivity uses the x509.pem version of the keys, but
  # put into a zipfile system/etc/security/otacerts.zip.
  # We DO NOT include the extra_recovery_keys (if any) here.
  WriteOtacerts(output_tf_zip, "SYSTEM/etc/security/otacerts.zip", mapped_keys)
  # For A/B devices, update the payload verification key.
  if misc_info.get("ab_update") == "true":
    # Unlike otacerts.zip that may contain multiple keys, we can only specify
    # ONE payload verification key.
    if len(mapped_keys) > 1:
      print("\n WARNING: Found more than one OTA keys; Using the first one"
            " as payload verification key.\n\n")
    print("Using %s for payload verification." % (mapped_keys[0],))
    pubkey = common.ExtractPublicKey(mapped_keys[0])
    common.ZipWriteStr(
        output_tf_zip,
        "SYSTEM/etc/update_engine/update-payload-key.pub.pem",
        pubkey)
    common.ZipWriteStr(
        output_tf_zip,
        "BOOT/RAMDISK/system/etc/update_engine/update-payload-key.pub.pem",
        pubkey)
def main(argv):
  """Entry point for OTA package generation.

  Parses command-line options into OPTIONS, loads the target (and optional
  source) info dicts, then dispatches to the A/B or non-A/B package
  generator. Expects exactly two positional args: the input target_files
  zip and the output OTA package path.
  """

  def option_handler(o, a):
    # Handles all long/short options; returns False for unknown options so
    # common.ParseOptions reports them.
    if o in ("-k", "--package_key"):
      OPTIONS.package_key = a
    elif o in ("-i", "--incremental_from"):
      OPTIONS.incremental_source = a
    elif o == "--full_radio":
      OPTIONS.full_radio = True
    elif o == "--full_bootloader":
      OPTIONS.full_bootloader = True
    elif o == "--wipe_user_data":
      OPTIONS.wipe_user_data = True
    elif o == "--downgrade":
      # Downgrades imply a data wipe.
      OPTIONS.downgrade = True
      OPTIONS.wipe_user_data = True
    elif o == "--override_timestamp":
      OPTIONS.downgrade = True
    elif o in ("-o", "--oem_settings"):
      OPTIONS.oem_source = a.split(',')
    elif o == "--oem_no_mount":
      OPTIONS.oem_no_mount = True
    elif o in ("-e", "--extra_script"):
      OPTIONS.extra_script = a
    elif o in ("-t", "--worker_threads"):
      if a.isdigit():
        OPTIONS.worker_threads = int(a)
      else:
        raise ValueError("Cannot parse value %r for option %r - only "
                         "integers are allowed." % (a, o))
    elif o in ("-2", "--two_step"):
      OPTIONS.two_step = True
    elif o == "--include_secondary":
      OPTIONS.include_secondary = True
    elif o == "--no_signing":
      OPTIONS.no_signing = True
    elif o == "--verify":
      OPTIONS.verify = True
    elif o == "--block":
      OPTIONS.block_based = True
    elif o in ("-b", "--binary"):
      OPTIONS.updater_binary = a
    elif o == "--stash_threshold":
      try:
        OPTIONS.stash_threshold = float(a)
      except ValueError:
        raise ValueError(
            "Cannot parse value %r for option %r - expecting "
            "a float" % (a, o))
    elif o == "--log_diff":
      OPTIONS.log_diff = a
    elif o == "--payload_signer":
      OPTIONS.payload_signer = a
    elif o == "--payload_signer_args":
      OPTIONS.payload_signer_args = shlex.split(a)
    elif o == "--payload_signer_maximum_signature_size":
      OPTIONS.payload_signer_maximum_signature_size = a
    elif o == "--payload_signer_key_size":
      # TODO(Xunchang) remove this option after cleaning up the callers.
      logger.warning(
          "The option '--payload_signer_key_size' is deprecated."
          " Use '--payload_signer_maximum_signature_size' instead.")
      OPTIONS.payload_signer_maximum_signature_size = a
    elif o == "--extracted_input_target_files":
      OPTIONS.extracted_input = a
    elif o == "--skip_postinstall":
      OPTIONS.skip_postinstall = True
    elif o == "--retrofit_dynamic_partitions":
      OPTIONS.retrofit_dynamic_partitions = True
    elif o == "--skip_compatibility_check":
      OPTIONS.skip_compatibility_check = True
    elif o == "--output_metadata_path":
      OPTIONS.output_metadata_path = a
    elif o == "--disable_fec_computation":
      OPTIONS.disable_fec_computation = True
    elif o == "--force_non_ab":
      OPTIONS.force_non_ab = True
    elif o == "--boot_variable_file":
      OPTIONS.boot_variable_file = a
    else:
      return False
    return True

  args = common.ParseOptions(argv, __doc__,
                             extra_opts="b:k:i:d:e:t:2o:",
                             extra_long_opts=[
                                 "package_key=",
                                 "incremental_from=",
                                 "full_radio",
                                 "full_bootloader",
                                 "wipe_user_data",
                                 "downgrade",
                                 "override_timestamp",
                                 "extra_script=",
                                 "worker_threads=",
                                 "two_step",
                                 "include_secondary",
                                 "no_signing",
                                 "block",
                                 "binary=",
                                 "oem_settings=",
                                 "oem_no_mount",
                                 "verify",
                                 "stash_threshold=",
                                 "log_diff=",
                                 "payload_signer=",
                                 "payload_signer_args=",
                                 "payload_signer_maximum_signature_size=",
                                 "payload_signer_key_size=",
                                 "extracted_input_target_files=",
                                 "skip_postinstall",
                                 "retrofit_dynamic_partitions",
                                 "skip_compatibility_check",
                                 "output_metadata_path=",
                                 "disable_fec_computation",
                                 "force_non_ab",
                                 "boot_variable_file=",
                             ], extra_option_handler=option_handler)

  if len(args) != 2:
    common.Usage(__doc__)
    sys.exit(1)

  common.InitLogging()

  if OPTIONS.downgrade:
    # We should only allow downgrading incrementals (as opposed to full).
    # Otherwise the device may go back from arbitrary build with this full
    # OTA package.
    if OPTIONS.incremental_source is None:
      raise ValueError("Cannot generate downgradable full OTAs")

  # Load the build info dicts from the zip directly or the extracted input
  # directory. We don't need to unzip the entire target-files zips, because
  # they won't be needed for A/B OTAs (brillo_update_payload does that on its
  # own). When loading the info dicts, we don't need to provide the second
  # parameter to common.LoadInfoDict(). Specifying the second parameter
  # allows replacing some properties with their actual paths, such as
  # 'selinux_fc', 'ramdisk_dir', which won't be used during OTA generation.
  if OPTIONS.extracted_input is not None:
    OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.extracted_input)
  else:
    with zipfile.ZipFile(args[0], 'r') as input_zip:
      OPTIONS.info_dict = common.LoadInfoDict(input_zip)

  logger.info("--- target info ---")
  common.DumpInfoDict(OPTIONS.info_dict)

  # Load the source build dict if applicable.
  if OPTIONS.incremental_source is not None:
    OPTIONS.target_info_dict = OPTIONS.info_dict
    with zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
      OPTIONS.source_info_dict = common.LoadInfoDict(source_zip)
    logger.info("--- source info ---")
    common.DumpInfoDict(OPTIONS.source_info_dict)

  # Load OEM dicts if provided.
  OPTIONS.oem_dicts = _LoadOemDicts(OPTIONS.oem_source)

  # Assume retrofitting dynamic partitions when base build does not set
  # use_dynamic_partitions but target build does.
  if (OPTIONS.source_info_dict and
      OPTIONS.source_info_dict.get("use_dynamic_partitions") != "true" and
      OPTIONS.target_info_dict.get("use_dynamic_partitions") == "true"):
    if OPTIONS.target_info_dict.get(
        "dynamic_partition_retrofit") != "true":
      raise common.ExternalError(
          "Expect to generate incremental OTA for retrofitting dynamic "
          "partitions, but dynamic_partition_retrofit is not set in target "
          "build.")
    logger.info("Implicitly generating retrofit incremental OTA.")
    OPTIONS.retrofit_dynamic_partitions = True

  # Skip postinstall for retrofitting dynamic partitions.
  if OPTIONS.retrofit_dynamic_partitions:
    OPTIONS.skip_postinstall = True

  ab_update = OPTIONS.info_dict.get("ab_update") == "true"
  allow_non_ab = OPTIONS.info_dict.get("allow_non_ab") == "true"
  if OPTIONS.force_non_ab:
    assert allow_non_ab, "--force_non_ab only allowed on devices that supports non-A/B"
    assert ab_update, "--force_non_ab only allowed on A/B devices"

  generate_ab = not OPTIONS.force_non_ab and ab_update

  # Use the default key to sign the package if not specified with package_key.
  # package_keys are needed on ab_updates, so always define them if an
  # A/B update is getting created.
  if not OPTIONS.no_signing or generate_ab:
    if OPTIONS.package_key is None:
      OPTIONS.package_key = OPTIONS.info_dict.get(
          "default_system_dev_certificate",
          "build/make/target/product/security/testkey")
    # Get signing keys
    OPTIONS.key_passwords = common.GetKeyPasswords([OPTIONS.package_key])

  if generate_ab:
    GenerateAbOtaPackage(
        target_file=args[0],
        output_file=args[1],
        source_file=OPTIONS.incremental_source)
  else:
    GenerateNonAbOtaPackage(
        target_file=args[0],
        output_file=args[1],
        source_file=OPTIONS.incremental_source)

  # Post OTA generation works.
  if OPTIONS.incremental_source is not None and OPTIONS.log_diff:
    logger.info("Generating diff logs...")
    logger.info("Unzipping target-files for diffing...")
    target_dir = common.UnzipTemp(args[0], TARGET_DIFFING_UNZIP_PATTERN)
    source_dir = common.UnzipTemp(
        OPTIONS.incremental_source, TARGET_DIFFING_UNZIP_PATTERN)
    with open(OPTIONS.log_diff, 'w') as out_file:
      target_files_diff.recursiveDiff('', source_dir, target_dir, out_file)

  logger.info("done.")
# NOTE(review): orphan fragment of another ReplaceOtaKeys variant — the
# enclosing `def` is not visible here. It covers the no-keys fallback,
# the dumpkey.jar invocation, and a vendor addition that stores a base64
# key representation in META/releasekey.txt.
mapped_keys.append(
    OPTIONS.key_map.get(devkey, devkey) + ".x509.pem")
print("META/otakeys.txt has no keys; using %s for OTA package"
      " verification." % (mapped_keys[0],))
# recovery uses a version of the key that has been slightly
# predigested (by DumpPublicKey.java) and put in res/keys.
# extra_recovery_keys are used only in recovery.
cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
       ["-jar",
        os.path.join(OPTIONS.search_path, "framework", "dumpkey.jar")] +
       mapped_keys + extra_recovery_keys)
p = common.Run(cmd, stdout=subprocess.PIPE)
new_recovery_keys, _ = p.communicate()
if p.returncode != 0:
  raise common.ExternalError("failed to run dumpkeys")
# system_root_image puts the recovery keys at BOOT/RAMDISK.
if misc_info.get("system_root_image") == "true":
  recovery_keys_location = "BOOT/RAMDISK/res/keys"
else:
  recovery_keys_location = "RECOVERY/RAMDISK/res/keys"
common.ZipWriteStr(output_tf_zip, recovery_keys_location,
                   new_recovery_keys)
# Save the base64 key representation in the update for key-change
# validations
p = common.Run(["python", "vendor/cm/build/tools/getb64key.py",
                mapped_keys[0]],
               stdout=subprocess.PIPE)
data, _ = p.communicate()
if p.returncode == 0:
  common.ZipWriteStr(output_tf_zip, "META/releasekey.txt", data)
def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
  """Build an incremental (source -> target) edify OTA package.

  Compares the system files of source_zip and target_zip, emits verbatim
  copies, renames and (in principle) patches into output_zip, and writes an
  edify updater-script that verifies the source build, applies the changes
  and fixes up symlinks/permissions.

  NOTE(review): this is legacy Python 2 code (`print` statements, octal
  0644 literal). The patch-computation block below is commented out, so
  `patch_list` stays empty for the whole run: every changed file ends up
  either verbatim or ignored, and the PatchCheck/ApplyPatch loops over
  `patch_list` are effectively no-ops. Confirm whether that is intended.

  Args:
    target_zip: opened target-files zip of the build being installed.
    source_zip: opened target-files zip of the build being upgraded from.
    output_zip: opened output zip the OTA package is written into.
  """
  source_version = OPTIONS.source_info_dict["recovery_api_version"]
  target_version = OPTIONS.target_info_dict["recovery_api_version"]

  # A version-0 recovery cannot run the edify script we generate; warn but
  # continue anyway.
  if source_version == 0:
    print(
        "WARNING: generating edify script for a source that "
        "can't install it.")
  script = edify_generator.EdifyGenerator(source_version,
                                          OPTIONS.target_info_dict)

  # With override_prop only the post-timestamp is recorded; otherwise the
  # metadata also pins the device name of the source build.
  if OPTIONS.override_prop:
    metadata = {
        "post-timestamp": GetBuildProp("ro.build.date.utc",
                                       OPTIONS.target_info_dict),
    }
  else:
    metadata = {
        "pre-device": GetBuildProp("ro.product.device",
                                   OPTIONS.source_info_dict),
        "post-timestamp": GetBuildProp("ro.build.date.utc",
                                       OPTIONS.target_info_dict),
    }

  device_specific = common.DeviceSpecificParams(
      source_zip=source_zip,
      source_version=source_version,
      target_zip=target_zip,
      target_version=target_version,
      output_zip=output_zip,
      script=script,
      metadata=metadata,
      info_dict=OPTIONS.info_dict)

  print "Loading target..."
  target_data = LoadSystemFiles(target_zip)
  print "Loading source..."
  source_data = LoadSystemFiles(source_zip)

  verbatim_targets = []
  patch_list = []
  diffs = []
  renames = {}        # maps source name -> target File object for moved files
  largest_source_size = 0

  # Index every source file three ways (full path, basename, sha1) so a
  # target file can be matched to a moved/renamed source file.
  matching_file_cache = {}
  for fn in source_data.keys():
    sf = source_data[fn]
    assert fn == sf.name
    matching_file_cache["path:" + fn] = sf
    # Only allow eligability for filename/sha matching
    # if there isn't a perfect path match.
    if target_data.get(sf.name) is None:
      matching_file_cache["file:" + fn.split("/")[-1]] = sf
      matching_file_cache["sha:" + sf.sha1] = sf

  # Classify every target file: rename, verbatim copy, patch candidate,
  # or unchanged.
  for fn in sorted(target_data.keys()):
    tf = target_data[fn]
    assert fn == tf.name
    sf = ClosestFileMatch(tf, matching_file_cache, renames)
    if sf is not None and sf.name != tf.name:
      print "File has moved from " + sf.name + " to " + tf.name
      renames[sf.name] = tf

    if sf is None or fn in OPTIONS.require_verbatim:
      # This file should be included verbatim
      if fn in OPTIONS.prohibit_verbatim:
        raise common.ExternalError("\"%s\" must be sent verbatim" % (fn, ))
      print "send", fn, "verbatim"
      tf.AddToZip(output_zip)
      verbatim_targets.append((fn, tf.size))
    elif tf.sha1 != sf.sha1:
      # File is different; consider sending as a patch
      diffs.append(common.Difference(tf, sf))
    else:
      # Target file data identical to source (may still be renamed)
      pass

  # NOTE(review): patch generation disabled — `diffs` is collected above but
  # never consumed, and `patch_list` is never appended to.
  # common.ComputeDifferences(diffs)
  #
  # for diff in diffs:
  #   tf, sf, d = diff.GetPatch()
  #   if d is None or len(d) > tf.size * OPTIONS.patch_threshold:
  #     # patch is almost as big as the file; don't bother patching
  #     tf.AddToZip(output_zip)
  #     verbatim_targets.append((tf.name, tf.size))
  #   else:
  #     common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d)
  #     patch_list.append((sf.name, tf, sf, tf.size,
  #                        common.sha1(d).hexdigest()))
  #     largest_source_size = max(largest_source_size, sf.size)

  if not OPTIONS.override_prop:
    source_fp = GetBuildProp("ro.build.fingerprint",
                             OPTIONS.source_info_dict)
    target_fp = GetBuildProp("ro.build.fingerprint",
                             OPTIONS.target_info_dict)
    metadata["pre-build"] = source_fp
    metadata["post-build"] = target_fp

    script.Mount("/system")
    script.AssertSomeFingerprint(source_fp, target_fp)

  source_boot = common.GetBootableImage(
      "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT",
      OPTIONS.source_info_dict)
  target_boot = common.GetBootableImage(
      "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT")
  updating_boot = (source_boot.data != target_boot.data)

  # NOTE(review): recovery updating is hard-disabled below; the branch that
  # uses `target_recovery` is therefore unreachable (which is just as well,
  # since its definition is also commented out).
  #source_recovery = common.GetBootableImage(
  #    "/tmp/recovery.img", "recovery.img", OPTIONS.source_tmp, "RECOVERY",
  #    OPTIONS.source_info_dict)
  #target_recovery = common.GetBootableImage(
  #    "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
  #updating_recovery = (source_recovery.data != target_recovery.data)
  updating_recovery = False

  # Here's how we divide up the progress bar:
  #  0.1 for verifying the start state (PatchCheck calls)
  #  0.8 for applying patches (ApplyPatch calls)
  #  0.1 for unpacking verbatim files, symlinking, and doing the
  #      device-specific commands.

  AppendAssertions(script, OPTIONS.target_info_dict)
  device_specific.IncrementalOTA_Assertions()

  script.Print("Verifying current system...")

  device_specific.IncrementalOTA_VerifyBegin()

  script.ShowProgress(0.1, 0)
  # +1 avoids division by zero when there is nothing to verify.
  total_verify_size = float(sum([i[2].size for i in patch_list]) + 1)
  if updating_boot:
    total_verify_size += source_boot.size
  so_far = 0

  for fn, tf, sf, size, patch_sha in patch_list:
    script.PatchCheck("/" + fn, tf.sha1, sf.sha1)
    so_far += sf.size
    script.SetProgress(so_far / total_verify_size)

  if updating_boot:
    d = common.Difference(target_boot, source_boot)
    _, _, d = d.ComputePatch()
    print "boot target: %d source: %d diff: %d" % (
        target_boot.size, source_boot.size, len(d))

    common.ZipWriteStr(output_zip, "patch/boot.img.p", d)

    boot_type, boot_device = common.GetTypeAndDevice(
        "/boot", OPTIONS.info_dict)

    # EMMC-style check string: type:device:srcsize:srcsha:tgtsize:tgtsha.
    script.PatchCheck("%s:%s:%d:%s:%d:%s" %
                      (boot_type, boot_device,
                       source_boot.size, source_boot.sha1,
                       target_boot.size, target_boot.sha1))
    so_far += source_boot.size
    script.SetProgress(so_far / total_verify_size)

  if patch_list or updating_recovery or updating_boot:
    script.CacheFreeSpaceCheck(largest_source_size)

  device_specific.IncrementalOTA_VerifyEnd()

  script.Comment("---- start making changes here ----")

  device_specific.IncrementalOTA_InstallBegin()

  if OPTIONS.wipe_user_data:
    script.Print("Erasing user data...")
    script.FormatPartition("/data")

  # Remove verbatim-replaced files, files deleted between builds (excluding
  # renames), and the stale recovery image.
  script.Print("Removing unneeded files...")
  script.DeleteFiles(["/" + i[0] for i in verbatim_targets] +
                     ["/" + i for i in sorted(source_data)
                      if i not in target_data and i not in renames] +
                     ["/system/recovery.img"])

  script.ShowProgress(0.8, 0)
  total_patch_size = float(sum([i[1].size for i in patch_list]) + 1)
  if updating_boot:
    total_patch_size += target_boot.size
  so_far = 0

  script.Print("Patching system files...")
  deferred_patch_list = []
  for item in patch_list:
    fn, tf, sf, size, _ = item
    # build.prop is patched last (see below) so a mid-update failure still
    # looks like the old build.
    if tf.name == "system/build.prop":
      deferred_patch_list.append(item)
      continue
    script.ApplyPatch("/" + fn, "-", tf.size, tf.sha1, sf.sha1,
                      "patch/" + fn + ".p")
    so_far += tf.size
    script.SetProgress(so_far / total_patch_size)

  if updating_boot:
    # Produce the boot image by applying a patch to the current
    # contents of the boot partition, and write it back to the
    # partition.
    script.Print("Patching boot image...")
    script.ApplyPatch("%s:%s:%d:%s:%d:%s"
                      % (boot_type, boot_device,
                         source_boot.size, source_boot.sha1,
                         target_boot.size, target_boot.sha1),
                      "-",
                      target_boot.size, target_boot.sha1,
                      source_boot.sha1,
                      "patch/boot.img.p")
    so_far += target_boot.size
    script.SetProgress(so_far / total_patch_size)
    print "boot image changed; including."
  else:
    print "boot image unchanged; skipping."

  if updating_recovery:
    # Recovery is generated as a patch using both the boot image
    # (which contains the same linux kernel as recovery) and the file
    # /system/etc/recovery-resource.dat (which contains all the images
    # used in the recovery UI) as sources. This lets us minimize the
    # size of the patch, which must be included in every OTA package.
    #
    # For older builds where recovery-resource.dat is not present, we
    # use only the boot image as the source.
    MakeRecoveryPatch(OPTIONS.target_tmp, output_zip, target_recovery,
                      target_boot)
    script.DeleteFiles(["/system/recovery-from-boot.p",
                        "/system/etc/install-recovery.sh"])
    print "recovery image changed; including as patch from boot."
  else:
    print "recovery image unchanged; skipping."

  script.ShowProgress(0.1, 10)

  target_symlinks = CopySystemFiles(target_zip, None)

  # Maps link path -> destination for fast lookup below.
  target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks])
  temp_script = script.MakeTemporary()
  Item.GetMetadata(target_zip)
  Item.Get("system").SetPermissions(temp_script)

  # Note that this call will mess up the tree of Items, so make sure
  # we're done with it.
  source_symlinks = CopySystemFiles(source_zip, None)
  source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks])

  # Delete all the symlinks in source that aren't in target. This
  # needs to happen before verbatim files are unpacked, in case a
  # symlink in the source is replaced by a real file in the target.
  to_delete = []
  for dest, link in source_symlinks:
    if link not in target_symlinks_d:
      to_delete.append(link)
  script.DeleteFiles(to_delete)

  if verbatim_targets:
    script.Print("Unpacking new files...")
    script.UnpackPackageDir("system", "/system")

  #if updating_recovery:
  #  script.Print("Unpacking new recovery...")
  #  script.UnpackPackageDir("recovery", "/system")

  if len(renames) > 0:
    script.Print("Renaming files...")
    for src in renames:
      print "Renaming " + src + " to " + renames[src].name
      script.RenameFile(src, renames[src].name)

  script.Print("Symlinks and permissions...")

  # Create all the symlinks that don't already exist, or point to
  # somewhere different than what we want. Delete each symlink before
  # creating it, since the 'symlink' command won't overwrite.
  to_create = []
  for dest, link in target_symlinks:
    if link in source_symlinks_d:
      if dest != source_symlinks_d[link]:
        to_create.append((dest, link))
    else:
      to_create.append((dest, link))
  script.DeleteFiles([i[1] for i in to_create])
  script.MakeSymlinks(to_create)

  # Now that the symlinks are created, we can set all the
  # permissions.
  script.AppendScript(temp_script)

  # Do device-specific installation (eg, write radio image).
  device_specific.IncrementalOTA_InstallEnd()

  if OPTIONS.extra_script is not None:
    script.AppendExtra(OPTIONS.extra_script)

  # Patch the build.prop file last, so if something fails but the
  # device can still come up, it appears to be the old build and will
  # get set the OTA package again to retry.
  script.Print("Patching remaining system files...")
  for item in deferred_patch_list:
    fn, tf, sf, size, _ = item
    script.ApplyPatch("/" + fn, "-", tf.size, tf.sha1, sf.sha1,
                      "patch/" + fn + ".p")
  script.SetPermissions("/system/build.prop", 0, 0, 0644, None, None)

  script.AddToZip(target_zip, output_zip)
  WriteMetadata(metadata, output_zip)
def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
                       apk_keys, apex_keys, key_passwords,
                       platform_api_level, codename_to_api_level_map,
                       compressed_extension):
  """Re-sign the contents of a target-files zip with release keys.

  Iterates over every entry in input_tf_zip and writes a signed (or
  verbatim, or deliberately skipped) counterpart into output_tf_zip:
  APKs and APEXes are re-signed, build.prop files are rewritten,
  mac_permissions certs are replaced, and entries that will be
  regenerated later (OTA certs, misc_info, verity keys, care_map, ...)
  are dropped. After the loop, performs the whole-archive replacements
  (OTA keys, verity keys, AVB keys, misc_info).

  Args:
    input_tf_zip: opened input target-files ZipFile.
    output_tf_zip: opened output ZipFile to write into.
    misc_info: dict of META/misc_info.txt values (may be mutated).
    apk_keys: dict mapping APK basename -> signing key.
    apex_keys: dict mapping APEX basename -> (payload_key, container_key).
    key_passwords: dict mapping key -> password.
    platform_api_level: int API level used when signing APKs.
    codename_to_api_level_map: dict for resolving codenames to API levels.
    compressed_extension: extension of compressed APKs (e.g. ".gz") or None.

  Raises:
    common.ExternalError: if a debuggable (force_debuggable) ramdisk is
      present, since a debug boot image must not be signed.
  """
  # maxsize measures the maximum filename length, including the ones to be
  # skipped.
  maxsize = max(
      [len(os.path.basename(i.filename)) for i in input_tf_zip.infolist()
       if GetApkFileInfo(i.filename, compressed_extension, [])[0]])
  system_root_image = misc_info.get("system_root_image") == "true"

  for info in input_tf_zip.infolist():
    filename = info.filename
    # IMAGES/ entries are regenerated from the signed contents later.
    if filename.startswith("IMAGES/"):
      continue

    # Skip split super images, which will be re-generated during signing.
    if filename.startswith("OTA/") and filename.endswith(".img"):
      continue

    data = input_tf_zip.read(filename)
    out_info = copy.copy(info)
    (is_apk, is_compressed, should_be_skipped) = GetApkFileInfo(
        filename, compressed_extension, OPTIONS.skip_apks_with_path_prefix)

    if is_apk and should_be_skipped:
      # Copy skipped APKs verbatim.
      print(
          "NOT signing: %s\n"
          " (skipped due to matching prefix)" % (filename,))
      common.ZipWriteStr(output_tf_zip, out_info, data)

    # Sign APKs.
    elif is_apk:
      name = os.path.basename(filename)
      if is_compressed:
        name = name[:-len(compressed_extension)]

      key = apk_keys[name]
      if key not in common.SPECIAL_CERT_STRINGS:
        print(" signing: %-*s (%s)" % (maxsize, name, key))
        signed_data = SignApk(data, key, key_passwords[key],
                              platform_api_level,
                              codename_to_api_level_map, is_compressed)
        common.ZipWriteStr(output_tf_zip, out_info, signed_data)
      else:
        # an APK we're not supposed to sign.
        print(
            "NOT signing: %s\n"
            " (skipped due to special cert string)" % (name,))
        common.ZipWriteStr(output_tf_zip, out_info, data)

    # Sign bundled APEX files.
    elif filename.startswith("SYSTEM/apex") and filename.endswith(".apex"):
      name = os.path.basename(filename)
      payload_key, container_key = apex_keys[name]

      # We've asserted not having a case with only one of them PRESIGNED.
      if (payload_key not in common.SPECIAL_CERT_STRINGS and
          container_key not in common.SPECIAL_CERT_STRINGS):
        print(" signing: %-*s container (%s)" % (
            maxsize, name, container_key))
        print(" : %-*s payload (%s)" % (
            maxsize, name, payload_key))

        signed_apex = apex_utils.SignApex(
            data,
            payload_key,
            container_key,
            key_passwords[container_key],
            codename_to_api_level_map,
            OPTIONS.avb_extra_args.get('apex'))
        common.ZipWrite(output_tf_zip, signed_apex, filename)
      else:
        print(
            "NOT signing: %s\n"
            " (skipped due to special cert string)" % (name,))
        common.ZipWriteStr(output_tf_zip, out_info, data)

    # AVB public keys for the installed APEXes, which will be updated later.
    elif (os.path.dirname(filename) == 'SYSTEM/etc/security/apex' and
          filename != 'SYSTEM/etc/security/apex/'):
      continue

    # System properties.
    elif filename in ("SYSTEM/build.prop",
                      "VENDOR/build.prop",
                      "SYSTEM/vendor/build.prop",
                      "ODM/build.prop",  # legacy
                      "ODM/etc/build.prop",
                      "VENDOR/odm/build.prop",  # legacy
                      "VENDOR/odm/etc/build.prop",
                      "PRODUCT/build.prop",
                      "SYSTEM/product/build.prop",
                      "PRODUCT_SERVICES/build.prop",
                      "SYSTEM/product_services/build.prop",
                      "SYSTEM/etc/prop.default",
                      "BOOT/RAMDISK/prop.default",
                      "BOOT/RAMDISK/default.prop",  # legacy
                      "ROOT/default.prop",  # legacy
                      "RECOVERY/RAMDISK/prop.default",
                      "RECOVERY/RAMDISK/default.prop"):  # legacy
      print("Rewriting %s:" % (filename,))
      # Symlinked prop files are passed through untouched; only real files
      # get their properties rewritten.
      if stat.S_ISLNK(info.external_attr >> 16):
        new_data = data
      else:
        new_data = RewriteProps(data)
      common.ZipWriteStr(output_tf_zip, out_info, new_data)

    # Replace the certs in *mac_permissions.xml (there could be multiple, such
    # as {system,vendor}/etc/selinux/{plat,nonplat}_mac_permissions.xml).
    elif filename.endswith("mac_permissions.xml"):
      print("Rewriting %s with new keys." % (filename,))
      new_data = ReplaceCerts(data)
      common.ZipWriteStr(output_tf_zip, out_info, new_data)

    # Ask add_img_to_target_files to rebuild the recovery patch if needed.
    elif filename in ("SYSTEM/recovery-from-boot.p",
                      "SYSTEM/etc/recovery.img",
                      "SYSTEM/bin/install-recovery.sh"):
      OPTIONS.rebuild_recovery = True

    # Don't copy OTA certs if we're replacing them.
    elif (
        OPTIONS.replace_ota_keys and
        filename in (
            "BOOT/RAMDISK/system/etc/security/otacerts.zip",
            "BOOT/RAMDISK/system/etc/update_engine/update-payload-key.pub.pem",
            "RECOVERY/RAMDISK/system/etc/security/otacerts.zip",
            "SYSTEM/etc/security/otacerts.zip",
            "SYSTEM/etc/update_engine/update-payload-key.pub.pem")):
      pass

    # Skip META/misc_info.txt since we will write back the new values later.
    elif filename == "META/misc_info.txt":
      pass

    # Skip verity public key if we will replace it.
    elif (OPTIONS.replace_verity_public_key and
          filename in ("BOOT/RAMDISK/verity_key",
                       "ROOT/verity_key")):
      pass

    # Skip verity keyid (for system_root_image use) if we will replace it.
    elif OPTIONS.replace_verity_keyid and filename == "BOOT/cmdline":
      pass

    # Skip the care_map as we will regenerate the system/vendor images.
    elif filename == "META/care_map.pb" or filename == "META/care_map.txt":
      pass

    # Updates system_other.avbpubkey in /product/etc/.
    elif filename in (
        "PRODUCT/etc/security/avb/system_other.avbpubkey",
        "SYSTEM/product/etc/security/avb/system_other.avbpubkey"):
      # Only update system_other's public key, if the corresponding signing
      # key is specified via --avb_system_other_key.
      signing_key = OPTIONS.avb_keys.get("system_other")
      if signing_key:
        public_key = common.ExtractAvbPublicKey(signing_key)
        print(" Rewriting AVB public key of system_other in /product")
        common.ZipWrite(output_tf_zip, public_key, filename)

    # Should NOT sign boot-debug.img.
    # BUGFIX: a missing comma made the two RECOVERY paths concatenate into a
    # single bogus string, so neither debug marker under RECOVERY/ was ever
    # detected. Each path is now a separate tuple element.
    elif filename in (
        "BOOT/RAMDISK/force_debuggable",
        "RECOVERY/RAMDISK/force_debuggable",
        "RECOVERY/RAMDISK/first_stage_ramdisk/force_debuggable"):
      raise common.ExternalError("debuggable boot.img cannot be signed")

    # A non-APK file; copy it verbatim.
    else:
      common.ZipWriteStr(output_tf_zip, out_info, data)

  if OPTIONS.replace_ota_keys:
    ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info)

  # Replace the keyid string in misc_info dict.
  if OPTIONS.replace_verity_private_key:
    ReplaceVerityPrivateKey(misc_info, OPTIONS.replace_verity_private_key[1])

  if OPTIONS.replace_verity_public_key:
    dest = ("ROOT/verity_key" if system_root_image
            else "BOOT/RAMDISK/verity_key")
    # We are replacing the one in boot image only, since the one under
    # recovery won't ever be needed.
    ReplaceVerityPublicKey(
        output_tf_zip, dest, OPTIONS.replace_verity_public_key[1])

  # Replace the keyid string in BOOT/cmdline.
  if OPTIONS.replace_verity_keyid:
    ReplaceVerityKeyId(input_tf_zip, output_tf_zip,
                       OPTIONS.replace_verity_keyid[1])

  # Replace the AVB signing keys, if any.
  ReplaceAvbSigningKeys(misc_info)

  # Write back misc_info with the latest values.
  ReplaceMiscInfoTxt(input_tf_zip, output_tf_zip, misc_info)
def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
                       apk_keys, apex_keys, key_passwords,
                       platform_api_level, codename_to_api_level_map,
                       compressed_extension):
  """Re-sign the contents of a target-files zip with release keys.

  NOTE(review): this file defines ProcessTargetFiles twice; this later
  definition shadows the earlier one. Consider removing the stale copy.

  Iterates over every entry in input_tf_zip and writes a signed (or
  verbatim, or deliberately skipped) counterpart into output_tf_zip:
  APKs and APEXes are re-signed, prop files are rewritten, certs in
  permission XMLs are replaced, and entries that will be regenerated
  later (OTA certs, misc_info, verity/AVB keys, care_map, ...) are
  dropped. After the loop, performs the whole-archive replacements
  (OTA keys, verity keys, AVB keys and props, misc_info).

  Args:
    input_tf_zip: opened input target-files ZipFile.
    output_tf_zip: opened output ZipFile to write into.
    misc_info: dict of META/misc_info.txt values (may be mutated).
    apk_keys: dict mapping APK basename -> signing key.
    apex_keys: dict mapping APEX basename -> (payload_key, container_key).
    key_passwords: dict mapping key -> password.
    platform_api_level: int API level used when signing APKs.
    codename_to_api_level_map: dict for resolving codenames to API levels.
    compressed_extension: extension of compressed APKs (e.g. ".gz") or None.

  Raises:
    common.ExternalError: if a debuggable (force_debuggable) ramdisk is
      present, since a debug boot image must not be signed.
  """
  # maxsize measures the maximum filename length, including the ones to be
  # skipped.
  maxsize = max(
      [len(os.path.basename(i.filename)) for i in input_tf_zip.infolist()
       if GetApkFileInfo(i.filename, compressed_extension, [])[0]])
  system_root_image = misc_info.get("system_root_image") == "true"

  for info in input_tf_zip.infolist():
    filename = info.filename
    # IMAGES/ entries are regenerated from the signed contents later.
    if filename.startswith("IMAGES/"):
      continue

    # Skip OTA-specific images (e.g. split super images), which will be
    # re-generated during signing.
    if filename.startswith("OTA/") and filename.endswith(".img"):
      continue

    data = input_tf_zip.read(filename)
    out_info = copy.copy(info)
    (is_apk, is_compressed, should_be_skipped) = GetApkFileInfo(
        filename, compressed_extension, OPTIONS.skip_apks_with_path_prefix)

    if is_apk and should_be_skipped:
      # Copy skipped APKs verbatim.
      print(
          "NOT signing: %s\n"
          " (skipped due to matching prefix)" % (filename,))
      common.ZipWriteStr(output_tf_zip, out_info, data)

    # Sign APKs.
    elif is_apk:
      name = os.path.basename(filename)
      if is_compressed:
        name = name[:-len(compressed_extension)]

      key = apk_keys[name]
      if key not in common.SPECIAL_CERT_STRINGS:
        print(" signing: %-*s (%s)" % (maxsize, name, key))
        signed_data = SignApk(data, key, key_passwords[key],
                              platform_api_level,
                              codename_to_api_level_map, is_compressed)
        common.ZipWriteStr(output_tf_zip, out_info, signed_data)
      else:
        # an APK we're not supposed to sign.
        print(
            "NOT signing: %s\n"
            " (skipped due to special cert string)" % (name,))
        common.ZipWriteStr(output_tf_zip, out_info, data)

    # Sign bundled APEX files.
    elif filename.startswith("SYSTEM/apex") and filename.endswith(".apex"):
      name = os.path.basename(filename)
      payload_key, container_key = apex_keys[name]

      # We've asserted not having a case with only one of them PRESIGNED.
      if (payload_key not in common.SPECIAL_CERT_STRINGS and
          container_key not in common.SPECIAL_CERT_STRINGS):
        print(" signing: %-*s container (%s)" % (
            maxsize, name, container_key))
        print(" : %-*s payload (%s)" % (
            maxsize, name, payload_key))

        # Fall back to the avbtool on PATH if misc_info doesn't pin one.
        avbtool = (misc_info['avb_avbtool']
                   if 'avb_avbtool' in misc_info else 'avbtool')
        signed_apex = apex_utils.SignApex(
            avbtool,
            data,
            payload_key,
            container_key,
            key_passwords[container_key],
            apk_keys,
            codename_to_api_level_map,
            no_hashtree=True,
            signing_args=OPTIONS.avb_extra_args.get('apex'))
        common.ZipWrite(output_tf_zip, signed_apex, filename)
      else:
        print(
            "NOT signing: %s\n"
            " (skipped due to special cert string)" % (name,))
        common.ZipWriteStr(output_tf_zip, out_info, data)

    # AVB public keys for the installed APEXes, which will be updated later.
    elif (os.path.dirname(filename) == 'SYSTEM/etc/security/apex' and
          filename != 'SYSTEM/etc/security/apex/'):
      continue

    # System properties.
    elif filename in (
        "SYSTEM/build.prop",
        "VENDOR/build.prop",
        "SYSTEM/vendor/build.prop",
        "ODM/etc/build.prop",
        "VENDOR/odm/etc/build.prop",
        "PRODUCT/build.prop",
        "SYSTEM/product/build.prop",
        "SYSTEM_EXT/build.prop",
        "SYSTEM/system_ext/build.prop",
        "SYSTEM/etc/prop.default",
        "BOOT/RAMDISK/prop.default",
        "RECOVERY/RAMDISK/prop.default",
        # ROOT/default.prop is a legacy path, but may still exist for upgrading
        # devices that don't support `property_overrides_split_enabled`.
        "ROOT/default.prop",
        # RECOVERY/RAMDISK/default.prop is a legacy path, but will always exist
        # as a symlink in the current code. So it's a no-op here. Keeping the
        # path here for clarity.
        "RECOVERY/RAMDISK/default.prop"):
      print("Rewriting %s:" % (filename,))
      # Symlinked prop files are passed through untouched; only real files
      # get their properties rewritten.
      if stat.S_ISLNK(info.external_attr >> 16):
        new_data = data
      else:
        new_data = RewriteProps(data.decode())
      common.ZipWriteStr(output_tf_zip, out_info, new_data)

    # Replace the certs in *mac_permissions.xml (there could be multiple, such
    # as {system,vendor}/etc/selinux/{plat,nonplat}_mac_permissions.xml).
    elif filename.endswith("mac_permissions.xml"):
      print("Rewriting %s with new keys." % (filename,))
      new_data = ReplaceCerts(data.decode())
      common.ZipWriteStr(output_tf_zip, out_info, new_data)

    elif info.filename.startswith("SYSTEM/etc/permissions/"):
      print("rewriting %s with new keys." % info.filename)
      # BUGFIX: ZipFile.read() returns bytes; decode to str before
      # ReplaceCerts, matching the mac_permissions.xml branch above.
      new_data = ReplaceCerts(data.decode())
      common.ZipWriteStr(output_tf_zip, out_info, new_data)

    # Ask add_img_to_target_files to rebuild the recovery patch if needed.
    elif filename in ("SYSTEM/recovery-from-boot.p",
                      "VENDOR/recovery-from-boot.p",
                      "SYSTEM/etc/recovery.img",
                      "VENDOR/etc/recovery.img",
                      "SYSTEM/bin/install-recovery.sh",
                      "VENDOR/bin/install-recovery.sh"):
      OPTIONS.rebuild_recovery = True

    # Don't copy OTA certs if we're replacing them.
    # Replacement of update-payload-key.pub.pem was removed in b/116660991.
    elif (
        OPTIONS.replace_ota_keys and
        filename in (
            "BOOT/RAMDISK/system/etc/security/otacerts.zip",
            "RECOVERY/RAMDISK/system/etc/security/otacerts.zip",
            "SYSTEM/etc/security/otacerts.zip")):
      pass

    # Skip META/misc_info.txt since we will write back the new values later.
    elif filename == "META/misc_info.txt":
      pass

    # Skip verity public key if we will replace it.
    elif (OPTIONS.replace_verity_public_key and
          filename in ("BOOT/RAMDISK/verity_key",
                       "ROOT/verity_key")):
      pass

    # Drop requested AVB public keys from the ramdisk; copy others verbatim.
    elif (OPTIONS.remove_avb_public_keys and
          (filename.startswith("BOOT/RAMDISK/avb/") or
           filename.startswith("BOOT/RAMDISK/first_stage_ramdisk/avb/"))):
      matched_removal = False
      for key_to_remove in OPTIONS.remove_avb_public_keys:
        if filename.endswith(key_to_remove):
          matched_removal = True
          print("Removing AVB public key from ramdisk: %s" % filename)
          break
      if not matched_removal:
        # Copy it verbatim if we don't want to remove it.
        common.ZipWriteStr(output_tf_zip, out_info, data)

    # Skip verity keyid (for system_root_image use) if we will replace it.
    elif OPTIONS.replace_verity_keyid and filename == "BOOT/cmdline":
      pass

    # Skip the care_map as we will regenerate the system/vendor images.
    elif filename == "META/care_map.pb" or filename == "META/care_map.txt":
      pass

    # Updates system_other.avbpubkey in /product/etc/.
    elif filename in (
        "PRODUCT/etc/security/avb/system_other.avbpubkey",
        "SYSTEM/product/etc/security/avb/system_other.avbpubkey"):
      # Only update system_other's public key, if the corresponding signing
      # key is specified via --avb_system_other_key.
      signing_key = OPTIONS.avb_keys.get("system_other")
      if signing_key:
        avbtool = (misc_info['avb_avbtool']
                   if 'avb_avbtool' in misc_info else 'avbtool')
        public_key = common.ExtractAvbPublicKey(
            avbtool, signing_key)
        print(" Rewriting AVB public key of system_other in /product")
        common.ZipWrite(output_tf_zip, public_key, filename)

    # Should NOT sign boot-debug.img.
    elif filename in (
        "BOOT/RAMDISK/force_debuggable",
        "BOOT/RAMDISK/first_stage_ramdisk/force_debuggable"):
      raise common.ExternalError("debuggable boot.img cannot be signed")

    # A non-APK file; copy it verbatim.
    else:
      common.ZipWriteStr(output_tf_zip, out_info, data)

  if OPTIONS.replace_ota_keys:
    ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info)

  # Replace the keyid string in misc_info dict.
  if OPTIONS.replace_verity_private_key:
    ReplaceVerityPrivateKey(misc_info, OPTIONS.replace_verity_private_key[1])

  if OPTIONS.replace_verity_public_key:
    # Replace the one in root dir in system.img.
    ReplaceVerityPublicKey(
        output_tf_zip, 'ROOT/verity_key',
        OPTIONS.replace_verity_public_key[1])

    if not system_root_image:
      # Additionally replace the copy in ramdisk if not using system-as-root.
      ReplaceVerityPublicKey(
          output_tf_zip,
          'BOOT/RAMDISK/verity_key',
          OPTIONS.replace_verity_public_key[1])

  # Replace the keyid string in BOOT/cmdline.
  if OPTIONS.replace_verity_keyid:
    ReplaceVerityKeyId(input_tf_zip, output_tf_zip,
                       OPTIONS.replace_verity_keyid[1])

  # Replace the AVB signing keys, if any.
  ReplaceAvbSigningKeys(misc_info)

  # Rewrite the props in AVB signing args.
  if misc_info.get('avb_enable') == 'true':
    RewriteAvbProps(misc_info)

  # Write back misc_info with the latest values.
  ReplaceMiscInfoTxt(input_tf_zip, output_tf_zip, misc_info)
def zero_pad(data, size):
  """Return *data* right-padded with NUL bytes to exactly *size* bytes.

  Args:
    data: the binary blob to pad.
    size: the desired total length in bytes.

  Returns:
    A bytes object of length `size` (unchanged if already that length).

  Raises:
    common.ExternalError: if data is already longer than size.
  """
  overflow = len(data) - size
  if overflow > 0:
    raise common.ExternalError("Binary is already larger than pad size")
  # bytes.ljust pads on the right with the given fill byte.
  return data.ljust(size, b'\x00')