def extract(payload_file_name, output_dir="output", partition_names=None):
    """Extract partition images from a full (non-delta) OTA payload.

    Args:
        payload_file_name: Path to the payload file.
        output_dir: Directory where `<partition>.img` files are written;
            created if missing.
        partition_names: Optional collection of partition names to extract;
            extracts every partition when falsy.
    """
    try:
        os.makedirs(output_dir)
    except OSError as e:
        # Ignore "already exists"; re-raise anything else (e.g. EACCES).
        if e.errno != errno.EEXIST:
            raise
    with open(payload_file_name, 'rb') as payload_file:
        payload = update_payload.Payload(payload_file)
        payload.Init()
        if payload.IsDelta():
            print("Delta payloads are not supported")
            # Use sys.exit instead of the interactive-only exit() helper,
            # which the `site` module provides and is not guaranteed to
            # exist when run with -S or frozen.
            sys.exit(1)
        helper = applier.PayloadApplier(payload)
        for part in payload.manifest.partitions:
            if partition_names and part.partition_name not in partition_names:
                continue
            print("Extracting {}".format(part.partition_name))
            output_file = os.path.join(output_dir,
                                       part.partition_name + '.img')
            helper._ApplyToPartition(part.operations, part.partition_name,
                                     'install_operations', output_file,
                                     part.new_partition_info)
def list_content(payload_file_name):
    """Print each partition's name and target size from a payload file."""
    with open(payload_file_name, 'rb') as fp:
        update = update_payload.Payload(fp)
        update.Init()
        for partition in update.manifest.partitions:
            size = partition.new_partition_info.size
            print("{} ({} bytes)".format(partition.partition_name, size))
def IsDeltaFormatFile(filename):
    """Return True if `filename` is a delta-format update payload.

    Args:
        filename: Path to a candidate payload file.

    Returns:
        True when the file parses as a delta payload; False when it is a
        full payload or cannot be read/parsed.
    """
    try:
        # Payloads are binary; open in 'rb' (as the other payload readers in
        # this file do) so parsing reads raw bytes. Text mode could raise a
        # UnicodeDecodeError, which the handler below would not catch.
        with open(filename, 'rb') as payload_file:
            payload = update_payload.Payload(payload_file)
            payload.Init()
            return payload.IsDelta()
    except (IOError, update_payload.PayloadError):
        # For unit tests we may not have real files, so it's ok to ignore
        # these errors.
        return False
def run_ota(source, target, payload_path, tempdir):
    """Run an OTA on host side.

    Extracts the per-partition source/target images from `source`/`target`
    (zip archives or directories — see extract_img), applies the payload with
    the external `delta_generator` tool, and compares each produced partition
    against the expected target image.

    Args:
        source: Source build (zip path / ZipFile / directory, per extract_img).
        target: Target build, same accepted forms as `source`.
        payload_path: Path to the OTA payload file.
        tempdir: Directory used for intermediate partition images.
    """
    payload = update_payload.Payload(payload_path)
    payload.Init()
    # Wrap zip archives; non-zip inputs are passed to extract_img as-is.
    if zipfile.is_zipfile(source):
        source = zipfile.ZipFile(source)
    if zipfile.is_zipfile(target):
        target = zipfile.ZipFile(target)
    old_partitions = []
    new_partitions = []
    expected_new_partitions = []
    for part in payload.manifest.partitions:
        name = part.partition_name
        old_image = os.path.join(tempdir, "source_" + name + ".img")
        new_image = os.path.join(tempdir, "target_" + name + ".img")
        print("Extracting source image for", name)
        extract_img(source, name, old_image)
        print("Extracting target image for", name)
        extract_img(target, name, new_image)
        old_partitions.append(old_image)
        # delta_generator writes its output next to the expected image,
        # under a ".actual" suffix.
        scratch_image_name = new_image + ".actual"
        new_partitions.append(scratch_image_name)
        # Pre-size the scratch file to the declared target partition size.
        with open(scratch_image_name, "wb") as fp:
            fp.truncate(part.new_partition_info.size)
        expected_new_partitions.append(new_image)
    delta_generator_args = ["delta_generator", "--in_file=" + payload_path]
    partition_names = [
        part.partition_name for part in payload.manifest.partitions
    ]
    # delta_generator expects colon-separated, position-aligned lists.
    delta_generator_args.append("--partition_names=" +
                                ":".join(partition_names))
    delta_generator_args.append("--old_partitions=" +
                                ":".join(old_partitions))
    delta_generator_args.append("--new_partitions=" +
                                ":".join(new_partitions))
    # Raises CalledProcessError if delta_generator fails.
    subprocess.check_output(delta_generator_args)
    valid = True
    # Each applied partition must be byte-identical to the target image.
    for (expected_part, actual_part, part_name) in \
            zip(expected_new_partitions, new_partitions, partition_names):
        if filecmp.cmp(expected_part, actual_part):
            print("Partition `{}` is valid".format(part_name))
        else:
            valid = False
            print(
                "Partition `{}` is INVALID expected image: {} actual image: {}"
                .format(part_name, expected_part, actual_part))
    # Keep intermediate files around for inspection when run interactively.
    if not valid and sys.stdout.isatty():
        input(
            "Paused to investigate invalid partitions, press any key to exit.")
def Run(self):
    """Parse the update payload and display information from it."""
    self.payload = update_payload.Payload(self.options.payload_file)
    self.payload.Init()

    self._DisplayHeader()
    self._DisplayManifest()

    opts = self.options
    if opts.signatures:
        self._DisplaySignatures()
    if opts.stats:
        self._DisplayStats(self.payload.manifest)
    if opts.list_ops:
        print()
        for part in self.payload.manifest.partitions:
            title = '%s install operations' % part.partition_name
            self._DisplayOps(title, part.operations)
def _GetMetadataSize(payload_filename):
    """Gets the size of the metadata in a payload file.

    Args:
      payload_filename: Path to the payload file.

    Returns:
      The size of the payload metadata, as reported in the payload header,
      or 0 when the file cannot be read or parsed.
    """
    try:
        # Payloads are binary; open in 'rb' so header parsing reads raw
        # bytes. Text mode could raise a UnicodeDecodeError, which the
        # handler below would not catch.
        with open(payload_filename, 'rb') as payload_file:
            payload = update_payload.Payload(payload_file)
            payload.Init()
            return payload.metadata_size
    except (IOError, update_payload.PayloadError):
        # For unit tests we may not have real files, so it's ok to ignore
        # these errors.
        return 0
def extract(payload_file_name, output_dir="output", old_dir="old",
            partition_names=None, skip_hash=None):
    """Extract (or, for delta payloads, patch) partition images.

    Args:
        payload_file_name: Path to the payload file.
        output_dir: Directory for the resulting `<partition>.img` files;
            created if missing.
        old_dir: Directory holding the source `<partition>.img` files used
            when the payload is a delta.
        partition_names: Optional collection of partition names to process;
            processes all partitions when falsy.
        skip_hash: Forwarded to the applier; presumably disables hash
            verification — TODO confirm against applier._ApplyToPartition.
    """
    try:
        os.makedirs(output_dir)
    except OSError as e:
        # Ignore "already exists"; re-raise anything else.
        if e.errno != errno.EEXIST:
            raise
    # The applier looks up source partitions without the ".img" suffix, so
    # temporarily strip it from every image in old_dir ([:-4] drops ".img").
    for i in glob.glob(old_dir + '/*.img'):
        os.rename(i, i[:-4])
    with open(payload_file_name, 'rb') as payload_file:
        payload = update_payload.Payload(payload_file)
        payload.Init()
        helper = applier.PayloadApplier(payload)
        for part in payload.manifest.partitions:
            if partition_names and part.partition_name not in partition_names:
                continue
            print("Extracting {}".format(part.partition_name))
            output_file = os.path.join(output_dir, part.partition_name)
            if payload.IsDelta():
                # Delta: patch the matching source image from old_dir.
                old_file = os.path.join(old_dir, part.partition_name)
                helper._ApplyToPartition(part.operations, part.partition_name,
                                         'install_operations', output_file,
                                         part.new_partition_info, old_file,
                                         part.old_partition_info, skip_hash)
            else:
                # Full payload: write the image from scratch.
                helper._ApplyToPartition(part.operations, part.partition_name,
                                         'install_operations', output_file,
                                         part.new_partition_info,
                                         skip_hash=skip_hash)
    # Restore/add the ".img" suffix on everything in both directories.
    # NOTE(review): this appends ".img" to ALL files in old_dir, including
    # any that never had the suffix before the first rename pass — confirm
    # that old_dir is expected to contain only partition images.
    for i in glob.glob(old_dir + '/*'):
        os.rename(i, i + '.img')
    for i in glob.glob(output_dir + '/*'):
        os.rename(i, i + '.img')
def extract(payload_file_name, output_dir="output", source_dir="source",
            partition_names=None):
    """Extract partition images from a payload; patch against source_dir
    images when the payload is a delta.

    Args:
        payload_file_name: Path to the payload file.
        output_dir: Directory for the resulting images; created if missing.
        source_dir: Directory holding `<partition>.img` source images,
            required only for delta payloads.
        partition_names: Optional collection of partition names to process;
            processes all partitions when falsy.
    """
    try:
        os.makedirs(output_dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    with open(payload_file_name, 'rb') as fp:
        update = update_payload.Payload(fp)
        update.Init()
        patcher = applier.PayloadApplier(update)
        is_delta = update.IsDelta()
        if is_delta:
            print("Delta payload Detected")
            if not os.path.isdir(source_dir):
                raise SystemExit("Source directory not found")
        for part in update.manifest.partitions:
            name = part.partition_name
            if partition_names and name not in partition_names:
                continue
            out_path = os.path.join(output_dir, name + ".img")
            if is_delta:
                # Delta payloads need the matching source image to patch.
                src_path = os.path.join(source_dir, name + ".img")
                if not os.path.isfile(src_path):
                    print("Source for partition " + name +
                          " not found, skipping...")
                    continue
                print("Patching {}".format(name))
                patcher._ApplyToPartition(
                    part.operations, name, 'install_operations', out_path,
                    part.new_partition_info, src_path)
            else:
                print("Extracting {}".format(name))
                patcher._ApplyToPartition(
                    part.operations, name, 'install_operations', out_path,
                    part.new_partition_info)
def Run(self):
    """Parse the update payload and display information from it."""
    self.payload = update_payload.Payload(self.options.payload_file)
    self.payload.Init()

    self._DisplayHeader()
    self._DisplayManifest()

    if self.options.signatures:
        self._DisplaySignatures()
    if self.options.stats:
        self._DisplayStats(self.payload.manifest)
    if self.options.list_ops:
        print()
        manifest = self.payload.manifest
        if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
            # Brillo payloads carry a per-partition operation list.
            for part in manifest.partitions:
                title = '%s install operations' % part.partition_name
                self._DisplayOps(title, part.operations)
        else:
            # Older payloads have fixed rootfs and kernel operation lists.
            self._DisplayOps('Install operations',
                             manifest.install_operations)
            self._DisplayOps('Kernel install operations',
                             manifest.kernel_install_operations)
def extract_ota(payload_path, list_partitions, output_dir, partitions):
    """Extract OTA payload"""
    payload = update_payload.Payload(payload_path)
    payload.Init()

    # Gather, for each selected partition: output image path, target
    # partition info, and the install operations to replay.
    image_paths = {}
    target_infos = {}
    pending = []
    for part in payload.manifest.partitions:
        part_name = part.partition_name
        if list_partitions:
            print(part_name)
        if partitions and part_name not in partitions:
            continue
        image_paths[part_name] = os.path.join(output_dir, part_name + ".img")
        target_infos[part_name] = part.new_partition_info
        pending.append((part_name, part.operations))

    # In list-only mode nothing is written.
    if list_partitions:
        return
    for part_name, ops in pending:
        applier.PayloadApplier(payload)._ApplyToPartition(
            ops, part_name, '%s_install_operations' % part_name,
            image_paths[part_name], target_infos[part_name])
def run_ota(source, target, payload_path, tempdir, output_dir):
    """Run an OTA on host side.

    Applies `payload_path` with the external `delta_generator` tool. Source
    images are required only for partitions carrying old_partition_info
    (incremental updates). When the target build is unavailable, the produced
    images are copied to `output_dir` instead of being verified.

    Args:
        source: Source build (zip path / ZipFile / directory, per
            extract_img), or a falsy value when not available.
        target: Target build, same accepted forms; may be falsy.
        payload_path: Path to the OTA payload file.
        tempdir: Directory used for intermediate partition images.
        output_dir: Destination for output images when `target` is absent.
    """
    payload = update_payload.Payload(payload_path)
    payload.Init()
    # Wrap zip archives; other inputs go to extract_img as-is.
    if source and zipfile.is_zipfile(source):
        source = zipfile.ZipFile(source)
    if target and zipfile.is_zipfile(target):
        target = zipfile.ZipFile(target)
    source_exist = source and (isinstance(source, zipfile.ZipFile) or
                               os.path.exists(source))
    target_exist = target and (isinstance(target, zipfile.ZipFile) or
                               os.path.exists(target))
    old_partitions = []
    new_partitions = []
    expected_new_partitions = []
    for part in payload.manifest.partitions:
        name = part.partition_name
        old_image = os.path.join(tempdir, "source_" + name + ".img")
        new_image = os.path.join(tempdir, "target_" + name + ".img")
        # Only partitions updated incrementally carry old_partition_info
        # and therefore need a source image.
        if part.HasField("old_partition_info"):
            assert source_exist, \
                "source target file must point to a valid zipfile or directory " + \
                source
            print("Extracting source image for", name)
            extract_img(source, name, old_image)
        if target_exist:
            print("Extracting target image for", name)
            extract_img(target, name, new_image)
        old_partitions.append(old_image)
        # delta_generator writes its output under a ".actual" suffix.
        scratch_image_name = new_image + ".actual"
        new_partitions.append(scratch_image_name)
        # Pre-size the scratch file to the declared target partition size.
        with open(scratch_image_name, "wb") as fp:
            fp.truncate(part.new_partition_info.size)
        expected_new_partitions.append(new_image)
    delta_generator_args = ["delta_generator", "--in_file=" + payload_path]
    partition_names = [
        part.partition_name for part in payload.manifest.partitions
    ]
    if payload.manifest.partial_update:
        delta_generator_args.append("--is_partial_update")
    # Source images are only passed for incremental payloads.
    if payload.is_incremental:
        delta_generator_args.append("--old_partitions=" +
                                    ":".join(old_partitions))
    # delta_generator expects colon-separated, position-aligned lists.
    delta_generator_args.append("--partition_names=" +
                                ":".join(partition_names))
    delta_generator_args.append("--new_partitions=" +
                                ":".join(new_partitions))
    print("Running ", " ".join(delta_generator_args))
    # Raises CalledProcessError if delta_generator fails.
    subprocess.check_output(delta_generator_args)
    valid = True
    # Without a target build there is nothing to verify against: just
    # deliver the produced images and stop.
    if not target_exist:
        for part in new_partitions:
            print("Output written to", part)
            shutil.copy(part, output_dir)
        return
    # Each applied partition must be byte-identical to the target image.
    for (expected_part, actual_part, part_name) in \
            zip(expected_new_partitions, new_partitions, partition_names):
        if filecmp.cmp(expected_part, actual_part):
            print("Partition `{}` is valid".format(part_name))
        else:
            valid = False
            print(
                "Partition `{}` is INVALID expected image: {} actual image: {}"
                .format(part_name, expected_part, actual_part))
    # Keep intermediate files around for inspection when run interactively.
    if not valid and sys.stdout.isatty():
        input(
            "Paused to investigate invalid partitions, press any key to exit.")
def main(argv):
    """Entry point: describe, check, trace, and/or apply an update payload.

    Args:
        argv: Full process argument vector (argv[0] is the program name).

    Returns:
        1 on payload error; None (i.e. success) otherwise.
    """
    # Parse and validate arguments.
    options, payload_file_name, extra_args = ParseArguments(argv[1:])

    # Payloads are binary; open in 'rb' so parsing reads raw bytes (text
    # mode could raise a decode error on arbitrary payload content).
    with open(payload_file_name, 'rb') as payload_file:
        payload = update_payload.Payload(payload_file)
        try:
            # Initialize payload.
            payload.Init()

            if options.describe:
                payload.Describe()

            # Perform payload integrity checks.
            if options.check:
                report_file = None
                do_close_report_file = False
                metadata_sig_file = None
                try:
                    if options.report:
                        if options.report == '-':
                            report_file = sys.stdout
                        else:
                            report_file = open(options.report, 'w')
                            do_close_report_file = True

                    # NOTE(review): the metadata signature is opened in text
                    # mode; presumably it is a base64/armored file — confirm.
                    metadata_sig_file = options.meta_sig and open(
                        options.meta_sig)
                    payload.Check(pubkey_file_name=options.key,
                                  metadata_sig_file=metadata_sig_file,
                                  report_out_file=report_file,
                                  assert_type=options.assert_type,
                                  block_size=int(options.block_size),
                                  rootfs_part_size=options.root_part_size,
                                  kernel_part_size=options.kern_part_size,
                                  allow_unhashed=options.allow_unhashed,
                                  disabled_tests=options.disabled_tests)
                finally:
                    if metadata_sig_file:
                        metadata_sig_file.close()
                    if do_close_report_file:
                        report_file.close()

            # Trace blocks.
            if options.root_block is not None:
                payload.TraceBlock(options.root_block, options.skip,
                                   sys.stdout, False)
            if options.kern_block is not None:
                payload.TraceBlock(options.kern_block, options.skip,
                                   sys.stdout, True)

            # Apply payload: extra_args holds [dst_kernel, dst_rootfs] and,
            # for delta payloads, [.., old_kernel, old_rootfs].
            if extra_args:
                dargs = {'bsdiff_in_place': not options.extract_bsdiff}
                if options.bspatch_path:
                    dargs['bspatch_path'] = options.bspatch_path
                if options.puffpatch_path:
                    dargs['puffpatch_path'] = options.puffpatch_path
                if options.assert_type == _TYPE_DELTA:
                    dargs['old_kernel_part'] = extra_args[2]
                    dargs['old_rootfs_part'] = extra_args[3]
                payload.Apply(extra_args[0], extra_args[1], **dargs)
        # 'except X as e' replaces the Python-2-only 'except X, e' form; the
        # 'as' spelling works on Python 2.6+ and is required on Python 3.
        except update_payload.PayloadError as e:
            sys.stderr.write('Error: %s\n' % e)
            return 1
def main(argv):
    """Entry point: describe, check, and/or apply an update payload.

    Args:
        argv: Full process argument vector (argv[0] is the program name).

    Returns:
        1 on payload error; None (i.e. success) otherwise.

    Raises:
        error.PayloadError indirectly when an applied partition does not
        match the given destination image (caught and reported below).
    """
    # Parse and validate arguments.
    args = ParseArguments(argv[1:])

    # Payloads are binary; open in 'rb' so parsing reads raw bytes (text
    # mode could raise a decode error on arbitrary payload content).
    with open(args.payload, 'rb') as payload_file:
        payload = update_payload.Payload(payload_file)
        try:
            # Initialize payload.
            payload.Init()

            if args.describe:
                payload.Describe()

            # Perform payload integrity checks.
            if args.check:
                report_file = None
                do_close_report_file = False
                metadata_sig_file = None
                try:
                    if args.report:
                        if args.report == '-':
                            report_file = sys.stdout
                        else:
                            report_file = open(args.report, 'w')
                            do_close_report_file = True

                    part_sizes = dict(zip(args.part_names, args.part_sizes))
                    # NOTE(review): the metadata signature is opened in text
                    # mode; presumably it is a base64/armored file — confirm.
                    metadata_sig_file = args.meta_sig and open(args.meta_sig)
                    payload.Check(pubkey_file_name=args.key,
                                  metadata_sig_file=metadata_sig_file,
                                  metadata_size=int(args.metadata_size),
                                  report_out_file=report_file,
                                  assert_type=args.assert_type,
                                  block_size=int(args.block_size),
                                  part_sizes=part_sizes,
                                  allow_unhashed=args.allow_unhashed,
                                  disabled_tests=args.disabled_tests)
                finally:
                    if metadata_sig_file:
                        metadata_sig_file.close()
                    if do_close_report_file:
                        report_file.close()

            # Apply payload.
            if all(args.dst_part_paths) or all(args.out_dst_part_paths):
                dargs = {'bsdiff_in_place': not args.extract_bsdiff}
                if args.bspatch_path:
                    dargs['bspatch_path'] = args.bspatch_path
                if args.puffpatch_path:
                    dargs['puffpatch_path'] = args.puffpatch_path
                if args.assert_type == _TYPE_DELTA:
                    dargs['old_parts'] = dict(
                        zip(args.part_names, args.src_part_paths))

                out_dst_parts = {}
                file_handles = []
                if all(args.out_dst_part_paths):
                    for name, path in zip(args.part_names,
                                          args.out_dst_part_paths):
                        # Partition images are binary: 'wb+' instead of the
                        # original text-mode 'w+', which would corrupt the
                        # comparison below on Python 3.
                        handle = open(path, 'wb+')
                        file_handles.append(handle)
                        out_dst_parts[name] = handle.name
                else:
                    # No explicit output paths: use temp files, deleted on
                    # close (NamedTemporaryFile defaults to binary 'w+b').
                    for name in args.part_names:
                        handle = tempfile.NamedTemporaryFile()
                        file_handles.append(handle)
                        out_dst_parts[name] = handle.name

                payload.Apply(out_dst_parts, **dargs)

                # If destination kernel and rootfs partitions are not given,
                # then this just becomes an apply operation with no check.
                if all(args.dst_part_paths):
                    # Prior to comparing, add the unused space past the
                    # filesystem boundary in the new target partitions to
                    # become the same size as the given partitions. This
                    # will truncate to larger size.
                    for part_name, out_dst_part, dst_part in zip(
                            args.part_names, file_handles,
                            args.dst_part_paths):
                        out_dst_part.truncate(os.path.getsize(dst_part))

                        # Compare resulting partitions with the ones from
                        # the target image.
                        if not filecmp.cmp(out_dst_part.name, dst_part):
                            raise error.PayloadError(
                                'Resulting %s partition corrupted.' %
                                part_name)

                # Close the output files. If args.out_dst_* was not given,
                # then these files are created as temp files and will be
                # deleted upon close().
                for handle in file_handles:
                    handle.close()
        # 'except X as e' replaces the Python-2-only 'except X, e' form; the
        # 'as' spelling works on Python 2.6+ and is required on Python 3.
        except error.PayloadError as e:
            sys.stderr.write('Error: %s\n' % e)
            return 1