Example #1
def SignUncompressedApex(avbtool,
                         apex_file,
                         payload_key,
                         container_key,
                         container_pw,
                         apk_keys,
                         codename_to_api_level_map,
                         no_hashtree,
                         signing_args=None,
                         sign_tool=None):
    """Signs the current uncompressed APEX with the given payload/container keys.

  Args:
    avbtool: The path to the avbtool binary.
    apex_file: Uncompressed APEX file.
    payload_key: The path to payload signing key (w/ extension).
    container_key: The path to container signing key (w/o extension).
    container_pw: The matching password of the container_key, or None.
    apk_keys: A dict that holds the signing keys for apk files.
    codename_to_api_level_map: A dict that maps from codename to API level.
    no_hashtree: Don't include hashtree in the signed APEX.
    signing_args: Additional args to be passed to the payload signer.
    sign_tool: A tool to sign the contents of the APEX.

  Returns:
    The path to the signed APEX file.
  """
    # 1. Extract the apex payload image and sign the files (e.g. APKs). Repack
    # the apex file after signing.
    apk_signer = ApexApkSigner(apex_file, container_pw,
                               codename_to_api_level_map, avbtool, sign_tool)
    apex_file = apk_signer.ProcessApexFile(apk_keys, payload_key, signing_args)

    # 2a. Extract and sign the APEX_PAYLOAD_IMAGE entry with the given
    # payload_key.
    payload_dir = common.MakeTempDir(prefix='apex-payload-')
    with zipfile.ZipFile(apex_file) as apex_fd:
        payload_file = apex_fd.extract(APEX_PAYLOAD_IMAGE, payload_dir)
        zip_items = apex_fd.namelist()

    payload_info = ParseApexPayloadInfo(avbtool, payload_file)
    if no_hashtree is None:
        no_hashtree = payload_info.get("Tree Size", 0) == 0
    SignApexPayload(avbtool, payload_file, payload_key,
                    payload_info['apex.key'], payload_info['Algorithm'],
                    payload_info['Salt'], payload_info['Hash Algorithm'],
                    no_hashtree, signing_args)

    # 2b. Update the embedded payload public key.
    payload_public_key = common.ExtractAvbPublicKey(avbtool, payload_key)
    common.ZipDelete(apex_file, APEX_PAYLOAD_IMAGE)
    if APEX_PUBKEY in zip_items:
        common.ZipDelete(apex_file, APEX_PUBKEY)
    apex_zip = zipfile.ZipFile(apex_file, 'a', allowZip64=True)
    common.ZipWrite(apex_zip, payload_file, arcname=APEX_PAYLOAD_IMAGE)
    common.ZipWrite(apex_zip, payload_public_key, arcname=APEX_PUBKEY)
    common.ZipClose(apex_zip)

    # 3. Sign the APEX container with container_key.
    signed_apex = common.MakeTempFile(prefix='apex-container-', suffix='.apex')

    # Specify the 4K alignment when calling SignApk.
    extra_signapk_args = OPTIONS.extra_signapk_args[:]
    extra_signapk_args.extend(['-a', '4096', '--align-file-size'])

    password = container_pw.get(container_key) if container_pw else None
    common.SignFile(apex_file,
                    signed_apex,
                    container_key,
                    password,
                    codename_to_api_level_map=codename_to_api_level_map,
                    extra_signapk_args=extra_signapk_args)

    return signed_apex
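
A minimal invocation sketch for the function above; every path, key name, and map value below is a hypothetical placeholder rather than something taken from the source.

signed = SignUncompressedApex(
    avbtool='avbtool',                      # placeholder tool path
    apex_file='/tmp/com.example.apex',      # placeholder input APEX
    payload_key='testkey.pem',              # payload key, with extension
    container_key='testkey',                # container key, without extension
    container_pw=None,                      # or a {key_name: password} dict
    apk_keys={},                            # per-APK signing keys
    codename_to_api_level_map={'S': 31},    # hypothetical mapping
    no_hashtree=None,                       # None: derive from payload info
    signing_args=None)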
Example #2
def AddCareMapForAbOta(output_zip, ab_partitions, image_paths):
    """Generates and adds care_map.pb for a/b partition that has care_map.

  Args:
    output_zip: The output zip file (needs to be already open), or None to
        write care_map.pb to OPTIONS.input_tmp/.
    ab_partitions: The list of A/B partitions.
    image_paths: A map from the partition name to the image path.
  """
    care_map_list = []
    for partition in ab_partitions:
        partition = partition.strip()
        if partition not in common.PARTITIONS_WITH_CARE_MAP:
            continue

        verity_block_device = "{}_verity_block_device".format(partition)
        avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
        if (verity_block_device in OPTIONS.info_dict
                or OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
            image_path = image_paths[partition]
            assert os.path.exists(image_path)
            care_map_list += GetCareMap(partition, image_path)

            # adds fingerprint field to the care_map
            build_props = OPTIONS.info_dict.get(partition + ".build.prop", {})
            prop_name_list = [
                "ro.{}.build.fingerprint".format(partition),
                "ro.{}.build.thumbprint".format(partition)
            ]

            present_props = [x for x in prop_name_list if x in build_props]
            if not present_props:
                logger.warning("fingerprint is not present for partition %s",
                               partition)
                property_id, fingerprint = "unknown", "unknown"
            else:
                property_id = present_props[0]
                fingerprint = build_props[property_id]
            care_map_list += [property_id, fingerprint]

    if not care_map_list:
        return

    # Converts the list into a protobuf message by calling care_map_generator,
    # and writes the result to a temp file.
    temp_care_map_text = common.MakeTempFile(prefix="caremap_text-",
                                             suffix=".txt")
    with open(temp_care_map_text, 'w') as text_file:
        text_file.write('\n'.join(care_map_list))

    temp_care_map = common.MakeTempFile(prefix="caremap-", suffix=".pb")
    care_map_gen_cmd = [
        "care_map_generator", temp_care_map_text, temp_care_map
    ]
    common.RunAndCheckOutput(care_map_gen_cmd)

    care_map_path = "META/care_map.pb"
    if output_zip and care_map_path not in output_zip.namelist():
        common.ZipWrite(output_zip, temp_care_map, arcname=care_map_path)
    else:
        shutil.copy(temp_care_map,
                    os.path.join(OPTIONS.input_tmp, care_map_path))
        if output_zip:
            OPTIONS.replace_updated_files_list.append(care_map_path)
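
Judging from how care_map_list is assembled above (GetCareMap contributes the partition name and its block ranges, then the property id and fingerprint are appended), the text file handed to care_map_generator carries four lines per partition. A made-up illustration:

# Illustrative only; the range string and fingerprint are placeholders.
care_map_list = [
    'system', '2,0,1024',                               # from GetCareMap()
    'ro.system.build.fingerprint', 'generic/aosp:11',   # property id, value
]
print('\n'.join(care_map_list))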
Example #3
def _GetTestPayload():
    payload_file = common.MakeTempFile(prefix='apex-', suffix='.img')
    with open(payload_file, 'wb') as payload_fp:
        payload_fp.write(os.urandom(8192))
    return payload_file
Example #4
def CreateImage(input_dir, info_dict, what, block_list=None):
  print("creating " + what + ".img...")

  img = common.MakeTempFile(prefix=what + "-", suffix=".img")

  # The name of the directory it is making an image out of matters to
  # mkyaffs2image.  It wants "system" but we have a directory named
  # "SYSTEM", so create a symlink.
  try:
    os.symlink(os.path.join(input_dir, what.upper()),
               os.path.join(input_dir, what))
  except OSError as e:
    # bogus error on my mac version?
    #   File "./build/tools/releasetools/img_from_target_files"
    #     os.path.join(OPTIONS.input_tmp, "system"))
    # OSError: [Errno 17] File exists
    if e.errno != errno.EEXIST:
      raise

  image_props = build_image.ImagePropFromGlobalDict(info_dict, what)
  fstab = info_dict["fstab"]
  if fstab:
    image_props["fs_type"] = fstab["/" + what].fs_type

  # Use a fixed timestamp (01/01/2009) when packaging the image.
  # Bug: 24377993
  epoch = datetime.datetime.fromtimestamp(0)
  timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
  image_props["timestamp"] = int(timestamp)

  if what == "system":
    fs_config_prefix = ""
  else:
    fs_config_prefix = what + "_"

  fs_config = os.path.join(
      input_dir, "META/" + fs_config_prefix + "filesystem_config.txt")
  if not os.path.exists(fs_config):
    fs_config = None

  fc_config = os.path.join(input_dir, "BOOT/RAMDISK/file_contexts")
  if not os.path.exists(fc_config):
    fc_config = None

  # Override values loaded from info_dict.
  if fs_config:
    image_props["fs_config"] = fs_config
  if fc_config:
    image_props["selinux_fc"] = fc_config
  if block_list:
    image_props["block_list"] = block_list
  if image_props.get("system_root_image") == "true":
    image_props["ramdisk_dir"] = os.path.join(input_dir, "BOOT/RAMDISK")
    image_props["ramdisk_fs_config"] = os.path.join(
        input_dir, "META/boot_filesystem_config.txt")

  succ = build_image.BuildImage(os.path.join(input_dir, what),
                                image_props, img)
  assert succ, "build " + what + ".img image failed"

  return img
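
As a quick sanity check on the fixed timestamp above: against an explicit 1970-01-01 epoch the constant is exactly 1230768000; note that the code's datetime.datetime.fromtimestamp(0) yields the local-time epoch, so the stored value can differ by the timezone offset.

import datetime

epoch = datetime.datetime(1970, 1, 1)  # explicit epoch, timezone-independent
timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
assert int(timestamp) == 1230768000    # 14245 days * 86400 seconds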
Example #5
def VerifyPackage(cert, package):
    """Verify the given package with the certificate.

  (Comments from bootable/recovery/verifier.cpp:)

  An archive with a whole-file signature will end in six bytes:

    (2-byte signature start) $ff $ff (2-byte comment size)

  (As far as the ZIP format is concerned, these are part of the
  archive comment.) We start by reading this footer; it tells
  us how far back from the end we have to start reading to find
  the whole comment.
  """

    print('Package: %s' % (package, ))
    print('Certificate: %s' % (cert, ))

    # Read in the package.
    with open(package, 'rb') as package_file:
        package_bytes = package_file.read()

    length = len(package_bytes)
    assert length >= 6, "Not big enough to contain footer."

    footer = [ord(x) for x in package_bytes[-6:]]
    assert footer[2] == 0xff and footer[3] == 0xff, "Footer is wrong."

    signature_start_from_end = (footer[1] << 8) + footer[0]
    assert signature_start_from_end > 6, "Signature start is in the footer."

    signature_start = length - signature_start_from_end

    # Determine how much of the file is covered by the signature. This is
    # everything except the signature data and length, which includes all of the
    # EOCD except for the comment length field (2 bytes) and the comment data.
    comment_len = (footer[5] << 8) + footer[4]
    signed_len = length - comment_len - 2

    print('Package length: %d' % (length, ))
    print('Comment length: %d' % (comment_len, ))
    print('Signed data length: %d' % (signed_len, ))
    print('Signature start: %d' % (signature_start, ))

    use_sha256 = CertUsesSha256(cert)
    print('Use SHA-256: %s' % (use_sha256, ))

    if use_sha256:
        h = sha256()
    else:
        h = sha1()
    h.update(package_bytes[:signed_len])
    package_digest = h.hexdigest().lower()

    print('Digest: %s' % (package_digest, ))

    # Get the signature from the input package.
    signature = package_bytes[signature_start:-6]
    sig_file = common.MakeTempFile(prefix='sig-')
    with open(sig_file, 'wb') as f:
        f.write(signature)

    # Parse the signature and get the hash.
    cmd = ['openssl', 'asn1parse', '-inform', 'DER', '-in', sig_file]
    p1 = common.Run(cmd, stdout=subprocess.PIPE)
    sig, _ = p1.communicate()
    assert p1.returncode == 0, "Failed to parse the signature."

    digest_line = sig.strip().split('\n')[-1]
    digest_string = digest_line.split(':')[3]
    digest_file = common.MakeTempFile(prefix='digest-')
    with open(digest_file, 'wb') as f:
        f.write(digest_string.decode('hex'))

    # Verify the digest by outputting the decrypted result as an ASN.1
    # structure.
    decrypted_file = common.MakeTempFile(prefix='decrypted-')
    cmd = [
        'openssl', 'rsautl', '-verify', '-certin', '-inkey', cert, '-in',
        digest_file, '-out', decrypted_file
    ]
    p1 = common.Run(cmd, stdout=subprocess.PIPE)
    p1.communicate()
    assert p1.returncode == 0, "Failed to run openssl rsautl -verify."

    # Parse the output ASN.1 structure.
    cmd = ['openssl', 'asn1parse', '-inform', 'DER', '-in', decrypted_file]
    p1 = common.Run(cmd, stdout=subprocess.PIPE)
    decrypted_output, _ = p1.communicate()
    assert p1.returncode == 0, "Failed to parse the output."

    digest_line = decrypted_output.strip().split('\n')[-1]
    digest_string = digest_line.split(':')[3].lower()

    # Verify that the two digest strings match.
    assert package_digest == digest_string, "Verification failed."

    # Verified successfully upon reaching here.
    print('\nWhole package signature VERIFIED\n')
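
The six-byte footer described in the docstring can also be decoded with struct; the sizes below are hypothetical, chosen only to satisfy the assertions in the function.

import struct

# Hypothetical footer: signature data starts 1558 bytes from EOF and the ZIP
# archive comment is 1564 bytes long; all three fields are little-endian.
footer = struct.pack('<HHH', 1558, 0xffff, 1564)
sig_start_from_end, marker, comment_len = struct.unpack('<HHH', footer)
assert marker == 0xffff                       # the $ff $ff marker bytes

length = 4096                                 # hypothetical package size
signed_len = length - comment_len - 2         # 2530 bytes are covered
signature_start = length - sig_start_from_end
assert signature_start > signed_len           # signature lies in the comment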
Example #6
def ConvertBlockMapToBaseFs(block_map_file):
    base_fs_file = common.MakeTempFile(prefix="script_gen_", suffix=".base_fs")
    convert_command = ["blk_alloc_to_base_fs", block_map_file, base_fs_file]
    (_, exit_code) = RunCommand(convert_command)
    return base_fs_file if exit_code == 0 else None
Example #7
def GetTargetFilesZipForSecondaryImages(input_file, skip_postinstall=False):
    """Returns a target-files.zip file for generating secondary payload.

  Although the original target-files.zip already contains secondary slot
  images (i.e. IMAGES/system_other.img), we need to rename the files to the
  ones without _other suffix. Note that we cannot instead modify the names in
  META/ab_partitions.txt, because there are no matching partitions on device.

  For the partitions that don't have secondary images, the ones for primary
  slot will be used. This is to ensure that we always have valid boot, vbmeta,
  bootloader images in the inactive slot.

  Args:
    input_file: The input target-files.zip file.
    skip_postinstall: Whether to skip copying the postinstall config file.

  Returns:
    The filename of the target-files.zip for generating secondary payload.
  """
    def GetInfoForSecondaryImages(info_file):
        """Updates info file for secondary payload generation.

    Scan each line in the info file, and remove the unwanted partitions from
    the dynamic partition list in the related properties. e.g.
    "super_google_dynamic_partitions_partition_list=system vendor product"
    will become "super_google_dynamic_partitions_partition_list=system".

    Args:
      info_file: The input info file. e.g. misc_info.txt.

    Returns:
      A string of the updated info content.
    """

        output_list = []
        with open(info_file) as f:
            lines = f.read().splitlines()

        # The suffix in partition_list variables that follows the name of the
        # partition group.
        LIST_SUFFIX = 'partition_list'
        for line in lines:
            if line.startswith('#') or '=' not in line:
                output_list.append(line)
                continue
            key, value = line.strip().split('=', 1)
            if key == 'dynamic_partition_list' or key.endswith(LIST_SUFFIX):
                partitions = value.split()
                partitions = [
                    partition for partition in partitions
                    if partition not in SECONDARY_PAYLOAD_SKIPPED_IMAGES
                ]
                output_list.append('{}={}'.format(key, ' '.join(partitions)))
            elif key in ['virtual_ab', "virtual_ab_retrofit"]:
                # Remove the virtual_ab flag from the secondary payload so
                # that the OTA client doesn't use snapshots for the secondary
                # update.
                pass
            else:
                output_list.append(line)
        return '\n'.join(output_list)

    target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
    target_zip = zipfile.ZipFile(target_file, 'w', allowZip64=True)

    with zipfile.ZipFile(input_file, 'r') as input_zip:
        infolist = input_zip.infolist()

    input_tmp = common.UnzipTemp(input_file, UNZIP_PATTERN)
    for info in infolist:
        unzipped_file = os.path.join(input_tmp, *info.filename.split('/'))
        if info.filename == 'IMAGES/system_other.img':
            common.ZipWrite(target_zip,
                            unzipped_file,
                            arcname='IMAGES/system.img')

        # Primary images and friends need to be skipped explicitly.
        elif info.filename in ('IMAGES/system.img', 'IMAGES/system.map'):
            pass

        # Copy images that are not in SECONDARY_PAYLOAD_SKIPPED_IMAGES.
        elif info.filename.startswith(('IMAGES/', 'RADIO/')):
            image_name = os.path.basename(info.filename)
            if image_name not in [
                    '{}.img'.format(partition)
                    for partition in SECONDARY_PAYLOAD_SKIPPED_IMAGES
            ]:
                common.ZipWrite(target_zip,
                                unzipped_file,
                                arcname=info.filename)

        # Skip copying the postinstall config if requested.
        elif skip_postinstall and info.filename == POSTINSTALL_CONFIG:
            pass

        elif info.filename.startswith('META/'):
            # Remove the unnecessary partitions for secondary images from the
            # ab_partitions file.
            if info.filename == AB_PARTITIONS:
                with open(unzipped_file) as f:
                    partition_list = f.read().splitlines()
                partition_list = [
                    partition for partition in partition_list if partition
                    and partition not in SECONDARY_PAYLOAD_SKIPPED_IMAGES
                ]
                common.ZipWriteStr(target_zip, info.filename,
                                   '\n'.join(partition_list))
            # Remove the unnecessary partitions from the dynamic partitions list.
            elif (info.filename == 'META/misc_info.txt'
                  or info.filename == DYNAMIC_PARTITION_INFO):
                modified_info = GetInfoForSecondaryImages(unzipped_file)
                common.ZipWriteStr(target_zip, info.filename, modified_info)
            else:
                common.ZipWrite(target_zip,
                                unzipped_file,
                                arcname=info.filename)

    common.ZipClose(target_zip)

    return target_file
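
A pure-Python restatement of the key-filtering rule inside GetInfoForSecondaryImages; 'modem' here is a hypothetical stand-in for SECONDARY_PAYLOAD_SKIPPED_IMAGES, not its actual contents.

skipped = {'modem'}                          # hypothetical skipped set
line = 'super_google_dynamic_partitions_partition_list=system vendor modem'
key, value = line.split('=', 1)
if key == 'dynamic_partition_list' or key.endswith('partition_list'):
    kept = [p for p in value.split() if p not in skipped]
    line = '{}={}'.format(key, ' '.join(kept))
assert line == 'super_google_dynamic_partitions_partition_list=system vendor'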
Example #8
def BuildImage(in_dir, prop_dict, out_file, target_out=None):
    """Build an image to out_file from in_dir with property prop_dict.

  Args:
    in_dir: path of input directory.
    prop_dict: property dictionary.
    out_file: path of the output image file.
    target_out: path of the product out directory to read device specific FS
        config files.

  Returns:
    True iff the image is built successfully.
  """
    # system_root_image=true: build a system.img that combines the contents of
    # /system and the ramdisk, and can be mounted at the root of the file system.
    origin_in = in_dir
    fs_config = prop_dict.get("fs_config")
    if (prop_dict.get("system_root_image") == "true"
            and prop_dict["mount_point"] == "system"):
        in_dir = common.MakeTempDir()
        # Change the mount point to "/".
        prop_dict["mount_point"] = "/"
        if fs_config:
            # We need to merge the fs_config files of system and ramdisk.
            merged_fs_config = common.MakeTempFile(prefix="root_fs_config",
                                                   suffix=".txt")
            with open(merged_fs_config, "w") as fw:
                if "ramdisk_fs_config" in prop_dict:
                    with open(prop_dict["ramdisk_fs_config"]) as fr:
                        fw.writelines(fr.readlines())
                with open(fs_config) as fr:
                    fw.writelines(fr.readlines())
            fs_config = merged_fs_config

    build_command = []
    fs_type = prop_dict.get("fs_type", "")
    run_e2fsck = False

    fs_spans_partition = True
    if fs_type.startswith("squash"):
        fs_spans_partition = False

    is_verity_partition = "verity_block_device" in prop_dict
    verity_supported = prop_dict.get("verity") == "true"
    verity_fec_supported = prop_dict.get("verity_fec") == "true"

    # Adjust the partition size to make room for the hashes if this is to be
    # verified.
    if verity_supported and is_verity_partition:
        partition_size = int(prop_dict.get("partition_size"))
        (adjusted_size,
         verity_size) = AdjustPartitionSizeForVerity(partition_size,
                                                     verity_fec_supported)
        if not adjusted_size:
            return False
        prop_dict["partition_size"] = str(adjusted_size)
        prop_dict["original_partition_size"] = str(partition_size)
        prop_dict["verity_size"] = str(verity_size)

    # Adjust partition size for AVB hash footer or AVB hashtree footer.
    avb_footer_type = ''
    if prop_dict.get("avb_hash_enable") == "true":
        avb_footer_type = 'hash'
    elif prop_dict.get("avb_hashtree_enable") == "true":
        avb_footer_type = 'hashtree'

    if avb_footer_type:
        avbtool = prop_dict["avb_avbtool"]
        partition_size = prop_dict["partition_size"]
        # avb_add_hash_footer_args or avb_add_hashtree_footer_args.
        additional_args = prop_dict["avb_add_" + avb_footer_type +
                                    "_footer_args"]
        max_image_size = AVBCalcMaxImageSize(avbtool, avb_footer_type,
                                             partition_size, additional_args)
        if max_image_size == 0:
            return False
        prop_dict["partition_size"] = str(max_image_size)
        prop_dict["original_partition_size"] = partition_size

    if fs_type.startswith("ext"):
        build_command = [prop_dict["ext_mkuserimg"]]
        if "extfs_sparse_flag" in prop_dict:
            build_command.append(prop_dict["extfs_sparse_flag"])
            run_e2fsck = True
        build_command.extend(
            [in_dir, out_file, fs_type, prop_dict["mount_point"]])
        build_command.append(prop_dict["partition_size"])
        if "journal_size" in prop_dict:
            build_command.extend(["-j", prop_dict["journal_size"]])
        if "timestamp" in prop_dict:
            build_command.extend(["-T", str(prop_dict["timestamp"])])
        if fs_config:
            build_command.extend(["-C", fs_config])
        if target_out:
            build_command.extend(["-D", target_out])
        if "block_list" in prop_dict:
            build_command.extend(["-B", prop_dict["block_list"]])
        if "base_fs_file" in prop_dict:
            base_fs_file = ConvertBlockMapToBaseFs(prop_dict["base_fs_file"])
            if base_fs_file is None:
                return False
            build_command.extend(["-d", base_fs_file])
        build_command.extend(["-L", prop_dict["mount_point"]])
        if "extfs_inode_count" in prop_dict:
            build_command.extend(["-i", prop_dict["extfs_inode_count"]])
        if "flash_erase_block_size" in prop_dict:
            build_command.extend(["-e", prop_dict["flash_erase_block_size"]])
        if "flash_logical_block_size" in prop_dict:
            build_command.extend(["-o", prop_dict["flash_logical_block_size"]])
        # Specify UUID and hash_seed if using mke2fs.
        if prop_dict["ext_mkuserimg"] == "mkuserimg_mke2fs.sh":
            if "uuid" in prop_dict:
                build_command.extend(["-U", prop_dict["uuid"]])
            if "hash_seed" in prop_dict:
                build_command.extend(["-S", prop_dict["hash_seed"]])
        if "ext4_share_dup_blocks" in prop_dict:
            build_command.append("-c")
        if "selinux_fc" in prop_dict:
            build_command.append(prop_dict["selinux_fc"])
    elif fs_type.startswith("squash"):
        build_command = ["mksquashfsimage.sh"]
        build_command.extend([in_dir, out_file])
        if "squashfs_sparse_flag" in prop_dict:
            build_command.extend([prop_dict["squashfs_sparse_flag"]])
        build_command.extend(["-m", prop_dict["mount_point"]])
        if target_out:
            build_command.extend(["-d", target_out])
        if fs_config:
            build_command.extend(["-C", fs_config])
        if "selinux_fc" in prop_dict:
            build_command.extend(["-c", prop_dict["selinux_fc"]])
        if "block_list" in prop_dict:
            build_command.extend(["-B", prop_dict["block_list"]])
        if "squashfs_block_size" in prop_dict:
            build_command.extend(["-b", prop_dict["squashfs_block_size"]])
        if "squashfs_compressor" in prop_dict:
            build_command.extend(["-z", prop_dict["squashfs_compressor"]])
        if "squashfs_compressor_opt" in prop_dict:
            build_command.extend(["-zo", prop_dict["squashfs_compressor_opt"]])
        if prop_dict.get("squashfs_disable_4k_align") == "true":
            build_command.extend(["-a"])
    elif fs_type.startswith("f2fs"):
        build_command = ["mkf2fsuserimg.sh"]
        build_command.extend([out_file, prop_dict["partition_size"]])
        if fs_config:
            build_command.extend(["-C", fs_config])
        build_command.extend(["-f", in_dir])
        if target_out:
            build_command.extend(["-D", target_out])
        if "selinux_fc" in prop_dict:
            build_command.extend(["-s", prop_dict["selinux_fc"]])
        build_command.extend(["-t", prop_dict["mount_point"]])
        if "timestamp" in prop_dict:
            build_command.extend(["-T", str(prop_dict["timestamp"])])
        build_command.extend(["-L", prop_dict["mount_point"]])
    else:
        print("Error: unknown filesystem type '%s'" % (fs_type))
        return False

    if in_dir != origin_in:
        # Construct a staging directory of the root file system.
        ramdisk_dir = prop_dict.get("ramdisk_dir")
        if ramdisk_dir:
            shutil.rmtree(in_dir)
            shutil.copytree(ramdisk_dir, in_dir, symlinks=True)
        staging_system = os.path.join(in_dir, "system")
        shutil.rmtree(staging_system, ignore_errors=True)
        shutil.copytree(origin_in, staging_system, symlinks=True)

    (mkfs_output, exit_code) = RunCommand(build_command)
    if exit_code != 0:
        print("Error: '%s' failed with exit code %d:\n%s" %
              (build_command, exit_code, mkfs_output))
        return False

    # Check if there's enough headroom space available for ext4 image.
    if "partition_headroom" in prop_dict and fs_type.startswith("ext4"):
        if not CheckHeadroom(mkfs_output, prop_dict):
            return False

    if not fs_spans_partition:
        mount_point = prop_dict.get("mount_point")
        partition_size = int(prop_dict.get("partition_size"))
        image_size = GetSimgSize(out_file)
        if image_size > partition_size:
            print(
                "Error: %s image size of %d is larger than partition size of "
                "%d" % (mount_point, image_size, partition_size))
            return False
        if verity_supported and is_verity_partition:
            ZeroPadSimg(out_file, partition_size - image_size)

    # Create the verified image if this is to be verified.
    if verity_supported and is_verity_partition:
        if not MakeVerityEnabledImage(out_file, verity_fec_supported,
                                      prop_dict):
            return False

    # Add AVB HASH or HASHTREE footer (metadata).
    if avb_footer_type:
        avbtool = prop_dict["avb_avbtool"]
        original_partition_size = prop_dict["original_partition_size"]
        partition_name = prop_dict["partition_name"]
        # key_path and algorithm are only available when chain partition is used.
        key_path = prop_dict.get("avb_key_path")
        algorithm = prop_dict.get("avb_algorithm")
        salt = prop_dict.get("avb_salt")
        # avb_add_hash_footer_args or avb_add_hashtree_footer_args
        additional_args = prop_dict["avb_add_" + avb_footer_type +
                                    "_footer_args"]
        if not AVBAddFooter(out_file, avbtool, avb_footer_type,
                            original_partition_size, partition_name, key_path,
                            algorithm, salt, additional_args):
            return False

    if run_e2fsck and prop_dict.get("skip_fsck") != "true":
        success, unsparse_image = UnsparseImage(out_file, replace=False)
        if not success:
            return False

        # Run e2fsck on the inflated image file
        e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image]
        (e2fsck_output, exit_code) = RunCommand(e2fsck_command)

        os.remove(unsparse_image)

        if exit_code != 0:
            print("Error: '%s' failed with exit code %d:\n%s" %
                  (e2fsck_command, exit_code, e2fsck_output))
            return False

    return True
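
A hedged sketch of the smallest prop_dict that reaches the ext4 branch above; all values are placeholders, and the mkuserimg script must be available on PATH for the call to succeed.

# Hypothetical minimal property dict exercising the ext4 branch.
prop_dict = {
    'mount_point': 'system',
    'fs_type': 'ext4',
    'partition_size': '1073741824',          # 1 GiB, placeholder
    'ext_mkuserimg': 'mkuserimg_mke2fs.sh',
}
ok = BuildImage('/path/to/staging/system', prop_dict, '/tmp/system.img')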
Example #9
def SignCompressedApex(avbtool,
                       apex_file,
                       payload_key,
                       container_key,
                       container_pw,
                       apk_keys,
                       codename_to_api_level_map,
                       no_hashtree,
                       signing_args=None):
    """Signs the current compressed APEX with the given payload/container keys.

  Args:
    avbtool: The path to the avbtool binary.
    apex_file: Compressed APEX file.
    payload_key: The path to payload signing key (w/ extension).
    container_key: The path to container signing key (w/o extension).
    container_pw: The matching password of the container_key, or None.
    apk_keys: A dict that holds the signing keys for apk files.
    codename_to_api_level_map: A dict that maps from codename to API level.
    no_hashtree: Don't include hashtree in the signed APEX.
    signing_args: Additional args to be passed to the payload signer.

  Returns:
    The path to the signed APEX file.
  """
    debugfs_path = os.path.join(OPTIONS.search_path, 'bin', 'debugfs_static')

    # 1. Decompress original_apex inside compressed apex.
    original_apex_file = common.MakeTempFile(prefix='original-apex-',
                                             suffix='.apex')
    # Decompression target path should not exist
    os.remove(original_apex_file)
    common.RunAndCheckOutput([
        'deapexer', '--debugfs_path', debugfs_path, 'decompress', '--input',
        apex_file, '--output', original_apex_file
    ])

    # 2. Sign original_apex
    signed_original_apex_file = SignUncompressedApex(
        avbtool, original_apex_file, payload_key, container_key, container_pw,
        apk_keys, codename_to_api_level_map, no_hashtree, signing_args)

    # 3. Compress signed original apex.
    compressed_apex_file = common.MakeTempFile(prefix='apex-container-',
                                               suffix='.capex')
    common.RunAndCheckOutput([
        'apex_compression_tool', 'compress', '--apex_compression_tool_path',
        os.getenv('PATH'), '--input', signed_original_apex_file, '--output',
        compressed_apex_file
    ])

    # 4. Align apex
    aligned_apex = common.MakeTempFile(prefix='apex-container-',
                                       suffix='.capex')
    common.RunAndCheckOutput(
        ['zipalign', '-f', '4096', compressed_apex_file, aligned_apex])

    # 5. Sign the APEX container with container_key.
    signed_apex = common.MakeTempFile(prefix='apex-container-',
                                      suffix='.capex')

    # Specify the 4K alignment when calling SignApk.
    extra_signapk_args = OPTIONS.extra_signapk_args[:]
    extra_signapk_args.extend(['-a', '4096'])

    password = container_pw.get(container_key) if container_pw else None
    common.SignFile(aligned_apex,
                    signed_apex,
                    container_key,
                    password,
                    codename_to_api_level_map=codename_to_api_level_map,
                    extra_signapk_args=extra_signapk_args)

    return signed_apex
Example #10
def SignApex(avbtool,
             apex_data,
             payload_key,
             container_key,
             container_pw,
             apk_keys,
             codename_to_api_level_map,
             no_hashtree,
             signing_args=None):
    """Signs the current APEX with the given payload/container keys.

  Args:
    avbtool: The path to the avbtool binary.
    apex_data: Raw APEX data.
    payload_key: The path to payload signing key (w/ extension).
    container_key: The path to container signing key (w/o extension).
    container_pw: The matching password of the container_key, or None.
    apk_keys: A dict that holds the signing keys for apk files.
    codename_to_api_level_map: A dict that maps from codename to API level.
    no_hashtree: Don't include hashtree in the signed APEX.
    signing_args: Additional args to be passed to the payload signer.

  Returns:
    The path to the signed APEX file.
  """
    apex_file = common.MakeTempFile(prefix='apex-container-', suffix='.apex')
    with open(apex_file, 'wb') as output_fp:
        output_fp.write(apex_data)

    debugfs_path = os.path.join(OPTIONS.search_path, 'bin', 'debugfs_static')
    cmd = [
        'deapexer', '--debugfs_path', debugfs_path, 'info', '--print-type',
        apex_file
    ]

    try:
        apex_type = common.RunAndCheckOutput(cmd).strip()
        if apex_type == 'UNCOMPRESSED':
            return SignUncompressedApex(
                avbtool,
                apex_file,
                payload_key=payload_key,
                container_key=container_key,
                container_pw=None,
                codename_to_api_level_map=codename_to_api_level_map,
                no_hashtree=no_hashtree,
                apk_keys=apk_keys,
                signing_args=signing_args)
        elif apex_type == 'COMPRESSED':
            return SignCompressedApex(
                avbtool,
                apex_file,
                payload_key=payload_key,
                container_key=container_key,
                container_pw=None,
                codename_to_api_level_map=codename_to_api_level_map,
                no_hashtree=no_hashtree,
                apk_keys=apk_keys,
                signing_args=signing_args)
        else:
            raise ApexInfoError('Unsupported apex type {}'.format(apex_type))

    except common.ExternalError as e:
        raise ApexInfoError('Failed to get type for {}:\n{}'.format(
            apex_file, e))
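
Since SignApex takes raw bytes rather than a path, a caller might drive it like this; the file name and keys are placeholders.

# Illustrative driver; all paths and keys are placeholders.
with open('com.example.apex', 'rb') as f:
    apex_data = f.read()
signed_path = SignApex(
    avbtool='avbtool',
    apex_data=apex_data,
    payload_key='testkey.pem',
    container_key='testkey',
    container_pw=None,
    apk_keys={},
    codename_to_api_level_map={'S': 31},
    no_hashtree=False)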
Example #11
def _gen_fs_config(partition):
    fs_config = common.MakeTempFile(suffix='.txt')
    with open(fs_config, 'w') as fs_config_fp:
        fs_config_fp.write('fs-config-{}\n'.format(partition))
    return fs_config
Example #12
def Sign(self, in_file):
    """Signs the given input file. Returns the output filename."""
    out_file = common.MakeTempFile(prefix="signed-", suffix=".bin")
    cmd = [self.signer] + self.signer_args + ['-in', in_file, '-out', out_file]
    common.RunAndCheckOutput(cmd)
    return out_file
Example #13
def setUp(self):
    self.bkup = ebmlib.FileBackupMgr(None, u"%s~")
    self.path = common.MakeTempFile("test.txt")
    self.file = ebmlib.FileObjectImpl(self.path)
Example #14
def WriteFullOTAPackage(input_zip, output_file):
    target_info = common.BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)

    # We don't know what version it will be installed on top of. We expect the API
    # just won't change very often. Similarly for fstab, it might have changed in
    # the target build.
    target_api_version = target_info["recovery_api_version"]
    script = edify_generator.EdifyGenerator(target_api_version, target_info)

    if target_info.oem_props and not OPTIONS.oem_no_mount:
        target_info.WriteMountOemScript(script)

    metadata = GetPackageMetadata(target_info)

    if not OPTIONS.no_signing:
        staging_file = common.MakeTempFile(suffix='.zip')
    else:
        staging_file = output_file

    output_zip = zipfile.ZipFile(staging_file,
                                 "w",
                                 compression=zipfile.ZIP_DEFLATED)

    device_specific = common.DeviceSpecificParams(
        input_zip=input_zip,
        input_version=target_api_version,
        output_zip=output_zip,
        script=script,
        input_tmp=OPTIONS.input_tmp,
        metadata=metadata,
        info_dict=OPTIONS.info_dict)

    assert HasRecoveryPatch(input_zip, info_dict=OPTIONS.info_dict)

    # Assertions (e.g. downgrade check, device properties check).
    #ts = target_info.GetBuildProp("ro.build.date.utc")
    #ts_text = target_info.GetBuildProp("ro.build.date")
    #script.AssertOlderBuild(ts, ts_text)

    target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
    device_specific.FullOTA_Assertions()

    block_diff_dict = GetBlockDifferences(target_zip=input_zip,
                                          source_zip=None,
                                          target_info=target_info,
                                          source_info=None,
                                          device_specific=device_specific)

    # Two-step package strategy (in chronological order, which is *not*
    # the order in which the generated script has things):
    #
    # if stage is not "2/3" or "3/3":
    #    write recovery image to boot partition
    #    set stage to "2/3"
    #    reboot to boot partition and restart recovery
    # else if stage is "2/3":
    #    write recovery image to recovery partition
    #    set stage to "3/3"
    #    reboot to recovery partition and restart recovery
    # else:
    #    (stage must be "3/3")
    #    set stage to ""
    #    do normal full package installation:
    #       wipe and install system, boot image, etc.
    #       set up system to update recovery partition on first boot
    #    complete script normally
    #    (allow recovery to mark itself finished and reboot)

    recovery_img = common.GetBootableImage("recovery.img", "recovery.img",
                                           OPTIONS.input_tmp, "RECOVERY")
    if OPTIONS.two_step:
        if not target_info.get("multistage_support"):
            assert False, "two-step packages not supported by this build"
        fs = target_info["fstab"]["/misc"]
        assert fs.fs_type.upper() == "EMMC", \
            "two-step packages only supported on devices with EMMC /misc partitions"
        bcb_dev = {"bcb_dev": fs.device}
        common.ZipWriteStr(output_zip, "recovery.img", recovery_img.data)
        script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)

        # Stage 2/3: Write recovery image to /recovery (currently running /boot).
        script.Comment("Stage 2/3")
        script.WriteRawImage("/recovery", "recovery.img")
        script.AppendExtra("""
set_stage("%(bcb_dev)s", "3/3");
reboot_now("%(bcb_dev)s", "recovery");
else if get_stage("%(bcb_dev)s") == "3/3" then
""" % bcb_dev)

        # Stage 3/3: Make changes.
        script.Comment("Stage 3/3")

    # Dump fingerprints
    script.Print("Target: {}".format(target_info.fingerprint))

    script.AppendExtra(
        "ifelse(is_mounted(\"/system\"), unmount(\"/system\"));")
    device_specific.FullOTA_InstallBegin()

    # All other partitions as well as the data wipe use 10% of the progress, and
    # the update of the system partition takes the remaining progress.
    system_progress = 0.9 - (len(block_diff_dict) - 1) * 0.1
    if OPTIONS.wipe_user_data:
        system_progress -= 0.1
    progress_dict = {partition: 0.1 for partition in block_diff_dict}
    progress_dict["system"] = system_progress

    if target_info.get('use_dynamic_partitions') == "true":
        # Use empty source_info_dict to indicate that all partitions / groups must
        # be re-added.
        dynamic_partitions_diff = common.DynamicPartitionsDifference(
            info_dict=OPTIONS.info_dict,
            block_diffs=block_diff_dict.values(),
            progress_dict=progress_dict,
            build_without_vendor=(not HasPartition(input_zip, "vendor")))
        dynamic_partitions_diff.WriteScript(script,
                                            output_zip,
                                            write_verify_script=OPTIONS.verify)
    else:
        for block_diff in block_diff_dict.values():
            block_diff.WriteScript(script,
                                   output_zip,
                                   progress=progress_dict.get(
                                       block_diff.partition),
                                   write_verify_script=OPTIONS.verify)

    CheckVintfIfTrebleEnabled(OPTIONS.input_tmp, target_info)

    boot_img = common.GetBootableImage("boot.img", "boot.img",
                                       OPTIONS.input_tmp, "BOOT")
    common.CheckSize(boot_img.data, "boot.img", target_info)
    common.ZipWriteStr(output_zip, "boot.img", boot_img.data)

    script.WriteRawImage("/boot", "boot.img")

    script.ShowProgress(0.1, 10)
    device_specific.FullOTA_InstallEnd()

    if OPTIONS.extra_script is not None:
        script.AppendExtra(OPTIONS.extra_script)

    script.UnmountAll()

    if OPTIONS.wipe_user_data:
        script.ShowProgress(0.1, 10)
        script.FormatPartition("/data")

    if OPTIONS.two_step:
        script.AppendExtra("""
set_stage("%(bcb_dev)s", "");
""" % bcb_dev)
        script.AppendExtra("else\n")

        # Stage 1/3: Nothing to verify for full OTA. Write recovery image to /boot.
        script.Comment("Stage 1/3")
        _WriteRecoveryImageToBoot(script, output_zip)

        script.AppendExtra("""
set_stage("%(bcb_dev)s", "2/3");
reboot_now("%(bcb_dev)s", "");
endif;
endif;
""" % bcb_dev)

    script.SetProgress(1)
    script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
    metadata.required_cache = script.required_cache

    # We haven't written the metadata entry, which will be done in
    # FinalizeMetadata.
    common.ZipClose(output_zip)

    needed_property_files = (NonAbOtaPropertyFiles(), )
    FinalizeMetadata(metadata, staging_file, output_file,
                     needed_property_files)
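
The progress split above works out as follows with hypothetical counts: each non-system block difference and the optional data wipe reserve 10% apiece, and system takes the rest.

# Worked example of the progress arithmetic, with made-up inputs.
num_block_diffs = 3              # e.g. system, vendor, product
wipe_user_data = True
system_progress = 0.9 - (num_block_diffs - 1) * 0.1
if wipe_user_data:
    system_progress -= 0.1       # reserve 10% for the data wipe
assert abs(system_progress - 0.6) < 1e-9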
Example #15
def GetTargetFilesZipForRetrofitDynamicPartitions(input_file,
                                                  super_block_devices,
                                                  dynamic_partition_list):
    """Returns a target-files.zip for retrofitting dynamic partitions.

  This allows brillo_update_payload to generate an OTA based on the exact
  bits on the block devices. Postinstall is disabled.

  Args:
    input_file: The input target-files.zip filename.
    super_block_devices: The list of super block devices.
    dynamic_partition_list: The list of dynamic partitions.

  Returns:
    The filename of target-files.zip with *.img replaced with super_*.img for
    each block device in super_block_devices.
  """
    assert super_block_devices, "No super_block_devices are specified."

    replace = {
        'OTA/super_{}.img'.format(dev): 'IMAGES/{}.img'.format(dev)
        for dev in super_block_devices
    }

    target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
    shutil.copyfile(input_file, target_file)

    with zipfile.ZipFile(input_file) as input_zip:
        namelist = input_zip.namelist()

    input_tmp = common.UnzipTemp(input_file, RETROFIT_DAP_UNZIP_PATTERN)

    # Remove partitions from META/ab_partitions.txt that are in
    # dynamic_partition_list but not in super_block_devices, so that
    # brillo_update_payload won't generate updates for those logical
    # partitions.
    ab_partitions_file = os.path.join(input_tmp, *AB_PARTITIONS.split('/'))
    with open(ab_partitions_file) as f:
        ab_partitions_lines = f.readlines()
        ab_partitions = [line.strip() for line in ab_partitions_lines]
    # Assert that all super_block_devices are in ab_partitions
    super_device_not_updated = [
        partition for partition in super_block_devices
        if partition not in ab_partitions
    ]
    assert not super_device_not_updated, \
        "{} is in super_block_devices but not in {}".format(
            super_device_not_updated, AB_PARTITIONS)
    # ab_partitions -= (dynamic_partition_list - super_block_devices)
    new_ab_partitions = common.MakeTempFile(prefix="ab_partitions",
                                            suffix=".txt")
    with open(new_ab_partitions, 'w') as f:
        for partition in ab_partitions:
            if (partition in dynamic_partition_list
                    and partition not in super_block_devices):
                logger.info("Dropping %s from ab_partitions.txt", partition)
                continue
            f.write(partition + "\n")
    to_delete = [AB_PARTITIONS]

    # Always skip postinstall for a retrofit update.
    to_delete += [POSTINSTALL_CONFIG]

    # Delete dynamic_partitions_info.txt so that brillo_update_payload thinks this
    # is a regular update on devices without dynamic partitions support.
    to_delete += [DYNAMIC_PARTITION_INFO]

    # Remove the existing partition images as well as the map files.
    to_delete += list(replace.values())
    to_delete += ['IMAGES/{}.map'.format(dev) for dev in super_block_devices]

    common.ZipDelete(target_file, to_delete)

    target_zip = zipfile.ZipFile(target_file, 'a', allowZip64=True)

    # Write super_{foo}.img as {foo}.img.
    for src, dst in replace.items():
        assert src in namelist, \
            'Missing {} in {}; {} cannot be written'.format(src, input_file, dst)
        unzipped_file = os.path.join(input_tmp, *src.split('/'))
        common.ZipWrite(target_zip, unzipped_file, arcname=dst)

    # Write new ab_partitions.txt file
    common.ZipWrite(target_zip, new_ab_partitions, arcname=AB_PARTITIONS)

    common.ZipClose(target_zip)

    return target_file
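
A set-logic restatement of the ab_partitions.txt rewrite above, with hypothetical partition names: logical partitions that have no matching super block device are dropped.

# Illustrative only; partition names are made up.
super_block_devices = ['system', 'vendor']
dynamic_partition_list = ['system', 'vendor', 'product']
ab_partitions = ['boot', 'system', 'vendor', 'product']
kept = [p for p in ab_partitions
        if not (p in dynamic_partition_list
                and p not in super_block_devices)]
assert kept == ['boot', 'system', 'vendor']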
Example #16
def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_file):
    target_info = common.BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
    source_info = common.BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)

    target_api_version = target_info["recovery_api_version"]
    source_api_version = source_info["recovery_api_version"]
    if source_api_version == 0:
        logger.warning(
            "Generating edify script for a source that can't install it.")

    script = edify_generator.EdifyGenerator(source_api_version,
                                            target_info,
                                            fstab=source_info["fstab"])

    if target_info.oem_props or source_info.oem_props:
        if not OPTIONS.oem_no_mount:
            source_info.WriteMountOemScript(script)

    metadata = GetPackageMetadata(target_info, source_info)

    if not OPTIONS.no_signing:
        staging_file = common.MakeTempFile(suffix='.zip')
    else:
        staging_file = output_file

    output_zip = zipfile.ZipFile(staging_file,
                                 "w",
                                 compression=zipfile.ZIP_DEFLATED)

    device_specific = common.DeviceSpecificParams(
        source_zip=source_zip,
        source_version=source_api_version,
        source_tmp=OPTIONS.source_tmp,
        target_zip=target_zip,
        target_version=target_api_version,
        target_tmp=OPTIONS.target_tmp,
        output_zip=output_zip,
        script=script,
        metadata=metadata,
        info_dict=source_info)

    source_boot = common.GetBootableImage("/tmp/boot.img", "boot.img",
                                          OPTIONS.source_tmp, "BOOT",
                                          source_info)
    target_boot = common.GetBootableImage("/tmp/boot.img", "boot.img",
                                          OPTIONS.target_tmp, "BOOT",
                                          target_info)
    updating_boot = (not OPTIONS.two_step
                     and (source_boot.data != target_boot.data))

    target_recovery = common.GetBootableImage("/tmp/recovery.img",
                                              "recovery.img",
                                              OPTIONS.target_tmp, "RECOVERY")

    block_diff_dict = GetBlockDifferences(target_zip=target_zip,
                                          source_zip=source_zip,
                                          target_info=target_info,
                                          source_info=source_info,
                                          device_specific=device_specific)

    CheckVintfIfTrebleEnabled(OPTIONS.target_tmp, target_info)

    # Assertions (e.g. device properties check).
    target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
    device_specific.IncrementalOTA_Assertions()

    # Two-step incremental package strategy (in chronological order,
    # which is *not* the order in which the generated script has
    # things):
    #
    # if stage is not "2/3" or "3/3":
    #    do verification on current system
    #    write recovery image to boot partition
    #    set stage to "2/3"
    #    reboot to boot partition and restart recovery
    # else if stage is "2/3":
    #    write recovery image to recovery partition
    #    set stage to "3/3"
    #    reboot to recovery partition and restart recovery
    # else:
    #    (stage must be "3/3")
    #    perform update:
    #       patch system files, etc.
    #       force full install of new boot image
    #       set up system to update recovery partition on first boot
    #    complete script normally
    #    (allow recovery to mark itself finished and reboot)

    if OPTIONS.two_step:
        if not source_info.get("multistage_support"):
            assert False, "two-step packages not supported by this build"
        fs = source_info["fstab"]["/misc"]
        assert fs.fs_type.upper() == "EMMC", \
            "two-step packages only supported on devices with EMMC /misc partitions"
        bcb_dev = {"bcb_dev": fs.device}
        common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data)
        script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)

        # Stage 2/3: Write recovery image to /recovery (currently running /boot).
        script.Comment("Stage 2/3")
        script.AppendExtra("sleep(20);\n")
        script.WriteRawImage("/recovery", "recovery.img")
        script.AppendExtra("""
set_stage("%(bcb_dev)s", "3/3");
reboot_now("%(bcb_dev)s", "recovery");
else if get_stage("%(bcb_dev)s") != "3/3" then
""" % bcb_dev)

        # Stage 1/3: (a) Verify the current system.
        script.Comment("Stage 1/3")

    # Dump fingerprints
    script.Print("Source: {}".format(source_info.fingerprint))
    script.Print("Target: {}".format(target_info.fingerprint))

    script.Print("Verifying current system...")

    device_specific.IncrementalOTA_VerifyBegin()

    WriteFingerprintAssertion(script, target_info, source_info)

    # Check the required cache size (i.e. stashed blocks).
    required_cache_sizes = [
        diff.required_cache for diff in block_diff_dict.values()
    ]
    if updating_boot:
        boot_type, boot_device_expr = common.GetTypeAndDeviceExpr(
            "/boot", source_info)
        d = common.Difference(target_boot, source_boot)
        _, _, d = d.ComputePatch()
        if d is None:
            include_full_boot = True
            common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
        else:
            include_full_boot = False

            logger.info("boot      target: %d  source: %d  diff: %d",
                        target_boot.size, source_boot.size, len(d))

            common.ZipWriteStr(output_zip, "boot.img.p", d)

            target_expr = 'concat("{}:",{},":{}:{}")'.format(
                boot_type, boot_device_expr, target_boot.size,
                target_boot.sha1)
            source_expr = 'concat("{}:",{},":{}:{}")'.format(
                boot_type, boot_device_expr, source_boot.size,
                source_boot.sha1)
            script.PatchPartitionExprCheck(target_expr, source_expr)

            required_cache_sizes.append(target_boot.size)

    if required_cache_sizes:
        script.CacheFreeSpaceCheck(max(required_cache_sizes))

    # Verify the existing partitions.
    for diff in block_diff_dict.values():
        diff.WriteVerifyScript(script, touched_blocks_only=True)

    device_specific.IncrementalOTA_VerifyEnd()

    if OPTIONS.two_step:
        # Stage 1/3: (b) Write recovery image to /boot.
        _WriteRecoveryImageToBoot(script, output_zip)

        script.AppendExtra("""
set_stage("%(bcb_dev)s", "2/3");
reboot_now("%(bcb_dev)s", "");
else
""" % bcb_dev)

        # Stage 3/3: Make changes.
        script.Comment("Stage 3/3")

    script.Comment("---- start making changes here ----")

    device_specific.IncrementalOTA_InstallBegin()

    progress_dict = {partition: 0.1 for partition in block_diff_dict}
    progress_dict["system"] = 1 - len(block_diff_dict) * 0.1

    if OPTIONS.source_info_dict.get("use_dynamic_partitions") == "true":
        if OPTIONS.target_info_dict.get("use_dynamic_partitions") != "true":
            raise RuntimeError(
                "can't generate incremental that disables dynamic partitions")
        dynamic_partitions_diff = common.DynamicPartitionsDifference(
            info_dict=OPTIONS.target_info_dict,
            source_info_dict=OPTIONS.source_info_dict,
            block_diffs=block_diff_dict.values(),
            progress_dict=progress_dict)
        dynamic_partitions_diff.WriteScript(script,
                                            output_zip,
                                            write_verify_script=OPTIONS.verify)
    else:
        for block_diff in block_diff_dict.values():
            block_diff.WriteScript(script,
                                   output_zip,
                                   progress=progress_dict.get(
                                       block_diff.partition),
                                   write_verify_script=OPTIONS.verify)

    if OPTIONS.two_step:
        common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
        script.WriteRawImage("/boot", "boot.img")
        logger.info("writing full boot image (forced by two-step mode)")

    if not OPTIONS.two_step:
        if updating_boot:
            if include_full_boot:
                logger.info("boot image changed; including full.")
                script.Print("Installing boot image...")
                script.WriteRawImage("/boot", "boot.img")
            else:
                # Produce the boot image by applying a patch to the current
                # contents of the boot partition, and write it back to the
                # partition.
                logger.info("boot image changed; including patch.")
                script.Print("Patching boot image...")
                script.ShowProgress(0.1, 10)
                target_expr = 'concat("{}:",{},":{}:{}")'.format(
                    boot_type, boot_device_expr, target_boot.size,
                    target_boot.sha1)
                source_expr = 'concat("{}:",{},":{}:{}")'.format(
                    boot_type, boot_device_expr, source_boot.size,
                    source_boot.sha1)
                script.PatchPartitionExpr(target_expr, source_expr,
                                          '"boot.img.p"')
        else:
            logger.info("boot image unchanged; skipping.")

    # Do device-specific installation (eg, write radio image).
    device_specific.IncrementalOTA_InstallEnd()

    if OPTIONS.extra_script is not None:
        script.AppendExtra(OPTIONS.extra_script)

    if OPTIONS.wipe_user_data:
        script.Print("Erasing user data...")
        script.FormatPartition("/data")

    if OPTIONS.two_step:
        script.AppendExtra("""
set_stage("%(bcb_dev)s", "");
endif;
endif;
""" % bcb_dev)

    script.SetProgress(1)
    # For downgrade OTAs, we prefer to use the update-binary in the source
    # build that is actually newer than the one in the target build.
    if OPTIONS.downgrade:
        script.AddToZip(source_zip,
                        output_zip,
                        input_path=OPTIONS.updater_binary)
    else:
        script.AddToZip(target_zip,
                        output_zip,
                        input_path=OPTIONS.updater_binary)
    metadata.required_cache = script.required_cache

    # We haven't written the metadata entry yet, which will be handled in
    # FinalizeMetadata().
    common.ZipClose(output_zip)

    # Sign the generated zip package unless no_signing is specified.
    needed_property_files = (NonAbOtaPropertyFiles(), )
    FinalizeMetadata(metadata, staging_file, output_file,
                     needed_property_files)
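
The patch expressions built above expand to edify concat() calls; with hypothetical device, size, and digest values the target expression looks like this.

# Illustrative only; the device expression, size and sha1 are placeholders.
boot_type = 'EMMC'
boot_device_expr = '"/dev/block/bootdevice/by-name/boot"'
target_expr = 'concat("{}:",{},":{}:{}")'.format(
    boot_type, boot_device_expr, 8388608, 'f' * 40)
# -> concat("EMMC:","/dev/block/bootdevice/by-name/boot",":8388608:fff...f")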
Example #17
def GenerateAbOtaPackage(target_file, output_file, source_file=None):
    """Generates an Android OTA package that has A/B update payload."""
    # Stage the output zip package for package signing.
    if not OPTIONS.no_signing:
        staging_file = common.MakeTempFile(suffix='.zip')
    else:
        staging_file = output_file
    output_zip = zipfile.ZipFile(staging_file,
                                 "w",
                                 compression=zipfile.ZIP_DEFLATED)

    if source_file is not None:
        target_info = common.BuildInfo(OPTIONS.target_info_dict,
                                       OPTIONS.oem_dicts)
        source_info = common.BuildInfo(OPTIONS.source_info_dict,
                                       OPTIONS.oem_dicts)
    else:
        target_info = common.BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
        source_info = None

    # Metadata to comply with Android OTA package format.
    metadata = GetPackageMetadata(target_info, source_info)

    if OPTIONS.retrofit_dynamic_partitions:
        target_file = GetTargetFilesZipForRetrofitDynamicPartitions(
            target_file,
            target_info.get("super_block_devices").strip().split(),
            target_info.get("dynamic_partition_list").strip().split())
    elif OPTIONS.skip_postinstall:
        target_file = GetTargetFilesZipWithoutPostinstallConfig(target_file)

    # Generate payload.
    payload = Payload()

    # Enforce a max timestamp this payload can be applied on top of.
    if OPTIONS.downgrade:
        max_timestamp = source_info.GetBuildProp("ro.build.date.utc")
    else:
        max_timestamp = metadata["post-timestamp"]
    additional_args = ["--max_timestamp", max_timestamp]

    payload.Generate(target_file, source_file, additional_args)

    # Sign the payload.
    payload_signer = PayloadSigner()
    payload.Sign(payload_signer)

    # Write the payload into output zip.
    payload.WriteToZip(output_zip)

    # Generate and include the secondary payload that installs secondary images
    # (e.g. system_other.img).
    if OPTIONS.include_secondary:
        # We always include a full payload for the secondary slot, even when
        # building an incremental OTA. See the comments for "--include_secondary".
        secondary_target_file = GetTargetFilesZipForSecondaryImages(
            target_file, OPTIONS.skip_postinstall)
        secondary_payload = Payload(secondary=True)
        secondary_payload.Generate(secondary_target_file,
                                   additional_args=additional_args)
        secondary_payload.Sign(payload_signer)
        secondary_payload.WriteToZip(output_zip)

    # If dm-verity is supported for the device, copy the contents of
    # care_map into the A/B OTA package.
    target_zip = zipfile.ZipFile(target_file, "r")
    if (target_info.get("verity") == "true"
            or target_info.get("avb_enable") == "true"):
        care_map_list = [
            x for x in ["care_map.pb", "care_map.txt"]
            if "META/" + x in target_zip.namelist()
        ]

        # Adds care_map if either the protobuf format or the plain text one exists.
        if care_map_list:
            care_map_name = care_map_list[0]
            care_map_data = target_zip.read("META/" + care_map_name)
            # In order to support streaming, care_map needs to be packed as
            # ZIP_STORED.
            common.ZipWriteStr(output_zip,
                               care_map_name,
                               care_map_data,
                               compress_type=zipfile.ZIP_STORED)
        else:
            logger.warning("Cannot find care map file in target_file package")

    common.ZipClose(target_zip)

    CheckVintfIfTrebleEnabled(target_file, target_info)

    # We haven't written the metadata entry yet, which will be handled in
    # FinalizeMetadata().
    common.ZipClose(output_zip)

    # AbOtaPropertyFiles intends to replace StreamingPropertyFiles, as it
    # covers all the info of the latter. However, system updaters and OTA
    # servers need time to switch to the new flag. We keep both flags for
    # the P timeframe, and will remove StreamingPropertyFiles in a later
    # release.
    needed_property_files = (
        AbOtaPropertyFiles(),
        StreamingPropertyFiles(),
    )
    FinalizeMetadata(metadata, staging_file, output_file,
                     needed_property_files)
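
The ZIP_STORED requirement above is what makes the care_map streamable: a stored entry's raw bytes equal its file contents, so an OTA server can serve it as a plain byte range. A minimal sketch of locating that range; the helper name is illustrative, while the zipfile attributes used (header_offset, compress_type, file_size) are standard-library API:

import struct
import zipfile

def stored_entry_range(zip_path, entry_name):
    # Returns (offset, size) of a ZIP_STORED entry's raw data, which is
    # exactly the byte range a streaming client would fetch.
    with zipfile.ZipFile(zip_path) as zf:
        info = zf.getinfo(entry_name)
    assert info.compress_type == zipfile.ZIP_STORED, 'entry is compressed'
    with open(zip_path, 'rb') as f:
        f.seek(info.header_offset)
        local_header = f.read(30)  # fixed part of the local file header
    # Bytes 26-30 hold the file name and extra field lengths.
    name_len, extra_len = struct.unpack('<2H', local_header[26:30])
    data_offset = info.header_offset + 30 + name_len + extra_len
    return data_offset, info.file_size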
Example #18
def VerifyAbOtaPayload(cert, package):
    """Verifies the payload and metadata signatures in an A/B OTA payload."""
    def VerifySignatureBlob(hash_file, blob):
        """Verifies the input hash_file against the signature blob."""
        signatures = Signatures()
        signatures.ParseFromString(blob)

        extracted_sig_file = common.MakeTempFile(prefix='extracted-sig-',
                                                 suffix='.bin')
        # In Android, we only expect one signature.
        assert len(signatures.signatures) == 1, \
            'Invalid number of signatures: %d' % len(signatures.signatures)
        signature = signatures.signatures[0]
        length = len(signature.data)
        assert length == 256, 'Invalid signature length %d' % (length, )
        with open(extracted_sig_file, 'wb') as f:
            f.write(signature.data)

        # Verify the signature file extracted from the payload, by reversing the
        # signing operation. Alternatively, this can be done by calling 'openssl
        # rsautl -verify -certin -inkey <cert.pem> -in <extracted_sig_file> -out
        # <output>', then to assert that
        # <output> == SHA-256 DigestInfo prefix || <hash_file>.
        cmd = [
            'openssl', 'pkeyutl', '-verify', '-certin', '-inkey', cert,
            '-pkeyopt', 'digest:sha256', '-in', hash_file, '-sigfile',
            extracted_sig_file
        ]
        p = common.Run(cmd, stdout=subprocess.PIPE)
        result, _ = p.communicate()

        # https://github.com/openssl/openssl/pull/3213
        # 'openssl pkeyutl -verify' (prior to 1.1.0) returns non-zero return code,
        # even on successful verification. To avoid the false alarm with older
        # openssl, check the output directly.
        assert result.strip() == 'Signature Verified Successfully', \
            result.strip()

    package_zip = zipfile.ZipFile(package, 'r')
    if 'payload.bin' not in package_zip.namelist():
        common.ZipClose(package_zip)
        return

    print('Verifying A/B OTA payload signatures...')

    package_dir = tempfile.mkdtemp(prefix='package-')
    common.OPTIONS.tempfiles.append(package_dir)

    payload_file = package_zip.extract('payload.bin', package_dir)
    payload = Payload(open(payload_file, 'rb'))
    payload.Init()

    # Extract the payload hash and metadata hash from the payload.bin.
    payload_hash_file = common.MakeTempFile(prefix='hash-', suffix='.bin')
    metadata_hash_file = common.MakeTempFile(prefix='hash-', suffix='.bin')
    cmd = [
        'brillo_update_payload', 'hash', '--unsigned_payload', payload_file,
        '--signature_size', '256', '--metadata_hash_file', metadata_hash_file,
        '--payload_hash_file', payload_hash_file
    ]
    p = common.Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, 'brillo_update_payload hash failed'

    # Payload signature verification.
    assert payload.manifest.HasField('signatures_offset')
    payload_signature = payload.ReadDataBlob(
        payload.manifest.signatures_offset, payload.manifest.signatures_size)
    VerifySignatureBlob(payload_hash_file, payload_signature)

    # Metadata signature verification.
    metadata_signature = payload.ReadDataBlob(
        -payload.header.metadata_signature_len,
        payload.header.metadata_signature_len)
    VerifySignatureBlob(metadata_hash_file, metadata_signature)

    common.ZipClose(package_zip)

    # Verified successfully upon reaching here.
    print('\nPayload signatures VERIFIED\n\n')
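
The comment in VerifySignatureBlob mentions an alternative check: recover the signed message with 'openssl rsautl' and compare it against the SHA-256 DigestInfo prefix concatenated with the hash file. A sketch of that manual check, assuming openssl is on PATH; the function name is illustrative:

import subprocess

# ASN.1 DigestInfo prefix for SHA-256, per RFC 8017 (PKCS#1 v1.5).
SHA256_DIGEST_INFO_PREFIX = bytes.fromhex(
    '3031300d060960864801650304020105000420')

def verify_signature_manually(cert, sig_file, hash_file):
    # Reverse the RSA signing operation to recover DigestInfo || hash.
    recovered = subprocess.run(
        ['openssl', 'rsautl', '-verify', '-certin', '-inkey', cert,
         '-in', sig_file],
        check=True, stdout=subprocess.PIPE).stdout
    with open(hash_file, 'rb') as f:
        expected = SHA256_DIGEST_INFO_PREFIX + f.read()
    return recovered == expected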
Example #19
    def setUp(self):
        self.file_path = common.MakeTempFile()
        self.data = os.urandom(4096 * 4)
        # os.urandom returns bytes, so the file must be opened in binary mode.
        with open(self.file_path, 'wb') as f:
            f.write(self.data)
        self.file = FileImage(self.file_path)
Example #20
File: build_image.py Project: HavocQ/build
def ConvertBlockMapToBaseFs(block_map_file):
    base_fs_file = common.MakeTempFile(prefix="script_gen_", suffix=".base_fs")
    convert_command = ["blk_alloc_to_base_fs", block_map_file, base_fs_file]
    common.RunAndCheckOutput(convert_command)
    return base_fs_file

Example #21

def SignApex(apex_data,
             payload_key,
             container_key,
             container_pw,
             codename_to_api_level_map,
             signing_args=None):
    """Signs the current APEX with the given payload/container keys.

  Args:
    apex_data: Raw APEX data.
    payload_key: The path to payload signing key (w/ extension).
    container_key: The path to container signing key (w/o extension).
    container_pw: The matching password of the container_key, or None.
    codename_to_api_level_map: A dict that maps from codename to API level.
    signing_args: Additional args to be passed to the payload signer.

  Returns:
    The path to the signed APEX file.
  """
    apex_file = common.MakeTempFile(prefix='apex-', suffix='.apex')
    with open(apex_file, 'wb') as apex_fp:
        apex_fp.write(apex_data)

    APEX_PAYLOAD_IMAGE = 'apex_payload.img'
    APEX_PUBKEY = 'apex_pubkey'

    # 1a. Extract and sign the APEX_PAYLOAD_IMAGE entry with the given
    # payload_key.
    payload_dir = common.MakeTempDir(prefix='apex-payload-')
    with zipfile.ZipFile(apex_file) as apex_fd:
        payload_file = apex_fd.extract(APEX_PAYLOAD_IMAGE, payload_dir)

    payload_info = apex_utils.ParseApexPayloadInfo(payload_file)
    apex_utils.SignApexPayload(payload_file, payload_key,
                               payload_info['apex.key'],
                               payload_info['Algorithm'], payload_info['Salt'],
                               signing_args)

    # 1b. Update the embedded payload public key.
    payload_public_key = common.ExtractAvbPublicKey(payload_key)

    common.ZipDelete(apex_file, APEX_PAYLOAD_IMAGE)
    common.ZipDelete(apex_file, APEX_PUBKEY)
    apex_zip = zipfile.ZipFile(apex_file, 'a')
    common.ZipWrite(apex_zip, payload_file, arcname=APEX_PAYLOAD_IMAGE)
    common.ZipWrite(apex_zip, payload_public_key, arcname=APEX_PUBKEY)
    common.ZipClose(apex_zip)

    # 2. Align the files at page boundary (same as in apexer).
    aligned_apex = common.MakeTempFile(prefix='apex-container-',
                                       suffix='.apex')
    common.RunAndCheckOutput(
        ['zipalign', '-f', '4096', apex_file, aligned_apex])

    # 3. Sign the APEX container with container_key.
    signed_apex = common.MakeTempFile(prefix='apex-container-', suffix='.apex')

    # Specify the 4K alignment when calling SignApk.
    extra_signapk_args = OPTIONS.extra_signapk_args[:]
    extra_signapk_args.extend(['-a', '4096'])

    common.SignFile(aligned_apex,
                    signed_apex,
                    container_key,
                    container_pw,
                    codename_to_api_level_map=codename_to_api_level_map,
                    extra_signapk_args=extra_signapk_args)

    return signed_apex

Example #22

    def test_ApexApkSigner_apkKeyNotPresent(self):
        apex_path = common.MakeTempFile(suffix='.apex')
        shutil.copy(self.apex_with_apk, apex_path)
        signer = apex_utils.ApexApkSigner(apex_path, None, None)
        self.assertRaises(apex_utils.ApexSigningError, signer.ProcessApexFile,
                          {}, self.payload_key)
Example #23
def WriteFullOTAPackage(input_zip, output_file):
    target_info = common.BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)

    # We don't know what version it will be installed on top of. We expect the API
    # just won't change very often. Similarly for fstab, it might have changed in
    # the target build.
    target_api_version = target_info["recovery_api_version"]
    script = edify_generator.EdifyGenerator(target_api_version, target_info)

    if target_info.oem_props and not OPTIONS.oem_no_mount:
        target_info.WriteMountOemScript(script)

    metadata = GetPackageMetadata(target_info)

    if not OPTIONS.no_signing:
        staging_file = common.MakeTempFile(suffix='.zip')
    else:
        staging_file = output_file

    output_zip = zipfile.ZipFile(staging_file,
                                 "w",
                                 compression=zipfile.ZIP_DEFLATED)

    device_specific = common.DeviceSpecificParams(
        input_zip=input_zip,
        input_version=target_api_version,
        output_zip=output_zip,
        script=script,
        input_tmp=OPTIONS.input_tmp,
        metadata=metadata,
        info_dict=OPTIONS.info_dict)

    assert HasRecoveryPatch(input_zip, info_dict=OPTIONS.info_dict)

    # Assertions (e.g. downgrade check, device properties check).
    #ts = target_info.GetBuildProp("ro.build.date.utc")
    #ts_text = target_info.GetBuildProp("ro.build.date")
    #script.AssertOlderBuild(ts, ts_text)

    target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
    device_specific.FullOTA_Assertions()

    block_diff_dict = GetBlockDifferences(target_zip=input_zip,
                                          source_zip=None,
                                          target_info=target_info,
                                          source_info=None,
                                          device_specific=device_specific)

    # Two-step package strategy (in chronological order, which is *not*
    # the order in which the generated script has things):
    #
    # if stage is not "2/3" or "3/3":
    #    write recovery image to boot partition
    #    set stage to "2/3"
    #    reboot to boot partition and restart recovery
    # else if stage is "2/3":
    #    write recovery image to recovery partition
    #    set stage to "3/3"
    #    reboot to recovery partition and restart recovery
    # else:
    #    (stage must be "3/3")
    #    set stage to ""
    #    do normal full package installation:
    #       wipe and install system, boot image, etc.
    #       set up system to update recovery partition on first boot
    #    complete script normally
    #    (allow recovery to mark itself finished and reboot)

    recovery_img = common.GetBootableImage("recovery.img", "recovery.img",
                                           OPTIONS.input_tmp, "RECOVERY")
    if OPTIONS.two_step:
        if not target_info.get("multistage_support"):
            assert False, "two-step packages not supported by this build"
        fs = target_info["fstab"]["/misc"]
        assert fs.fs_type.upper() == "EMMC", \
            "two-step packages only supported on devices with EMMC /misc partitions"
        bcb_dev = {"bcb_dev": fs.device}
        common.ZipWriteStr(output_zip, "recovery.img", recovery_img.data)
        script.AppendExtra("""
if get_stage("%(bcb_dev)s") == "2/3" then
""" % bcb_dev)

        # Stage 2/3: Write recovery image to /recovery (currently running /boot).
        script.Comment("Stage 2/3")
        script.WriteRawImage("/recovery", "recovery.img")
        script.AppendExtra("""
set_stage("%(bcb_dev)s", "3/3");
reboot_now("%(bcb_dev)s", "recovery");
else if get_stage("%(bcb_dev)s") == "3/3" then
""" % bcb_dev)

        # Stage 3/3: Make changes.
        script.Comment("Stage 3/3")

    # Dump fingerprints
    script.Print("Target: {}".format(target_info.fingerprint))

    # Print ASCII banner.
    script.Print("--------------------------------------------------")
    script.Print("|                    __                          |")
    script.Print("|        /\  _  _ _ |_   |_ _  _  _| _  _|       |")
    script.Print("|       /--\(_)_)|_)|__><|_(-`| )(_|(-`(_|       |")
    script.Print("|                |                               |")
    script.Print("|                                                |")
    script.Print("--------------------------------------------------")
    script.Print("--------------------------------------------------")
    script.Print("|                  By:-TeamAEX                   |")
    script.Print("--------------------------------------------------")
    script.Print(" ")
    script.AppendExtra("sleep (2);")

    device_specific.FullOTA_InstallBegin()

    CopyInstallTools(output_zip)
    script.UnpackPackageDir("install", "/tmp/install")
    script.SetPermissionsRecursive("/tmp/install", 0, 0, 0o755, 0o644, None,
                                   None)
    script.SetPermissionsRecursive("/tmp/install/bin", 0, 0, 0o755, 0o755,
                                   None, None)

    if target_info.get("system_root_image") == "true":
        sysmount = "/"
    else:
        sysmount = "/system"

    if OPTIONS.backuptool:
        script.Print("BackupTools: starting backup script")
        script.RunBackup("backup", sysmount,
                         target_info.get('use_dynamic_partitions') == "true")
        script.Print("BackupTools: DONE! Now real installation will begin")

    # All other partitions as well as the data wipe use 10% of the progress, and
    # the update of the system partition takes the remaining progress.
    system_progress = 0.9 - (len(block_diff_dict) - 1) * 0.1

    if target_info.GetBuildProp("ro.extended.display.version") is not None:
        buildid = target_info.GetBuildProp("ro.extended.display.version")
        buildidn = target_info.GetBuildProp("ro.build.id")
        buildday = target_info.GetBuildProp("ro.build.date")
        securep = target_info.GetBuildProp("ro.build.version.security_patch")
        device = target_info.GetBuildProp("ro.aex.device")
        androidver = target_info.GetBuildProp("ro.build.version.release")
        manufacturer = target_info.GetBuildProp("ro.product.manufacturer")
        sdkver = target_info.GetBuildProp("ro.build.version.sdk")
        script.Print("-------------------- Software --------------------")
        script.Print(" OS version: %s" % (buildid))
        script.Print("")
        script.Print(" Android version: %s" % (androidver))
        script.Print("")
        script.Print(" Security patch: %s" % (securep))
        script.Print("")
        script.Print(" SDK version: %s" % (sdkver))
        script.Print("")
        script.Print(" Build ID: %s" % (buildidn))
        script.Print("")
        script.Print(" Build date: %s" % (buildday))
        script.Print("-------------------- Hardware --------------------")
        script.Print(" Device codename: %s" % (device))
        script.Print("")
        script.Print(" Manufacturer: %s" % (manifacturer))
        script.Print("")
        script.Print("--------------------------------------------------")

    if OPTIONS.wipe_user_data:
        system_progress -= 0.1
    progress_dict = {partition: 0.1 for partition in block_diff_dict}
    progress_dict["system"] = system_progress

    if target_info.get('use_dynamic_partitions') == "true":
        # Use empty source_info_dict to indicate that all partitions / groups must
        # be re-added.
        dynamic_partitions_diff = common.DynamicPartitionsDifference(
            info_dict=OPTIONS.info_dict,
            block_diffs=block_diff_dict.values(),
            progress_dict=progress_dict)
        dynamic_partitions_diff.WriteScript(script,
                                            output_zip,
                                            write_verify_script=OPTIONS.verify)
    else:
        for block_diff in block_diff_dict.values():
            block_diff.WriteScript(script,
                                   output_zip,
                                   progress=progress_dict.get(
                                       block_diff.partition),
                                   write_verify_script=OPTIONS.verify)

    CheckVintfIfTrebleEnabled(OPTIONS.input_tmp, target_info)

    boot_img = common.GetBootableImage("boot.img", "boot.img",
                                       OPTIONS.input_tmp, "BOOT")
    common.CheckSize(boot_img.data, "boot.img", target_info)
    common.ZipWriteStr(output_zip, "boot.img", boot_img.data)

    device_specific.FullOTA_PostValidate()

    if OPTIONS.backuptool:
        script.ShowProgress(0.02, 10)
        script.Print("BackupTools: Restoring backup")
        script.RunBackup("restore", sysmount,
                         target_info.get('use_dynamic_partitions') == "true")
        script.Print("BackupTools: DONE!")

    script.WriteRawImage("/boot", "boot.img")

    script.ShowProgress(0.1, 10)
    device_specific.FullOTA_InstallEnd()

    if OPTIONS.extra_script is not None:
        script.AppendExtra(OPTIONS.extra_script)

    script.UnmountAll()

    if OPTIONS.wipe_user_data:
        script.ShowProgress(0.1, 10)
        script.FormatPartition("/data")

    if OPTIONS.two_step:
        script.AppendExtra("""
set_stage("%(bcb_dev)s", "");
""" % bcb_dev)
        script.AppendExtra("else\n")

        # Stage 1/3: Nothing to verify for full OTA. Write recovery image to /boot.
        script.Comment("Stage 1/3")
        _WriteRecoveryImageToBoot(script, output_zip)

        script.AppendExtra("""
set_stage("%(bcb_dev)s", "2/3");
reboot_now("%(bcb_dev)s", "");
endif;
endif;
""" % bcb_dev)

    script.SetProgress(1)
    script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
    metadata.required_cache = script.required_cache

    # We haven't written the metadata entry, which will be done in
    # FinalizeMetadata.
    common.ZipClose(output_zip)

    needed_property_files = (NonAbOtaPropertyFiles(), )
    FinalizeMetadata(metadata, staging_file, output_file,
                     needed_property_files)
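
The two-step strategy comment near the top of WriteFullOTAPackage describes a small state machine keyed off the bootloader control block stage. A sketch restating that chronological flow in plain Python; every callback here is a hypothetical stand-in for the corresponding edify action:

def two_step_flow(get_stage, set_stage, write_recovery_to_boot,
                  write_recovery_to_recovery, install_full_package, reboot):
    # Mirrors the chronological order in the comment, which is *not* the
    # order the generated edify script lays its branches out in.
    stage = get_stage()
    if stage not in ('2/3', '3/3'):
        write_recovery_to_boot()
        set_stage('2/3')
        reboot('')                 # restart recovery from /boot
    elif stage == '2/3':
        write_recovery_to_recovery()
        set_stage('3/3')
        reboot('recovery')
    else:                          # stage must be '3/3'
        set_stage('')
        install_full_package()     # wipe and install system, boot image, etc.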

Example #24

  prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "system.img")
  if os.path.exists(prebuilt_path):
    print("system.img already exists in %s, no need to rebuild..." % prefix)
    return

  def output_sink(fn, data):
    ofile = open(os.path.join(OPTIONS.input_tmp, "SYSTEM", fn), "w")
    ofile.write(data)
    ofile.close()

  if rebuild_recovery:
    print("Building new recovery patch")
    common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
                             boot_img, info_dict=OPTIONS.info_dict)

  block_list = common.MakeTempFile(prefix="system-blocklist-", suffix=".map")
  imgname = BuildSystem(OPTIONS.input_tmp, OPTIONS.info_dict,
                        block_list=block_list)
  common.ZipWrite(output_zip, imgname, prefix + "system.img")
  common.ZipWrite(output_zip, block_list, prefix + "system.map")


def BuildSystem(input_dir, info_dict, block_list=None):
  """Build the (sparse) system image and return the name of a temp
  file containing it."""
  return CreateImage(input_dir, info_dict, "system", block_list=block_list)


def AddVendor(output_zip, prefix="IMAGES/"):
  """Turn the contents of VENDOR into a vendor image and store in it
  output_zip."""
Example #25
def construct_sparse_image(chunks):
  """Returns a sparse image file constructed from the given chunks.

  From system/core/libsparse/sparse_format.h.
  typedef struct sparse_header {
    __le32 magic;  // 0xed26ff3a
    __le16 major_version;  // (0x1) - reject images with higher major versions
    __le16 minor_version;  // (0x0) - allow images with higher minor versions
    __le16 file_hdr_sz;  // 28 bytes for first revision of the file format
    __le16 chunk_hdr_sz;  // 12 bytes for first revision of the file format
    __le32 blk_sz;  // block size in bytes, must be a multiple of 4 (4096)
    __le32 total_blks;  // total blocks in the non-sparse output image
    __le32 total_chunks;  // total chunks in the sparse input image
    __le32 image_checksum;  // CRC32 checksum of the original data, counting
                            // "don't care" as 0. Standard 802.3 polynomial,
                            // use a Public Domain table implementation
  } sparse_header_t;

  typedef struct chunk_header {
    __le16 chunk_type;  // 0xCAC1 -> raw; 0xCAC2 -> fill;
                        // 0xCAC3 -> don't care
    __le16 reserved1;
    __le32 chunk_sz;  // in blocks in output image
    __le32 total_sz;  // in bytes of chunk input file including chunk header
                      // and data
  } chunk_header_t;

  Args:
    chunks: A list of chunks to be written. Each entry should be a tuple of
        (chunk_type, block_number).

  Returns:
    Filename of the created sparse image.
  """
  SPARSE_HEADER_MAGIC = 0xED26FF3A
  SPARSE_HEADER_FORMAT = "<I4H4I"
  CHUNK_HEADER_FORMAT = "<2H2I"

  sparse_image = common.MakeTempFile(prefix='sparse-', suffix='.img')
  with open(sparse_image, 'wb') as fp:
    fp.write(struct.pack(
        SPARSE_HEADER_FORMAT, SPARSE_HEADER_MAGIC, 1, 0, 28, 12, 4096,
        sum(chunk[1] for chunk in chunks),
        len(chunks), 0))

    for chunk in chunks:
      data_size = 0
      if chunk[0] == 0xCAC1:
        data_size = 4096 * chunk[1]
      elif chunk[0] == 0xCAC2:
        data_size = 4
      elif chunk[0] == 0xCAC3:
        pass
      else:
        assert False, "Unsupported chunk type: {}".format(chunk[0])

      fp.write(struct.pack(
          CHUNK_HEADER_FORMAT, chunk[0], 0, chunk[1], data_size + 12))
      if data_size != 0:
        fp.write(os.urandom(data_size))

  return sparse_image
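
To sanity-check the layout described in the docstring, the same struct formats can be used to walk the image back. A minimal parser sketch (the function name is illustrative); it only relies on the header fields defined in sparse_format.h:

import struct

def parse_sparse_image(path):
    # Walks the sparse header and the chunk headers written above.
    with open(path, 'rb') as fp:
        (magic, major, minor, file_hdr_sz, chunk_hdr_sz, blk_sz,
         total_blks, total_chunks, _checksum) = struct.unpack(
             '<I4H4I', fp.read(28))
        assert magic == 0xED26FF3A, 'bad sparse magic'
        assert (file_hdr_sz, chunk_hdr_sz) == (28, 12)
        chunks = []
        for _ in range(total_chunks):
            chunk_type, _reserved, chunk_sz, total_sz = struct.unpack(
                '<2H2I', fp.read(12))
            chunks.append((chunk_type, chunk_sz))
            fp.seek(total_sz - 12, 1)  # skip over the chunk payload
    return blk_sz, total_blks, chunks

# e.g. parse_sparse_image(construct_sparse_image([(0xCAC3, 4)]))
# -> (4096, 4, [(0xCAC3, 4)])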
Example #26
def SignApex(avbtool,
             apex_data,
             payload_key,
             container_key,
             container_pw,
             apk_keys,
             codename_to_api_level_map,
             no_hashtree,
             signing_args=None):
    """Signs the current APEX with the given payload/container keys.

  Args:
    apex_data: Raw APEX data.
    payload_key: The path to payload signing key (w/ extension).
    container_key: The path to container signing key (w/o extension).
    container_pw: The matching password of the container_key, or None.
    apk_keys: A dict that holds the signing keys for apk files.
    codename_to_api_level_map: A dict that maps from codename to API level.
    no_hashtree: Don't include hashtree in the signed APEX.
    signing_args: Additional args to be passed to the payload signer.

  Returns:
    The path to the signed APEX file.
  """
    apex_file = common.MakeTempFile(prefix='apex-', suffix='.apex')
    with open(apex_file, 'wb') as apex_fp:
        apex_fp.write(apex_data)

    APEX_PAYLOAD_IMAGE = 'apex_payload.img'
    APEX_PUBKEY = 'apex_pubkey'

    # 1. Extract the apex payload image and sign the containing apk files. Repack
    # the apex file after signing.
    payload_public_key = common.ExtractAvbPublicKey(avbtool, payload_key)
    apk_signer = ApexApkSigner(apex_file, container_pw,
                               codename_to_api_level_map)
    apex_file = apk_signer.ProcessApexFile(apk_keys, payload_key,
                                           payload_public_key)

    # 2a. Extract and sign the APEX_PAYLOAD_IMAGE entry with the given
    # payload_key.
    payload_dir = common.MakeTempDir(prefix='apex-payload-')
    with zipfile.ZipFile(apex_file) as apex_fd:
        payload_file = apex_fd.extract(APEX_PAYLOAD_IMAGE, payload_dir)
        zip_items = apex_fd.namelist()

    payload_info = ParseApexPayloadInfo(avbtool, payload_file)
    SignApexPayload(avbtool, payload_file, payload_key,
                    payload_info['apex.key'], payload_info['Algorithm'],
                    payload_info['Salt'], no_hashtree, signing_args)

    # 2b. Update the embedded payload public key.

    common.ZipDelete(apex_file, APEX_PAYLOAD_IMAGE)
    if APEX_PUBKEY in zip_items:
        common.ZipDelete(apex_file, APEX_PUBKEY)
    apex_zip = zipfile.ZipFile(apex_file, 'a')
    common.ZipWrite(apex_zip, payload_file, arcname=APEX_PAYLOAD_IMAGE)
    common.ZipWrite(apex_zip, payload_public_key, arcname=APEX_PUBKEY)
    common.ZipClose(apex_zip)

    # 3. Align the files at page boundary (same as in apexer).
    aligned_apex = common.MakeTempFile(prefix='apex-container-',
                                       suffix='.apex')
    common.RunAndCheckOutput(
        ['zipalign', '-f', '4096', apex_file, aligned_apex])

    # 4. Sign the APEX container with container_key.
    signed_apex = common.MakeTempFile(prefix='apex-container-', suffix='.apex')

    # Specify the 4K alignment when calling SignApk.
    extra_signapk_args = OPTIONS.extra_signapk_args[:]
    extra_signapk_args.extend(['-a', '4096'])

    common.SignFile(aligned_apex,
                    signed_apex,
                    container_key,
                    container_pw,
                    codename_to_api_level_map=codename_to_api_level_map,
                    extra_signapk_args=extra_signapk_args)

    return signed_apex
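
Step 3 relies on zipalign to place uncompressed entries on 4096-byte boundaries before signing. Alignment can be re-checked without rewriting the archive via zipalign's check mode; a small sketch, assuming zipalign is on PATH (the wrapper name is illustrative):

import subprocess

def is_apex_page_aligned(apex_path):
    # 'zipalign -c <align> <file>' exits 0 iff all uncompressed entries
    # are aligned to the given boundary.
    result = subprocess.run(['zipalign', '-c', '4096', apex_path])
    return result.returncode == 0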
Example #27
    def RepackApexPayload(self,
                          payload_dir,
                          payload_key,
                          payload_public_key,
                          signing_args=None):
        """Rebuilds the apex file with the updated payload directory."""
        apex_dir = common.MakeTempDir()
        # Extract the apex file and reuse its meta files as repack parameters.
        common.UnzipToDir(self.apex_path, apex_dir)

        android_jar_path = common.OPTIONS.android_jar_path
        if not android_jar_path:
            android_jar_path = os.path.join(
                os.environ.get('ANDROID_BUILD_TOP', ''), 'prebuilts', 'sdk',
                'current', 'public', 'android.jar')
            logger.warning(
                'android_jar_path not found in options, falling back to %s',
                android_jar_path)

        arguments_dict = {
            'manifest': os.path.join(apex_dir, 'apex_manifest.pb'),
            'build_info': os.path.join(apex_dir, 'apex_build_info.pb'),
            'android_jar_path': android_jar_path,
            'key': payload_key,
            'pubkey': payload_public_key,
        }
        for filename in arguments_dict.values():
            assert os.path.exists(filename), 'file {} not found'.format(
                filename)

        # The repack process will add these files back to the payload
        # image later.
        for name in ['apex_manifest.pb', 'apex_manifest.json', 'lost+found']:
            path = os.path.join(payload_dir, name)
            if os.path.isfile(path):
                os.remove(path)
            elif os.path.isdir(path):
                shutil.rmtree(path)

        repacked_apex = common.MakeTempFile(suffix='.apex')
        repack_cmd = [
            'apexer', '--force', '--include_build_info',
            '--do_not_check_keyname', '--apexer_tool_path',
            os.getenv('PATH')
        ]
        for key, val in arguments_dict.items():
            repack_cmd.extend(['--' + key, val])
        # Quote the signing_args value, since we pass
        # --signing_args "--signing_helper_with_files=%path" to apexer.
        if signing_args:
            repack_cmd.extend(['--signing_args', '"{}"'.format(signing_args)])
        # Optional arguments for apex repacking.
        manifest_json = os.path.join(apex_dir, 'apex_manifest.json')
        if os.path.exists(manifest_json):
            repack_cmd.extend(['--manifest_json', manifest_json])
        assets_dir = os.path.join(apex_dir, 'assets')
        if os.path.isdir(assets_dir):
            repack_cmd.extend(['--assets_dir', assets_dir])
        repack_cmd.extend([payload_dir, repacked_apex])
        if OPTIONS.verbose:
            repack_cmd.append('-v')
        common.RunAndCheckOutput(repack_cmd)

        return repacked_apex