Example #1
    def __init__(self, target):
        AmberRepo.__init__(self, target)
        self._with_count = 0

        self._amber_root = tempfile.mkdtemp()
        pm_tool = common.GetHostToolPathFromPlatform('pm')
        subprocess.check_call([pm_tool, 'newrepo', '-repo', self._amber_root])
        logging.info('Creating and serving temporary Amber root: {}.'.format(
            self._amber_root))

        serve_port = common.GetAvailableTcpPort()
        self._pm_serve_task = subprocess.Popen([
            pm_tool, 'serve', '-d',
            os.path.join(self._amber_root, 'repository'), '-l',
            ':%d' % serve_port, '-q'
        ])

        # Block until "pm serve" starts serving HTTP traffic at |serve_port|.
        timeout = time.time() + _PM_SERVE_LIVENESS_TIMEOUT_SECS
        while True:
            try:
                urllib2.urlopen('http://localhost:%d' % serve_port,
                                timeout=1).read()
                break
            except urllib2.URLError:
                logging.info('Waiting until \'pm serve\' is up...')

            if time.time() >= timeout:
                raise Exception('Timed out while waiting for \'pm serve\'.')

            time.sleep(1)

        remote_port = common.ConnectPortForwardingTask(target, serve_port, 0)
        self._RegisterAmberRepository(self._amber_root, remote_port)
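The constructor above blocks until the freshly spawned `pm serve` process answers HTTP requests on |serve_port|. Below is a minimal standalone sketch of the same liveness-polling pattern, written against Python 3's urllib.request (the example itself uses Python 2's urllib2); the constant names are illustrative, not taken from the source.

import logging
import time
import urllib.error
import urllib.request

# Hypothetical constants; the example above uses _PM_SERVE_LIVENESS_TIMEOUT_SECS.
_LIVENESS_TIMEOUT_SECS = 10
_POLL_INTERVAL_SECS = 1


def wait_for_http_server(port, timeout_secs=_LIVENESS_TIMEOUT_SECS):
    """Blocks until an HTTP server on localhost:|port| answers a request."""
    deadline = time.time() + timeout_secs
    while True:
        try:
            urllib.request.urlopen('http://localhost:%d' % port,
                                   timeout=1).read()
            return
        except urllib.error.URLError:
            logging.info('Waiting for the server on port %d...', port)
        if time.time() >= deadline:
            raise Exception('Timed out waiting for the server on port %d.' % port)
        time.sleep(_POLL_INTERVAL_SECS)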
Example #2
    def PublishPackage(self, package_path):
        pm_tool = common.GetHostToolPathFromPlatform('pm')
        subprocess.check_call([
            pm_tool, 'publish', '-a', '-f', package_path, '-r',
            self.GetPath(), '-vt', '-v'
        ], stderr=subprocess.STDOUT)
Example #3
def _EnsureBlobstoreQcowAndReturnPath(output_dir, target_arch):
    """Returns a file containing the Fuchsia blobstore in a QCOW format,
  with extra buffer space added for growth."""

    qimg_tool = os.path.join(common.GetEmuRootForPlatform('qemu'), 'bin',
                             'qemu-img')
    fvm_tool = common.GetHostToolPathFromPlatform('fvm')
    blobstore_path = boot_data.GetTargetFile('storage-full.blk', target_arch,
                                             'qemu')
    qcow_path = os.path.join(output_dir, 'gen', 'blobstore.qcow')

    # Check a hash of the blobstore to determine if we can re-use an existing
    # extended version of it.
    blobstore_hash_path = os.path.join(output_dir, 'gen', 'blobstore.hash')
    current_blobstore_hash = _ComputeFileHash(blobstore_path)

    if os.path.exists(blobstore_hash_path) and os.path.exists(qcow_path):
        with open(blobstore_hash_path, 'r') as blobstore_hash_file:
            if current_blobstore_hash == blobstore_hash_file.read():
                return qcow_path

    # Add some extra room for growth to the Blobstore volume.
    # Fuchsia is unable to automatically extend FVM volumes at runtime so the
    # volume enlargement must be performed prior to QEMU startup.

    # The 'fvm' tool only supports extending volumes in-place, so make a
    # temporary copy of 'blobstore.bin' before it's mutated.
    extended_blobstore = tempfile.NamedTemporaryFile()
    shutil.copyfile(blobstore_path, extended_blobstore.name)
    subprocess.check_call([
        fvm_tool, extended_blobstore.name, 'extend', '--length',
        str(EXTENDED_BLOBSTORE_SIZE), blobstore_path
    ])

    # Construct a QCOW image from the extended, temporary FVM volume.
    # The result will be retained in the build output directory for re-use.
    qemu_img_cmd = [
        qimg_tool, 'convert', '-f', 'raw', '-O', 'qcow2', '-c',
        extended_blobstore.name, qcow_path
    ]
    # TODO(crbug.com/1046861): Remove arm64 call with retries when bug is fixed.
    if common.GetHostArchFromPlatform() == 'arm64':
        qemu_image.ExecQemuImgWithRetry(qemu_img_cmd)
    else:
        subprocess.check_call(qemu_img_cmd)

    # Write out a hash of the original blobstore file, so that subsequent runs
    # can trivially check if a cached extended FVM volume is available for reuse.
    with open(blobstore_hash_path, 'w') as blobstore_hash_file:
        blobstore_hash_file.write(current_blobstore_hash)

    return qcow_path
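The caching above relies on a `_ComputeFileHash` helper that is not shown in this excerpt. A minimal sketch, under the assumption that an MD5 digest over the file contents is enough to detect blobstore changes:

import hashlib


# Hypothetical helper; the real _ComputeFileHash is not part of this excerpt.
def _ComputeFileHash(filename):
    """Returns a hex digest of |filename|, read in 64 KiB chunks."""
    hasher = hashlib.md5()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            hasher.update(chunk)
    return hasher.hexdigest()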
Example #4
    def __init__(self, target):
        super(ManagedPkgRepo, self).__init__()
        self._with_count = 0
        self._target = target

        self._pkg_root = tempfile.mkdtemp()
        pm_tool = common.GetHostToolPathFromPlatform('pm')
        subprocess.check_call([pm_tool, 'newrepo', '-repo', self._pkg_root])
        logging.debug(
            'Creating and serving temporary package root: {}.'.format(
                self._pkg_root))

        with tempfile.NamedTemporaryFile() as pm_port_file:
            # Flags for `pm serve`:
            # https://fuchsia.googlesource.com/fuchsia/+/refs/heads/main/src/sys/pkg/bin/pm/cmd/pm/serve/serve.go
            self._pm_serve_task = subprocess.Popen([
                pm_tool,
                'serve',
                '-d',
                os.path.join(self._pkg_root, 'repository'),
                '-c',
                '2',  # Use config.json format v2, the default for pkgctl.
                '-q',  # Don't log transfer activity.
                '-l',
                ':0',  # Bind to ephemeral port.
                '-f',
                pm_port_file.name  # Publish port number to |pm_port_file|.
            ])

            # Busywait until 'pm serve' starts the server and publishes its port to
            # a temporary file.
            timeout = time.time() + _PM_SERVE_LISTEN_TIMEOUT_SECS
            serve_port = None
            while not serve_port:
                if time.time() > timeout:
                    raise Exception(
                        'Timeout waiting for \'pm serve\' to publish its port.'
                    )

                with open(pm_port_file.name, 'r',
                          encoding='utf8') as serve_port_file:
                    serve_port = serve_port_file.read()

                time.sleep(_PM_SERVE_POLL_INTERVAL)

            serve_port = int(serve_port)
            logging.debug('pm serve is active on port {}.'.format(serve_port))

        remote_port = common.ConnectPortForwardingTask(target, serve_port, 0)
        self._RegisterPkgRepository(self._pkg_root, remote_port)
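`pm serve -f <file>` writes the chosen listen port into the given file once the server is up, which is why the loop above busy-waits on |pm_port_file|. A standalone sketch of the same wait-for-port-file pattern follows; the helper name and poll interval are illustrative, not taken from the source.

import time


def _WaitForPortFile(port_file_path, timeout_secs, poll_interval_secs=0.1):
    """Polls |port_file_path| until a child process publishes a port number.

    Assumes the file already exists (e.g. created with
    tempfile.NamedTemporaryFile), as in the example above.
    """
    deadline = time.time() + timeout_secs
    while time.time() < deadline:
        with open(port_file_path, 'r') as port_file:
            contents = port_file.read().strip()
        if contents:
            return int(contents)
        time.sleep(poll_interval_secs)
    raise Exception('Timed out waiting for a port in %s.' % port_file_path)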
Example #5
def GetBootImage(output_dir, target_arch, target_type):
  """"Gets a path to the Zircon boot image, with the SSH client public key
  added."""
  ProvisionSSH()
  pubkey_path = _GetPubKeyPath()
  zbi_tool = common.GetHostToolPathFromPlatform('zbi')
  image_source_path = GetTargetFile('zircon-a.zbi', target_arch, target_type)
  image_dest_path = os.path.join(output_dir, 'gen', 'fuchsia-with-keys.zbi')

  cmd = [ zbi_tool, '-o', image_dest_path, image_source_path,
          '-e', 'data/ssh/authorized_keys=' + pubkey_path ]
  subprocess.check_call(cmd)

  return image_dest_path
Example #6
    def InstallPackage(self, package_paths):
        """Installs a package and it's dependencies on the device. If the package is
    already installed then it will be updated to the new version.

    package_paths: Paths to the .far files to install.
    """
        with self.GetPkgRepo() as pkg_repo:
            # Publish all packages to the serving TUF repository under |tuf_root|.
            for package_path in package_paths:
                pkg_repo.PublishPackage(package_path)

            # Resolve all packages, to have them pulled into the device/VM cache.
            for package_path in package_paths:
                package_name, package_version = _GetPackageInfo(package_path)
                logging.info('Resolving %s into cache.', package_name)
                return_code = self.RunCommand(
                    [
                        'pkgctl', 'resolve',
                        _GetPackageUri(package_name), '>/dev/null'
                    ],
                    timeout_secs=_INSTALL_TIMEOUT_SECS)
                if return_code != 0:
                    raise Exception('Error {} while resolving {}.'.format(
                        return_code, package_name))

            # Verify that the newly resolved versions of packages are reported.
            for package_path in package_paths:
                # Use pkgctl get-hash to determine which version will be resolved.
                package_name, package_version = _GetPackageInfo(package_path)
                pkgctl = self.RunCommandPiped(
                    ['pkgctl', 'get-hash',
                     _GetPackageUri(package_name)],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                pkgctl_out, pkgctl_err = pkgctl.communicate()
                # Strip trailing whitespace so the hash compares cleanly against
                # the first token of the merkleroot output below.
                pkgctl_out = pkgctl_out.strip()

                # Read the expected version from the meta.far Merkle hash file
                # alongside the package's FAR.
                meta_far_path = os.path.join(os.path.dirname(package_path),
                                             'meta.far')
                meta_far_merkel = subprocess.check_output([
                    common.GetHostToolPathFromPlatform('merkleroot'),
                    meta_far_path
                ]).split()[0]
                if pkgctl_out != meta_far_merkel:
                    raise Exception(
                        'Hash mismatch for %s after resolve (%s vs %s).' %
                        (package_name, pkgctl_out, meta_far_merkel))
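The install flow above leans on two helpers that are not part of this excerpt, `_GetPackageUri` and `_GetPackageInfo`. A plausible sketch of both, assuming a 'package' JSON metadata file (with 'name' and 'version' keys) sits next to the .far in the build output:

import json
import os


# Hypothetical sketches; the real helpers are not shown in this excerpt.
def _GetPackageUri(package_name):
    """Returns the fuchsia-pkg URI used by pkgctl for |package_name|."""
    return 'fuchsia-pkg://fuchsia.com/%s' % package_name


def _GetPackageInfo(package_path):
    """Returns (name, version) read from the 'package' JSON next to the .far."""
    with open(os.path.join(os.path.dirname(package_path), 'package')) as f:
        package_info = json.load(f)
    return package_info['name'], package_info['version']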
Example #7
    def PublishPackage(self, package_path):
        pm_tool = common.GetHostToolPathFromPlatform('pm')
        # Flags for `pm publish`:
        # https://fuchsia.googlesource.com/fuchsia/+/refs/heads/main/src/sys/pkg/bin/pm/cmd/pm/publish/publish.go
        # https://fuchsia.googlesource.com/fuchsia/+/refs/heads/main/src/sys/pkg/bin/pm/repo/config.go
        # -a: Publish archived package
        # -f <path>: Path to packages
        # -r <path>: Path to repository
        # -vt: Repo versioning based on time rather than monotonic version number
        #      increase
        # -v: Verbose output
        subprocess.check_call([
            pm_tool, 'publish', '-a', '-f', package_path, '-r',
            self.GetPath(), '-vt', '-v'
        ], stderr=subprocess.STDOUT)
Example #8
import hashlib
import logging
import multiprocessing
import os
import re
import select
import subprocess
import sys
import time
import threading
import uuid

import common
from symbolizer import RunSymbolizer
from symbolizer import SymbolizerFilter

FAR = common.GetHostToolPathFromPlatform('far')

# Amount of time to wait for the termination of the system log output thread.
_JOIN_TIMEOUT_SECS = 5


def _AttachKernelLogReader(target):
    """Attaches a kernel log reader as a long-running SSH task."""

    logging.info('Attaching kernel logger.')
    return target.RunCommandPiped(['dlog', '-f'],
                                  stdin=open(os.devnull, 'r'),
                                  stdout=subprocess.PIPE)


def _BuildIdsPaths(package_paths):