def test_share(container):
    """Test container can share files with the host.

    @param container: The test container.
    """
    logging.info('Test files written to the result directory can be accessed '
                 'from the host running the container.')
    host_test_script = os.path.join(RESULT_PATH, TEST_SCRIPT)
    with open(host_test_script, 'w') as script:
        if utils.is_moblab():
            script.write(TEST_SCRIPT_CONTENT % {'ts_mon_test': ''})
        else:
            script.write(TEST_SCRIPT_CONTENT %
                         {'ts_mon_test': TEST_SCRIPT_CONTENT_TS_MON})

    container_result_path = lxc.RESULT_DIR_FMT % TEST_JOB_FOLDER
    container_test_script = os.path.join(container_result_path, TEST_SCRIPT)
    container_test_script_dest = os.path.join('/usr/local/autotest/utils/',
                                              TEST_SCRIPT)
    container_test_log = os.path.join(container_result_path, TEST_LOG)
    host_test_log = os.path.join(RESULT_PATH, TEST_LOG)
    # Move the test script out of result folder as it needs to import common.
    container.attach_run('mv %s %s' %
                         (container_test_script, container_test_script_dest))
    container.attach_run('python %s %s' %
                         (container_test_script_dest, container_test_log))
    if not os.path.exists(host_test_log):
        raise Exception(
            'Results created in container can not be accessed from '
            'the host.')
    with open(host_test_log, 'r') as log:
        if log.read() != 'test':
            raise Exception('Failed to read the content of results in '
                            'container.')
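# A hedged, purely illustrative stand-in for the script that test_share()
# writes out: the real TEST_SCRIPT_CONTENT is defined elsewhere in this module
# and also handles common imports and the optional ts_mon check. The only
# contract visible above is that the script writes the literal string 'test'
# to the log path passed as its first argument.
_ILLUSTRATIVE_TEST_SCRIPT = """
import sys

def main():
    # test_share() later asserts that the shared log file contains 'test'.
    with open(sys.argv[1], 'w') as log:
        log.write('test')

if __name__ == '__main__':
    main()
"""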
Example #2
    def _modify_shadow_config(self):
        """Update the shadow config used in container with correct values.

        This only applies when no shadow SSP deploy config is used. With the
        default SSP deploy config, the autotest shadow_config.ini comes from
        the autotest directory and requires the following modifications to
        work inside the container. If a shadow SSP deploy config file is used
        instead, the autotest shadow_config.ini must come from a source that
        already includes these modifications:
        1. Disable master ssh connection in the shadow config, as it does not
           yet work properly inside the container and produces noise in the
           log.
        2. Update AUTOTEST_WEB/host and SERVER/hostname to the IP of the host
           if either is set to localhost or 127.0.0.1; otherwise set it to the
           FQDN of the config value.
        3. Update SSP/user, which is used as the user making RPCs inside the
           container. This allows the RPCs to pass the ACL check as if the
           calls were made on the host.

        """
        shadow_config = os.path.join(CONTAINER_AUTOTEST_DIR,
                                     'shadow_config.ini')

        # Inject "AUTOSERV/enable_master_ssh: False" in shadow config as
        # container does not support master ssh connection yet.
        self.container.attach_run(
            'echo $\'\n[AUTOSERV]\nenable_master_ssh: False\n\' >> %s' %
            shadow_config)

        host_ip = lxc_utils.get_host_ip()
        local_names = ['localhost', '127.0.0.1']

        db_host = config.get_config_value('AUTOTEST_WEB', 'host')
        if db_host.lower() in local_names:
            new_host = host_ip
        else:
            new_host = socket.getfqdn(db_host)
        self.container.attach_run(
            'echo $\'\n[AUTOTEST_WEB]\nhost: %s\n\' >> %s' %
            (new_host, shadow_config))

        afe_host = config.get_config_value('SERVER', 'hostname')
        if afe_host.lower() in local_names:
            new_host = host_ip
        else:
            new_host = socket.getfqdn(afe_host)
        self.container.attach_run(
            'echo $\'\n[SERVER]\nhostname: %s\n\' >> %s' %
            (new_host, shadow_config))

        # Update configurations in SSP section:
        # user: The user running current process.
        # is_moblab: True if the autotest server is a Moblab instance.
        # host_container_ip: IP address of the lxcbr0 interface. Process running
        #     inside container can make RPC through this IP.
        self.container.attach_run('echo $\'\n[SSP]\nuser: %s\nis_moblab: %s\n'
                                  'host_container_ip: %s\n\' >> %s' %
                                  (getpass.getuser(), bool(utils.is_moblab()),
                                   lxc_utils.get_host_ip(), shadow_config))
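# Hedged illustration (not from the original source): after
# _modify_shadow_config() runs, the tail of shadow_config.ini inside the
# container looks roughly like the block below. The IP addresses, hostname,
# and user are hypothetical placeholder values.
_EXAMPLE_SHADOW_CONFIG_TAIL = """
[AUTOSERV]
enable_master_ssh: False

[AUTOTEST_WEB]
host: 10.0.3.1

[SERVER]
hostname: cautotest.example.com

[SSP]
user: chromeos-test
is_moblab: False
host_container_ip: 10.0.3.1
"""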
Example #3
def create_chameleon_host(dut, chameleon_args):
    """Create a ChameleonHost object.

    There are three possible cases:
    1) If the DUT is in the CrOS lab and has a chameleon board, create
       a ChameleonHost object pointing to that board. chameleon_args
       is ignored.
    2) If case 1) does not apply and chameleon_args is neither None nor empty,
       create a ChameleonHost object using chameleon_args.
    3) If neither case 1) nor 2) applies, return None.

    @param dut: hostname of the host that the chameleon connects to. It can be
                used to look up the chameleon in the test lab using the naming
                convention. If dut is an IP address, it cannot be used to look
                up the chameleon in the test lab.
    @param chameleon_args: A dictionary that contains args for creating
                           a ChameleonHost object,
                           e.g. {'chameleon_host': '172.11.11.112',
                                 'chameleon_port': 9992}.

    @returns: A ChameleonHost object or None.

    """
    if not utils.is_in_container():
        is_moblab = utils.is_moblab()
    else:
        is_moblab = _CONFIG.get_config_value('SSP',
                                             'is_moblab',
                                             type=bool,
                                             default=False)

    if not is_moblab:
        dut_is_hostname = not dnsname_mangler.is_ip_address(dut)
        if dut_is_hostname:
            chameleon_hostname = chameleon.make_chameleon_hostname(dut)
            if utils.host_is_in_lab_zone(chameleon_hostname):
                # Be more tolerant of chameleons in the lab because we don't
                # want a dead chameleon to block non-chameleon tests.
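                # utils.ping() returns the ping command's exit status, so a
                # truthy (non-zero) value here means the chameleon did not
                # respond within the deadline.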
                if utils.ping(chameleon_hostname, deadline=3):
                    logging.warning(
                        'Chameleon %s is not accessible. Please file a bug'
                        ' to test lab', chameleon_hostname)
                    return None
                return ChameleonHost(chameleon_host=chameleon_hostname)
        if chameleon_args:
            return ChameleonHost(**chameleon_args)
        else:
            return None
    else:
        afe = frontend_wrappers.RetryingAFE(timeout_min=5, delay_sec=10)
        hosts = afe.get_hosts(hostname=dut)
        if hosts and CHAMELEON_HOST_ATTR in hosts[0].attributes:
            return ChameleonHost(
                chameleon_host=hosts[0].attributes[CHAMELEON_HOST_ATTR],
                chameleon_port=hosts[0].attributes.get(CHAMELEON_PORT_ATTR,
                                                       9992))
        else:
            return None
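# Hedged usage sketch (not from the original source). The DUT hostname below is
# a placeholder; chameleon_args mirrors the example given in the docstring.
def _example_create_chameleon():
    chameleon_args = {'chameleon_host': '172.11.11.112',
                      'chameleon_port': 9992}
    chameleon_host = create_chameleon_host('chromeos1-row1-rack1-host1',
                                           chameleon_args)
    if chameleon_host is None:
        logging.info('No chameleon is available for this DUT.')
    return chameleon_host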
Example #4
def select_32bit_java():
    """Switches to 32 bit java if installed (like in lab lxc images) to save
    about 30-40% server/shard memory during the run."""
    if utils.is_in_container() and not client_utils.is_moblab():
        java = '/usr/lib/jvm/java-8-openjdk-i386'
        if os.path.exists(java):
            logging.info('Found 32 bit java, switching to use it.')
            os.environ['JAVA_HOME'] = java
            os.environ['PATH'] = (os.path.join(java, 'bin') + os.pathsep +
                                  os.environ['PATH'])
Example #5
    def _modify_ssh_config(self):
        """Modify ssh config for it to work inside container.

        This is only called when the default ssp_deploy_config is used. If a
        shadow deploy config is manually set up, this function will not be
        called; in that case the source of the ssh config must already be
        updated to work inside the container.

        """
        # Remove domain specific flags.
        ssh_config = '/root/.ssh/config'
        self.container.attach_run('sed -i \'s/UseProxyIf=false//g\' \'%s\'' %
                                  ssh_config)
        # TODO(dshi): crbug.com/451622 The ssh connection loglevel is set to
        # ERROR in the container until master ssh connection works. This is
        # to avoid logs being flooded with the warning `Permanently added
        # '[hostname]' (RSA) to the list of known hosts.` (crbug.com/478364)
        # The sed command injects the following at the beginning of the
        # .ssh/config used in the container. With this change, ssh will not
        # print those warnings.
        # Host *
        #   LogLevel Error
        self.container.attach_run(
            'sed -i \'1s/^/Host *\\n  LogLevel ERROR\\n\\n/\' \'%s\'' %
            ssh_config)

        # Inject ssh config for moblab to ssh to dut from container.
        if utils.is_moblab():
            # ssh to moblab itself using moblab user.
            self.container.attach_run(
                'echo $\'\nHost 192.168.231.1\n  User moblab\n  '
                'IdentityFile %%d/.ssh/testing_rsa\' >> %s' %
                '/root/.ssh/config')
            # ssh to duts using root user.
            self.container.attach_run(
                'echo $\'\nHost *\n  User root\n  '
                'IdentityFile %%d/.ssh/testing_rsa\' >> %s' %
                '/root/.ssh/config')
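# Hedged illustration (not from the original source): on a Moblab host, the
# container's /root/.ssh/config ends up shaped roughly like the block below
# after _modify_ssh_config() runs. The ordering and surrounding entries are
# illustrative only.
_EXAMPLE_CONTAINER_SSH_CONFIG = """
Host *
  LogLevel ERROR

# ... original entries, with UseProxyIf=false stripped ...

Host 192.168.231.1
  User moblab
  IdentityFile %d/.ssh/testing_rsa

Host *
  User root
  IdentityFile %d/.ssh/testing_rsa
"""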
Example #6
def setup_test(bucket, container_id, skip_cleanup):
    """Test container can be created from base container.

    @param bucket: ContainerBucket to interact with containers.
    @param container_id: ID of the test container.
    @param skip_cleanup: Set to True to skip cleanup, used to troubleshoot
                         container failures.

    @return: A Container object created for the test container.
    """
    logging.info('Create test container.')
    os.makedirs(RESULT_PATH)
    container = bucket.setup_test(container_id,
                                  TEST_JOB_ID,
                                  AUTOTEST_SERVER_PKG,
                                  RESULT_PATH,
                                  skip_cleanup=skip_cleanup,
                                  job_folder=TEST_JOB_FOLDER,
                                  dut_name='192.168.0.3')

    # Inject "AUTOSERV/testing_mode: True" in shadow config to test autoserv.
    container.attach_run('echo $\'[AUTOSERV]\ntesting_mode: True\' >>'
                         ' /usr/local/autotest/shadow_config.ini')

    if not utils.is_moblab():
        # Create fake '/etc/chrome-infra/ts-mon.json' if it doesn't exist.
        create_key_script = os.path.join(RESULT_PATH,
                                         CREATE_FAKE_TS_MON_CONFIG_SCRIPT)
        with open(create_key_script, 'w') as script:
            script.write(CREATE_FAKE_TS_MON_CONFIG_SCRIPT_CONTENT)
        container_result_path = lxc.RESULT_DIR_FMT % TEST_JOB_FOLDER
        container_create_key_script = os.path.join(
            container_result_path, CREATE_FAKE_TS_MON_CONFIG_SCRIPT)
        container.attach_run('python %s' % container_create_key_script)

    return container
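# Hedged driver sketch (not from the original source) showing how setup_test()
# and test_share() above might be wired together. The container ID string, the
# ContainerBucket construction, and the destroy() cleanup are assumptions based
# on the lxc module referenced throughout this file.
def _run_share_test(skip_cleanup=False):
    bucket = lxc.ContainerBucket()
    container = setup_test(bucket, 'test_%s' % TEST_JOB_ID, skip_cleanup)
    try:
        test_share(container)
    finally:
        if not skip_cleanup:
            container.destroy()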
Example #7
def _get_standard_servo_args(dut_host):
    """Return servo data associated with a given DUT.

    This checks for the presence of servo host and port attached to the
    given `dut_host`.  This data should be stored in the
    `_afe_host.attributes` field in the provided `dut_host` parameter.

    @param dut_host   Instance of `Host` on which to find the servo
                      attributes.
    @return A tuple of a `servo_args` dict with host and an optional port,
            plus an `is_in_lab` flag indicating whether this is in the CrOS
            test lab or some other environment.
    """
    servo_args = None
    is_in_lab = False
    is_ssp_moblab = False
    if utils.is_in_container():
        is_moblab = _CONFIG.get_config_value('SSP',
                                             'is_moblab',
                                             type=bool,
                                             default=False)
        is_ssp_moblab = is_moblab
    else:
        is_moblab = utils.is_moblab()
    attrs = dut_host._afe_host.attributes
    if attrs and SERVO_HOST_ATTR in attrs:
        servo_host = attrs[SERVO_HOST_ATTR]
        if (is_ssp_moblab and servo_host in ['localhost', '127.0.0.1']):
            servo_host = _CONFIG.get_config_value('SSP',
                                                  'host_container_ip',
                                                  type=str,
                                                  default=None)
        servo_args = {SERVO_HOST_ATTR: servo_host}
        if SERVO_PORT_ATTR in attrs:
            try:
                servo_port = attrs[SERVO_PORT_ATTR]
                servo_args[SERVO_PORT_ATTR] = int(servo_port)
            except ValueError:
                logging.error('servo port is not an int: %s', servo_port)
                # Let's set the servo args to None since we're not creating
                # the ServoHost object with the proper port now.
                servo_args = None
        if SERVO_SERIAL_ATTR in attrs:
            servo_args[SERVO_SERIAL_ATTR] = attrs[SERVO_SERIAL_ATTR]
        is_in_lab = (not is_moblab and utils.host_is_in_lab_zone(servo_host))

    # TODO(jrbarnette):  This test to use the default lab servo hostname
    # is a legacy that we need only until every host in the DB has
    # proper attributes.
    elif (not is_moblab
          and not dnsname_mangler.is_ip_address(dut_host.hostname)):
        servo_host = make_servo_hostname(dut_host.hostname)
        is_in_lab = utils.host_is_in_lab_zone(servo_host)
        if is_in_lab:
            servo_args = {SERVO_HOST_ATTR: servo_host}
    if servo_args is not None:
        info = dut_host.host_info_store.get()
        if info.board:
            servo_args[SERVO_BOARD_ATTR] = _map_afe_board_to_servo_board(
                info.board)
    return servo_args, is_in_lab
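# Hedged usage sketch (not from the original source): dut_host stands in for a
# `Host` instance whose _afe_host.attributes may carry servo_host/servo_port.
def _example_servo_lookup(dut_host):
    servo_args, is_in_lab = _get_standard_servo_args(dut_host)
    if servo_args is None:
        logging.info('No servo attributes found for %s.', dut_host.hostname)
        return None
    logging.info('Servo host: %s, in lab: %s',
                 servo_args.get(SERVO_HOST_ATTR), is_in_lab)
    return servo_args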
Example #8
# Path to drone_temp folder in the container, which stores the control file for
# test job to run.
CONTROL_TEMP_PATH = os.path.join(CONTAINER_AUTOTEST_DIR, 'drone_tmp')

# Bash command to return the file count in a directory. Test the existence first
# so the command can return an error code if the directory doesn't exist.
COUNT_FILE_CMD = '[ -d %(dir)s ] && ls %(dir)s | wc -l'

# Command line to append content to a file
APPEND_CMD_FMT = ('echo \'%(content)s\' | sudo tee --append %(file)s'
                  '> /dev/null')
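
# Hedged illustration (not from the original source) of how these templates are
# filled in; the directory, file, and content values are placeholders, and the
# resulting strings are meant to be passed to a shell runner such as
# common_utils.run(cmd, ignore_status=True).
_example_count_cmd = COUNT_FILE_CMD % {'dir': '/usr/local/autotest/results'}
_example_append_cmd = APPEND_CMD_FMT % {
        'content': 'lxc.network.type = veth',
        'file': '/etc/lxc/default.conf'}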

# Flag to indicate it's running in a Moblab. Due to crbug.com/457496, lxc-ls has
# different behavior in Moblab.
IS_MOBLAB = common_utils.is_moblab()

if IS_MOBLAB:
    SITE_PACKAGES_PATH = '/usr/lib64/python2.7/site-packages'
    CONTAINER_SITE_PACKAGES_PATH = '/usr/local/lib/python2.7/dist-packages/'
else:
    SITE_PACKAGES_PATH = os.path.join(common.autotest_dir, 'site-packages')
    CONTAINER_SITE_PACKAGES_PATH = os.path.join(CONTAINER_AUTOTEST_DIR,
                                                'site-packages')

# TODO(dshi): If we are adding more logic in how lxc should interact with
# different systems, we should consider code refactoring to use a setting-style
# object to store following flags mapping to different systems.
# TODO(crbug.com/464834): Snapshot clone is disabled until Moblab can
# support overlayfs or aufs, which requires a newer kernel.
SUPPORT_SNAPSHOT_CLONE = not IS_MOBLAB
Example #9
    def _download_to_cache(self, uri):
        """Downloads the uri from the storage server.

        It always checks the cache for available binaries first and skips
        download if binaries are already in cache.

        The caller of this function is responsible for holding the cache lock.

        @param uri: The Google Storage or dl.google.com uri.
        @return Path to the downloaded object, name.
        """
        # Split uri into 3 pieces for use by gsutil and also by wget.
        parsed = urlparse.urlparse(uri)
        filename = os.path.basename(parsed.path)
        # We are hashing the uri instead of the binary. This is acceptable, as
        # the uris are supposed to contain version information and an object is
        # not supposed to be changed once created.
        output_dir = os.path.join(self._tradefed_cache,
                                  hashlib.md5(uri).hexdigest())
        output = os.path.join(output_dir, filename)
        # Check for existence of file.
        if os.path.exists(output):
            logging.info('Skipping download of %s, reusing %s.', uri, output)
            return output
        self._safe_makedirs(output_dir)

        if parsed.scheme not in ['gs', 'http', 'https']:
            raise error.TestFail('Error: Unknown download scheme %s' %
                                 parsed.scheme)
        if parsed.scheme in ['http', 'https']:
            logging.info('Using wget to download %s to %s.', uri, output_dir)
            # We are downloading 1 file at a time, hence using -O over -P.
            utils.run('wget',
                      args=('--report-speed=bits', '-O', output, uri),
                      verbose=True)
            return output

        if not client_utils.is_moblab():
            # If the machine can access the storage server directly,
            # defer to "gsutil" for downloading.
            logging.info('Host %s not in lab. Downloading %s directly to %s.',
                         self._host.hostname, uri, output)
            # b/17445576: gsutil rsync of individual files is not implemented.
            utils.run('gsutil', args=('cp', uri, output), verbose=True)
            return output

        # We are in the moblab. Because the machine cannot access the storage
        # server directly, use dev server to proxy.
        logging.info('Host %s is in lab. Downloading %s by staging to %s.',
                     self._host.hostname, uri, output)

        dirname = os.path.dirname(parsed.path)
        archive_url = '%s://%s%s' % (parsed.scheme, parsed.netloc, dirname)

        # First, request the devserver to download files into the lab network.
        # TODO(ihf): Switch stage_artifacts to honor rsync. Then we don't have
        # to shuffle files inside of tarballs.
        info = self._host.host_info_store.get()
        ds = dev_server.ImageServer.resolve(info.build)
        ds.stage_artifacts(info.build,
                           files=[filename],
                           archive_url=archive_url)

        # Then download files from the dev server.
        # TODO(ihf): use rsync instead of wget. Are there 3 machines involved?
        # Itself, dev_server plus DUT? Or is there just no rsync in moblab?
        ds_src = '/'.join([ds.url(), 'static', dirname, filename])
        logging.info('dev_server URL: %s', ds_src)
        # Calls into DUT to pull uri from dev_server.
        utils.run('wget',
                  args=('--report-speed=bits', '-O', output, ds_src),
                  verbose=True)
        return output
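# Hedged, self-contained illustration (not from the original source) of the
# cache layout used above: the cache key is the md5 hash of the URI and the
# downloaded file keeps its original basename. The URI and cache directory are
# placeholders.
import hashlib
import os
import urlparse  # Python 2, matching the code above

_uri = 'gs://example-bucket/cts/android-cts.zip'  # placeholder URI
_cache_dir = '/tmp/tradefed_cache'  # placeholder for self._tradefed_cache
_filename = os.path.basename(urlparse.urlparse(_uri).path)
_output = os.path.join(_cache_dir, hashlib.md5(_uri).hexdigest(), _filename)
# => /tmp/tradefed_cache/<md5(_uri)>/android-cts.zip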
Example #10
# Bash command to return the file count in a directory. Test the existence first
# so the command can return an error code if the directory doesn't exist.
COUNT_FILE_CMD = '[ -d %(dir)s ] && ls %(dir)s | wc -l'

# Command line to append content to a file
APPEND_CMD_FMT = ('echo \'%(content)s\' | sudo tee --append %(file)s'
                  '> /dev/null')

# Path to site-packages in Moblab
MOBLAB_SITE_PACKAGES = '/usr/lib64/python2.7/site-packages'
MOBLAB_SITE_PACKAGES_CONTAINER = '/usr/local/lib/python2.7/dist-packages/'

# Flag to indicate it's running in a Moblab. Due to crbug.com/457496, lxc-ls has
# different behavior in Moblab.
IS_MOBLAB = utils.is_moblab()

# TODO(dshi): If we are adding more logic in how lxc should interact with
# different systems, we should consider code refactoring to use a setting-style
# object to store following flags mapping to different systems.
# TODO(crbug.com/464834): Snapshot clone is disabled until Moblab can
# support overlayfs or aufs, which requires a newer kernel.
SUPPORT_SNAPSHOT_CLONE = not IS_MOBLAB

# Number of seconds to wait for network to be up in a container.
NETWORK_INIT_TIMEOUT = 300
# Network bring up is slower in Moblab.
NETWORK_INIT_CHECK_INTERVAL = 2 if IS_MOBLAB else 0.1

# Type string for container related metadata.
CONTAINER_CREATE_METADB_TYPE = 'container_create'
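
# Hedged sketch (not from the original source) of how the two network constants
# above might be consumed: poll the container until it reports an IP address or
# the timeout expires. `container.get_ip()` is a hypothetical accessor used
# only for illustration.
import time

def _example_wait_for_network(container):
    deadline = time.time() + NETWORK_INIT_TIMEOUT
    while time.time() < deadline:
        if container.get_ip():  # hypothetical accessor
            return True
        time.sleep(NETWORK_INIT_CHECK_INTERVAL)
    return False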