    def testContainerName(self):
        """Tests that created containers have the right name."""
        id0 = lxc.ContainerId(1, 2, 3)
        id1 = lxc.ContainerId(42, 41, 40)

        container0 = self.test_factory.create_container(id0)
        container1 = self.test_factory.create_container(id1)

        self.assertEqual(str(id0), container0.name)
        self.assertEqual(str(id1), container1.name)

    def testCreateContainer_alreadyExists(self):
        """Tests that container ID conflicts raise errors as expected."""
        id0 = lxc.ContainerId(1, 2, 3)

        self.test_factory.create_container(id0)
        with self.assertRaises(error.ContainerError):
            self.test_factory.create_container(id0)

    def testCreateContainer_forceReset(self):
        """Tests that force-resetting containers works."""
        factory = lxc.ContainerFactory(base_container=self.base_container,
                                       lxc_path=self.test_dir,
                                       force_cleanup=True)

        id0 = lxc.ContainerId(1, 2, 3)
        container0 = factory.create_container(id0)
        container0.start(wait_for_network=False)

        # Create a file in the original container.
        tmpfile = container0.attach_run('mktemp').stdout
        exists = 'test -e %s' % tmpfile
        try:
            container0.attach_run(exists)
        except error.CmdError as e:
            self.fail(e)

        # Create a new container in place of the original, then verify that the
        # file is no longer there.
        container1 = factory.create_container(id0)
        container1.start(wait_for_network=False)
        with self.assertRaises(error.CmdError):
            container1.attach_run(exists)


def run_autoserv(pid_file_manager, results, parser, ssp_url, use_ssp):
    """Run server job with given options.

    @param pid_file_manager: PidFileManager used to monitor the autoserv process.
    @param results: Folder to store results.
    @param parser: Parser for the command line arguments.
    @param ssp_url: Url to server-side package.
    @param use_ssp: Set to True to run with server-side packaging.
    """
    # send stdin to /dev/null
    dev_null = os.open(os.devnull, os.O_RDONLY)
    os.dup2(dev_null, sys.stdin.fileno())
    os.close(dev_null)

    # Create a separate process group if the process is not already a process
    # group leader. This allows the autoserv process to keep running after the
    # caller process (drone manager call) exits.
    if os.getpid() != os.getpgid(0):
        os.setsid()

    # Container name is predefined so the container can be destroyed in
    # handle_sigterm.
    job_or_task_id = job_directories.get_job_id_or_task_id(
        parser.options.results)
    container_id = lxc.ContainerId(job_or_task_id, time.time(), os.getpid())

    # Implement SIGTERM handler
    def handle_sigterm(signum, frame):
        logging.debug('Received SIGTERM')
        if pid_file_manager:
            pid_file_manager.close_file(1, signal.SIGTERM)
        logging.debug('Finished writing to pid_file. Killing process.')

        # Update results folder's file permission. This needs to be done ASAP
        # before the parsing process tries to access the log.
        if use_ssp and results:
            correct_results_folder_permission(results)

        # TODO (sbasi) - remove the time.sleep when crbug.com/302815 is solved.
        # This sleep allows the pending output to be logged before the kill
        # signal is sent.
        time.sleep(.1)
        if use_ssp:
            logging.debug(
                'Destroy container %s before aborting the autoserv '
                'process.', container_id)
            try:
                bucket = lxc.ContainerBucket()
                container = bucket.get_container(container_id)
                if container:
                    container.destroy()
                else:
                    logging.debug('Container %s is not found.', container_id)
            except:
                # Handle any exception so the autoserv process can be aborted.
                logging.exception('Failed to destroy container %s.',
                                  container_id)
            # Try to correct the result file permission again after the
            # container is destroyed, as the container might have created some
            # new files in the result folder.
            if results:
                correct_results_folder_permission(results)

        os.killpg(os.getpgrp(), signal.SIGKILL)

    # Set signal handler
    signal.signal(signal.SIGTERM, handle_sigterm)

    # faulthandler is only needed to debug in the Lab and is not available to
    # be imported in the chroot as part of VMTest, so wrap the import in a
    # try/except.
    try:
        import faulthandler
        faulthandler.register(signal.SIGTERM, all_threads=True, chain=True)
        logging.debug('faulthandler registered on SIGTERM.')
    except ImportError:
        sys.exc_clear()

    # Ignore SIGTTOU's generated by output from forked children.
    signal.signal(signal.SIGTTOU, signal.SIG_IGN)

    # If we received a SIGALRM, let's be loud about it.
    signal.signal(signal.SIGALRM, log_alarm)

    # Server-side tests that call shell scripts often depend on $USER being
    # set, but depending on how the autotest scheduler is launched, it may not
    # be.
    os.environ['USER'] = getpass.getuser()

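    # Unpack the parsed command-line options into local variables.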
    label = parser.options.label
    group_name = parser.options.group_name
    user = parser.options.user
    client = parser.options.client
    server = parser.options.server
    verify = parser.options.verify
    repair = parser.options.repair
    cleanup = parser.options.cleanup
    provision = parser.options.provision
    reset = parser.options.reset
    job_labels = parser.options.job_labels
    no_tee = parser.options.no_tee
    execution_tag = parser.options.execution_tag
    ssh_user = parser.options.ssh_user
    ssh_port = parser.options.ssh_port
    ssh_pass = parser.options.ssh_pass
    collect_crashinfo = parser.options.collect_crashinfo
    control_filename = parser.options.control_filename
    verify_job_repo_url = parser.options.verify_job_repo_url
    skip_crash_collection = parser.options.skip_crash_collection
    ssh_verbosity = int(parser.options.ssh_verbosity)
    ssh_options = parser.options.ssh_options
    no_use_packaging = parser.options.no_use_packaging
    in_lab = bool(parser.options.lab)

    # can't be both a client and a server side test
    if client and server:
        parser.parser.error(
            "Can not specify a test as both server and client!")

    if provision and client:
        parser.parser.error("Cannot specify provisioning and client!")

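    # Special tasks (verify, repair, cleanup, provision, reset, crashinfo
    # collection) may legitimately run without a control file.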
    is_special_task = (verify or repair or cleanup or collect_crashinfo
                       or provision or reset)
    use_client_trampoline = False
    if parser.options.control_name:
        if use_ssp:
            # When use_ssp is True, autoserv will be re-executed inside a
            # container, preserving the --control-name argument. The control
            # file will be staged inside the re-executed autoserv.
            control = None
        else:
            try:
                control = _stage_control_file(parser.options.control_name,
                                              results)
            except error.AutoservError as e:
                logging.info("Using client trampoline because of: %s", e)
                control = parser.options.control_name
                use_client_trampoline = True

    elif parser.args:
        control = parser.args[0]
    else:
        if not is_special_task:
            parser.parser.error("Missing argument: control file")
        control = None

    if ssh_verbosity > 0:
        # ssh_verbosity is an integer between 0 and 3, inclusive
        ssh_verbosity_flag = '-' + 'v' * ssh_verbosity
    else:
        ssh_verbosity_flag = ''

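    # Resolve the list of machines this job will run against.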
    machines = _get_machines(parser)
    if group_name and len(machines) < 2:
        parser.parser.error('-G %r may only be supplied with more than one '
                            'machine.' % group_name)

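    # Assemble the keyword arguments used to construct the server_job below.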
    job_kwargs = {
        'control': control,
        'args': parser.args[1:],
        'resultdir': results,
        'label': label,
        'user': user,
        'machines': machines,
        'machine_dict_list': server_job.get_machine_dicts(
            machine_names=machines,
            store_dir=os.path.join(results, parser.options.host_info_subdir),
            in_lab=in_lab,
            use_shadow_store=not parser.options.local_only_host_info,
            host_attributes=parser.options.host_attributes,
        ),
        'client': client,
        'ssh_user': ssh_user,
        'ssh_port': ssh_port,
        'ssh_pass': ssh_pass,
        'ssh_verbosity_flag': ssh_verbosity_flag,
        'ssh_options': ssh_options,
        'group_name': group_name,
        'tag': execution_tag,
        'disable_sysinfo': parser.options.disable_sysinfo,
        'in_lab': in_lab,
        'use_client_trampoline': use_client_trampoline,
    }
    if parser.options.parent_job_id:
        job_kwargs['parent_job_id'] = int(parser.options.parent_job_id)
    if control_filename:
        job_kwargs['control_filename'] = control_filename
    job = server_job.server_job(**job_kwargs)

    job.logging.start_logging()

    # perform checks
    job.precheck()

    # run the job
    exit_code = 0
    auto_start_servod = _CONFIG.get_config_value('AUTOSERV',
                                                 'auto_start_servod',
                                                 type=bool,
                                                 default=False)

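    # Set up ts_mon so job metrics can be reported from this process.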
    site_utils.SetupTsMonGlobalState('autoserv',
                                     indirect=False,
                                     short_lived=True)
    try:
        try:
            if repair:
                if auto_start_servod and len(machines) == 1:
                    _start_servod(machines[0])
                job.repair(job_labels)
            elif verify:
                job.verify(job_labels)
            elif provision:
                job.provision(job_labels)
            elif reset:
                job.reset(job_labels)
            elif cleanup:
                job.cleanup(job_labels)
            else:
                if auto_start_servod and len(machines) == 1:
                    _start_servod(machines[0])
                if use_ssp:
                    try:
                        _run_with_ssp(job, container_id, job_or_task_id,
                                      results, parser, ssp_url, machines)
                    finally:
                        # Update the ownership of files in result folder.
                        correct_results_folder_permission(results)
                else:
                    if collect_crashinfo:
                        # Update the ownership of files in the result folder.
                        # If the job to collect crashinfo was running inside a
                        # container (SSP) and crashed before correcting the
                        # folder permission, the result folder might have the
                        # wrong permission settings.
                        try:
                            correct_results_folder_permission(results)
                        except:
                            # Ignore any error as the user may not have root
                            # permission to run sudo command.
                            pass
                    metric_name = ('chromeos/autotest/experimental/'
                                   'autoserv_job_run_duration')
                    f = {
                        'in_container': utils.is_in_container(),
                        'success': False
                    }
                    with metrics.SecondsTimer(metric_name, fields=f) as c:
                        job.run(verify_job_repo_url=verify_job_repo_url,
                                only_collect_crashinfo=collect_crashinfo,
                                skip_crash_collection=skip_crash_collection,
                                job_labels=job_labels,
                                use_packaging=(not no_use_packaging))
                        c['success'] = True

        finally:
            job.close()
            # A special task doesn't run parse, so the result summary needs to
            # be built here.
            if results and (repair or verify or reset or cleanup or provision):
                # Throttle the result on the server side.
                try:
                    result_utils.execute(
                        results, control_data.DEFAULT_MAX_RESULT_SIZE_KB)
                except:
                    logging.exception(
                        'Non-critical failure: Failed to throttle results '
                        'in directory %s.', results)
                # Build result view and report metrics for result sizes.
                site_utils.collect_result_sizes(results)
    except:
        exit_code = 1
        traceback.print_exc()
    finally:
        metrics.Flush()

    sys.exit(exit_code)

    def testCreateContainer_withId(self):
        """Tests container creation with given IDs."""
        id0 = lxc.ContainerId(1, 2, 3)
        container = self.test_factory.create_container(id0)
        self.assertEqual(id0, container.id)