Example no. 1
0
    def _record_missing_tests(self, missing):
        """Records tests with missing results in job keyval file.

        @param missing: List of string names of Tast tests with missing results.
        """
        # Sort first so keyval indices are deterministic across runs, then
        # build one keyval entry per missing test name.
        keyvals = {
            '%s%d' % (self._MISSING_TEST_KEYVAL_PREFIX, index): test_name
            for index, test_name in enumerate(sorted(missing))
        }
        utils.write_keyval(self.job.resultdir, keyvals)
Example no. 2
0
def run_job(job,
            host,
            autotest_path,
            results_directory,
            fast_mode,
            id_digits=1,
            ssh_verbosity=0,
            ssh_options=None,
            args=None,
            pretend=False,
            autoserv_verbose=False,
            host_attributes=None):
    """
    Shell out to autoserv to run an individual test job.

    @param job: A Job object containing the control file contents and other
                relevant metadata for this test.
    @param host: Hostname of DUT to run test against.
    @param autotest_path: Absolute path of autotest directory.
    @param results_directory: Absolute path of directory to store results in.
                              (results will be stored in subdirectory of this).
    @param fast_mode: bool to use fast mode (disables slow autotest features).
    @param id_digits: The minimum number of digits that job ids should be
                      0-padded to when formatting as a string for results
                      directory.
    @param ssh_verbosity: SSH verbosity level, passed along to autoserv_utils
    @param ssh_options: Additional ssh options to be passed to autoserv_utils
    @param args: String that should be passed as args parameter to autoserv,
                 and then ultimately to test itself.
    @param pretend: If True, will print out autoserv commands rather than
                    running them.
    @param autoserv_verbose: If true, pass the --verbose flag to autoserv.
    @param host_attributes: Dict of host attributes to pass into autoserv.
                            Defaults to an empty dict.

    @returns: a tuple, return code of the job and absolute path of directory
              where results were stored.
    """
    # Use a None sentinel rather than a mutable {} default so no dict object
    # is shared across calls (classic mutable-default pitfall).
    if host_attributes is None:
        host_attributes = {}
    with tempfile.NamedTemporaryFile() as temp_file:
        temp_file.write(job.control_file)
        temp_file.flush()
        name_tail = job.name.split('/')[-1]
        results_directory = os.path.join(
            results_directory,
            'results-%0*d-%s' % (id_digits, job.id, name_tail))
        # Drop experimental keyval in the keyval file in the job result folder.
        os.makedirs(results_directory)
        utils.write_keyval(
            results_directory, {
                constants.JOB_EXPERIMENTAL_KEY:
                job.keyvals[constants.JOB_EXPERIMENTAL_KEY]
            })
        extra_args = [temp_file.name]
        if args:
            extra_args.extend(['--args', args])

        command = autoserv_utils.autoserv_run_job_command(
            os.path.join(autotest_path, 'server'),
            machines=host,
            job=job,
            verbose=autoserv_verbose,
            results_directory=results_directory,
            fast_mode=fast_mode,
            ssh_verbosity=ssh_verbosity,
            ssh_options=ssh_options,
            extra_args=extra_args,
            no_console_prefix=True,
            use_packaging=False,
            host_attributes=host_attributes)

        code = _run_autoserv(command, pretend)
        return code, results_directory
Example no. 3
0
    def run_once(self, pair, buffer, upper_bound, variance):
        """Runs netpipe between a pair of hosts, one server and one client.

        Forks two autotest runs in parallel (server role and client role),
        then writes per-host keyval files so the parser can attribute results.

        @param pair: Two-element sequence of hostnames. pair[0] starts as the
                server and pair[1] as the client, but the roles are swapped if
                the client carries the server platform label.
        @param buffer: Buffer size passed to netpipe as buffer_size.
                (NOTE: shadows the `buffer` builtin, but the name is part of
                the method's interface.)
        @param upper_bound: Upper bound passed to netpipe.
        @param variance: Perturbation size passed to netpipe.
        """
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a platform label for the server side of tests.
        server_label = 'net_server'

        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # If client has the server_label, then swap server and client.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IP Filters if they are enabled.
        # exit_status == 0 means `iptables -L` succeeded, i.e. iptables is
        # present and usable on that machine.
        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        # Starting a test indents the status.log entries. This test starts 2
        # additional tests causing their log entries to be indented twice. This
        # double indent confuses the parser, so reset the indent level on the
        # job, let the forked tests record their entries, then restore the
        # previous indent level.
        self.job._indenter.decrement()

        server_at = autotest.Autotest(server)
        client_at = autotest.Autotest(client)

        # Control-file template shared by both roles; only the role and tag
        # differ between the two rendered copies.
        # NOTE(review): there is no space after the comma on the buffer_size
        # line, so the rendered file reads "...upper_bound=%d,perturbation_..."
        # — still syntactically valid Python, but worth confirming intent.
        template = ''.join([
            "job.run_test('netpipe', server_ip='%s', ",
            "client_ip='%s', role='%s', bidirectional=True, ",
            "buffer_size=%d, upper_bound=%d,"
            "perturbation_size=%d, tag='%s')"
        ])

        server_control_file = template % (server.ip, client.ip, 'server',
                                          buffer, upper_bound, variance,
                                          'server')
        client_control_file = template % (server.ip, client.ip, 'client',
                                          buffer, upper_bound, variance,
                                          'client')

        # Run both roles concurrently; results land under the parent results
        # directory (subdir='../').
        server_command = subcommand.subcommand(
            server_at.run, [server_control_file, server.hostname],
            subdir='../')
        client_command = subcommand.subcommand(
            client_at.run, [client_control_file, client.hostname],
            subdir='../')

        subcommand.parallel([server_command, client_command])

        # The parser needs a keyval file to know what host ran the test.
        utils.write_keyval('../' + server.hostname,
                           {"hostname": server.hostname})
        utils.write_keyval('../' + client.hostname,
                           {"hostname": client.hostname})

        # Restore indent level of main job.
        self.job._indenter.increment()

        # Re-enable IP filters that were disabled above (same availability
        # check as before).
        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
Example no. 4
0
    def test_run_job(self):
        """Verifies run_job builds the autoserv command line and result dirs.

        Uses mox to stub os.makedirs, utils.write_keyval and subprocess.Popen,
        then runs run_job twice (a client-type job and a server-type job) and
        checks that each invocation produced the expected results directory,
        keyvals and autoserv argument list. The recorded expectations below
        are order-sensitive: job1's calls must happen before job2's.
        """
        # Minimal attribute bag standing in for a real Job object.
        class Object():
            pass

        autotest_path = 'htap_tsetotua'
        autoserv_command = os.path.join(autotest_path, 'server', 'autoserv')
        remote = 'etomer'
        results_dir = '/tmp/fakeresults'
        fast_mode = False
        # Expected per-job result dirs: 'results-<id padded to 1 digit>-<name>'.
        job1_results_dir = '/tmp/fakeresults/results-1-gilbert'
        job2_results_dir = '/tmp/fakeresults/results-2-sullivan'
        args = 'matey'
        expected_args_sublist = ['--args', args]
        experimental_keyval = {constants.JOB_EXPERIMENTAL_KEY: False}
        self.mox = mox.Mox()

        # Create some dummy job objects.
        job1 = Object()
        job2 = Object()
        setattr(job1, 'control_type', 'cLiEnT')
        setattr(job1, 'control_file', 'c1')
        setattr(job1, 'id', 1)
        setattr(job1, 'name', 'gilbert')
        setattr(job1, 'keyvals', experimental_keyval)

        setattr(job2, 'control_type', 'Server')
        setattr(job2, 'control_file', 'c2')
        setattr(job2, 'id', 2)
        setattr(job2, 'name', 'sullivan')
        setattr(job2, 'keyvals', experimental_keyval)

        id_digits = 1

        # Stub out subprocess.Popen and wait calls.
        # Make them expect correct arguments.
        def fake_readline():
            return b''

        mock_process_1 = self.mox.CreateMock(subprocess.Popen)
        mock_process_2 = self.mox.CreateMock(subprocess.Popen)
        # NOTE: `file` is the Python 2 builtin file type; this test is
        # Python-2 only as written.
        fake_stdout = self.mox.CreateMock(file)
        fake_returncode = 0
        mock_process_1.stdout = fake_stdout
        mock_process_1.returncode = fake_returncode
        mock_process_2.stdout = fake_stdout
        mock_process_2.returncode = fake_returncode

        self.mox.StubOutWithMock(os, 'makedirs')
        self.mox.StubOutWithMock(utils, 'write_keyval')
        self.mox.StubOutWithMock(subprocess, 'Popen')

        # --- Record phase: expectations for job1 (client => '-c' flag). ---
        os.makedirs(job1_results_dir)
        utils.write_keyval(job1_results_dir, experimental_keyval)
        arglist_1 = [
            autoserv_command, '-p', '-r', job1_results_dir, '-m', remote,
            '--no_console_prefix', '-l', 'gilbert', '-c'
        ]
        # The command must start with arglist_1 and contain '--args matey'
        # somewhere after it.
        subprocess.Popen(mox.And(StartsWithList(arglist_1),
                                 ContainsSublist(expected_args_sublist)),
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT).AndReturn(mock_process_1)
        mock_process_1.stdout.readline().AndReturn(b'')
        mock_process_1.wait().AndReturn(0)

        # --- Expectations for job2 (server => '-s' flag). ---
        os.makedirs(job2_results_dir)
        utils.write_keyval(job2_results_dir, experimental_keyval)
        arglist_2 = [
            autoserv_command, '-p', '-r', job2_results_dir, '-m', remote,
            '--no_console_prefix', '-l', 'sullivan', '-s'
        ]
        subprocess.Popen(mox.And(StartsWithList(arglist_2),
                                 ContainsSublist(expected_args_sublist)),
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT).AndReturn(mock_process_2)
        mock_process_2.stdout.readline().AndReturn(b'')
        mock_process_2.wait().AndReturn(0)

        # Test run_job.
        self.mox.ReplayAll()
        code, job_res = test_runner_utils.run_job(job1, remote, autotest_path,
                                                  results_dir, fast_mode,
                                                  id_digits, 0, None, args)
        self.assertEqual(job_res, job1_results_dir)
        self.assertEqual(code, 0)
        code, job_res = test_runner_utils.run_job(job2, remote, autotest_path,
                                                  results_dir, fast_mode,
                                                  id_digits, 0, None, args)

        self.assertEqual(job_res, job2_results_dir)
        self.assertEqual(code, 0)
        self.mox.UnsetStubs()
        self.mox.VerifyAll()
        self.mox.ResetAll()