    def test_success(self):
        tasklist = self._setup_common()

        for task in tasklist:
            task.fork_waitfor.expect_call(timeout=None).and_return(0)
            (subcommand.cPickle.load.expect_call(task.result_pickle)
                    .and_return(6))
            task.result_pickle.close.expect_call()

        subcommand.parallel(tasklist)
        self.god.check_playback()
Example n. 2
    def test_success(self):
        tasklist = self._setup_common()

        for task in tasklist:
            task.fork_waitfor.expect_call(timeout=None).and_return(0)
            (subcommand.cPickle.load.expect_call(
                task.result_pickle).and_return(6))
            task.result_pickle.close.expect_call()

        subcommand.parallel(tasklist)
        self.god.check_playback()
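The expectations recorded above imply roughly what parallel() does with each task: fork the child, wait for it, unpickle its return value from task.result_pickle, and close the pickle file. A minimal sketch of that flow, assuming task objects with the mocked interface; this only mirrors what the playback encodes (the real autotest implementation may fork all children before waiting), and parallel_sketch is a hypothetical name:

import cPickle  # Python 2, as in the tests above

def parallel_sketch(tasklist, timeout=None, return_results=False):
    # Sketch only: walk the recorded expectations one task at a time.
    results = []
    for task in tasklist:
        task.fork_waitfor(timeout=timeout)          # fork the child and wait
        result = cPickle.load(task.result_pickle)   # child's pickled return value
        task.result_pickle.close()
        results.append(result)
    if return_results:
        return results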
Example n. 3
    def run_once(self, pair, udp, bidirectional, time, stream_list):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a label for the server-side tests.
        server_label = 'net_server'

        tagname = "%s_%s" % (pair[0], pair[1])
        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # Ensure the client doesn't have the server label.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IPFilters if they are present.
        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest_remote.Autotest(server)
        client_at = autotest_remote.Autotest(client)

        template = ''.join([
            "job.run_test('iperf', server_ip='%s', client_ip=",
            "'%s', role='%s', udp=%s, bidirectional=%s,",
            "test_time=%d, stream_list=%s, tag='%s')"
        ])

        server_control_file = template % (server.ip, client.ip, 'server', udp,
                                          bidirectional, time, stream_list,
                                          tagname)
        client_control_file = template % (server.ip, client.ip, 'client', udp,
                                          bidirectional, time, stream_list,
                                          tagname)

        server_command = subcommand.subcommand(
            server_at.run, [server_control_file, server.hostname])
        client_command = subcommand.subcommand(
            client_at.run, [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
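For reference, this is what the template expands to once the role, addresses, and test parameters are substituted; the host addresses and argument values below are invented for illustration. Note that the joined pieces leave no space after bidirectional=%s,:

# Illustration only; the IPs and argument values are made up.
template = ''.join([
    "job.run_test('iperf', server_ip='%s', client_ip=",
    "'%s', role='%s', udp=%s, bidirectional=%s,",
    "test_time=%d, stream_list=%s, tag='%s')"
])
print(template % ('10.0.0.1', '10.0.0.2', 'server', False, False,
                  60, [1, 2], '10.0.0.1_10.0.0.2'))
# Printed as one line (wrapped here):
# job.run_test('iperf', server_ip='10.0.0.1', client_ip='10.0.0.2',
#     role='server', udp=False, bidirectional=False,test_time=60,
#     stream_list=[1, 2], tag='10.0.0.1_10.0.0.2')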
Example n. 4
    def upload_pkg(self,
                   pkg_path,
                   upload_path=None,
                   update_checksum=False,
                   timeout=300):
        from autotest.server import subcommand
        if upload_path:
            upload_path_list = [upload_path]
            self.upkeep(upload_path_list)
        elif len(self.upload_paths) > 0:
            self.upkeep()
            upload_path_list = self.upload_paths
        else:
            raise error.PackageUploadError("Invalid Upload Path specified")

        if update_checksum:
            # get the packages' checksum file and update it with the current
            # package's checksum
            self.update_checksum(pkg_path)

        commands = []
        for path in upload_path_list:
            commands.append(
                subcommand.subcommand(self.upload_pkg_parallel,
                                      (pkg_path, path, update_checksum)))

        results = subcommand.parallel(commands, timeout, return_results=True)
        for result in results:
            if result:
                print str(result)
Example n. 5
    def upload_pkg(self, pkg_path, upload_path=None, update_checksum=False,
                   timeout=300):
        from autotest.server import subcommand
        if upload_path:
            upload_path_list = [upload_path]
            self.upkeep(upload_path_list)
        elif len(self.upload_paths) > 0:
            self.upkeep()
            upload_path_list = self.upload_paths
        else:
            raise error.PackageUploadError("Invalid Upload Path specified")

        if update_checksum:
            # get the packages' checksum file and update it with the current
            # package's checksum
            self.update_checksum(pkg_path)

        commands = []
        for path in upload_path_list:
            commands.append(subcommand.subcommand(self.upload_pkg_parallel,
                                                  (pkg_path, path,
                                                   update_checksum)))

        results = subcommand.parallel(commands, timeout, return_results=True)
        for result in results:
            if result:
                print str(result)
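The same fan-out shape reduced to its essentials: one subcommand per destination path, a bounded parallel() run, and only non-None results (presumed failures) reported. copy_pkg and the paths are hypothetical stand-ins for upload_pkg_parallel above; the sketch assumes the autotest server libraries are importable:

from autotest.server import subcommand

def copy_pkg(pkg_path, dest_path, update_checksum):
    # Stand-in worker: return None on success, or an error object
    # describing the failure (mirroring the pattern above).
    return None

upload_paths = ['/srv/repo/a', '/srv/repo/b']        # hypothetical paths
commands = [subcommand.subcommand(copy_pkg, ('/tmp/pkg.tar.bz2', path, False))
            for path in upload_paths]

# 300-second timeout, results returned in command order.
results = subcommand.parallel(commands, 300, return_results=True)
for result in results:
    if result:
        print(str(result))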
Example n. 6
    def run_once(self, pair, buffer, upper_bound, variance):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a platform label for the server side of the tests.
        server_label = 'net_server'

        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # If client has the server_label, then swap server and client.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IP Filters if they are enabled.
        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest_remote.Autotest(server)
        client_at = autotest_remote.Autotest(client)

        template = ''.join([
            "job.run_test('netpipe', server_ip='%s', ",
            "client_ip='%s', role='%s', bidirectional=True, ",
            "buffer_size=%d, upper_bound=%d,"
            "perturbation_size=%d)"
        ])

        server_control_file = template % (server.ip, client.ip, 'server',
                                          buffer, upper_bound, variance)
        client_control_file = template % (server.ip, client.ip, 'client',
                                          buffer, upper_bound, variance)

        server_command = subcommand.subcommand(
            server_at.run, [server_control_file, server.hostname])
        client_command = subcommand.subcommand(
            client_at.run, [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
Example n. 7
    def run_once(self, pair, udp, bidirectional, time, stream_list):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a label for the server-side tests.
        server_label = 'net_server'

        tagname = "%s_%s" % (pair[0], pair[1])
        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # Ensure the client doesn't have the server label.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IPFilters if they are present.
        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest_remote.Autotest(server)
        client_at = autotest_remote.Autotest(client)

        template = ''.join(["job.run_test('iperf', server_ip='%s', client_ip=",
                            "'%s', role='%s', udp=%s, bidirectional=%s,",
                            "test_time=%d, stream_list=%s, tag='%s')"])

        server_control_file = template % (server.ip, client.ip, 'server', udp,
                                          bidirectional, time, stream_list,
                                          tagname)
        client_control_file = template % (server.ip, client.ip, 'client', udp,
                                          bidirectional, time, stream_list,
                                          tagname)

        server_command = subcommand.subcommand(server_at.run,
                                               [server_control_file, server.hostname])
        client_command = subcommand.subcommand(client_at.run,
                                               [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
Example n. 8
    def run_once(self, pair, test, time, stream_list, cycles):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a label for the server side tests.
        server_label = 'net_server'

        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # If client has the server_label, then swap server and client.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IPFilters if they are enabled.
        for m in [client, server]:
            status = m.run('/sbin/iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest_remote.Autotest(server)
        client_at = autotest_remote.Autotest(client)

        template = ''.join([
            "job.run_test('netperf2', server_ip='%s', ",
            "client_ip='%s', role='%s', test='%s', ",
            "test_time=%d, stream_list=%s, tag='%s', ", "iterations=%d)"
        ])

        server_control_file = template % (server.ip, client.ip, 'server', test,
                                          time, stream_list, test, cycles)
        client_control_file = template % (server.ip, client.ip, 'client', test,
                                          time, stream_list, test, cycles)

        server_command = subcommand.subcommand(
            server_at.run, [server_control_file, server.hostname])
        client_command = subcommand.subcommand(
            client_at.run, [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('/sbin/iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
Example n. 9
    def run_once(self, pair, test, time, stream_list, cycles):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a label for the server side tests.
        server_label = 'net_server'

        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # If client has the server_label, then swap server and client.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IPFilters if they are enabled.
        for m in [client, server]:
            status = m.run('/sbin/iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest_remote.Autotest(server)
        client_at = autotest_remote.Autotest(client)

        template = ''.join(["job.run_test('netperf2', server_ip='%s', ",
                            "client_ip='%s', role='%s', test='%s', ",
                            "test_time=%d, stream_list=%s, tag='%s', ",
                            "iterations=%d)"])

        server_control_file = template % (server.ip, client.ip, 'server', test,
                                          time, stream_list, test, cycles)
        client_control_file = template % (server.ip, client.ip, 'client', test,
                                          time, stream_list, test, cycles)

        server_command = subcommand.subcommand(
            server_at.run, [server_control_file, server.hostname])
        client_command = subcommand.subcommand(
            client_at.run, [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('/sbin/iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
Example n. 10
    def run_once(self, pair, buffer, upper_bound, variance):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a platform label for the server side of the tests.
        server_label = 'net_server'

        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # If client has the server_label, then swap server and client.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IP Filters if they are enabled.
        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest_remote.Autotest(server)
        client_at = autotest_remote.Autotest(client)

        template = ''.join(["job.run_test('netpipe', server_ip='%s', ",
                            "client_ip='%s', role='%s', bidirectional=True, ",
                            "buffer_size=%d, upper_bound=%d,"
                            "perturbation_size=%d)"])

        server_control_file = template % (server.ip, client.ip, 'server',
                                          buffer, upper_bound, variance)
        client_control_file = template % (server.ip, client.ip, 'client',
                                          buffer, upper_bound, variance)

        server_command = subcommand.subcommand(server_at.run,
                                               [server_control_file, server.hostname])
        client_command = subcommand.subcommand(client_at.run,
                                               [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
Example n. 11
    def test_return_results(self):
        tasklist = self._setup_common()

        tasklist[0].fork_waitfor.expect_call(timeout=None).and_return(0)
        (subcommand.cPickle.load.expect_call(tasklist[0].result_pickle)
                .and_return(6))
        tasklist[0].result_pickle.close.expect_call()

        error = Exception('fail')
        tasklist[1].fork_waitfor.expect_call(timeout=None).and_return(1)
        (subcommand.cPickle.load.expect_call(tasklist[1].result_pickle)
                .and_return(error))
        tasklist[1].result_pickle.close.expect_call()

        self.assertEquals(subcommand.parallel(tasklist, return_results=True),
                          [6, error])
        self.god.check_playback()
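As the playback above encodes, with return_results=True a failing child does not make parallel() itself raise: the exception it hit comes back in the result list alongside the other children's return values. A usage sketch consistent with that; ok and boom are stand-in workers, and the exact failure representation is assumed from the mock rather than verified against the real implementation:

from autotest.server import subcommand

def ok():
    return 6

def boom():
    raise Exception('fail')

tasks = [subcommand.subcommand(ok, []), subcommand.subcommand(boom, [])]
results = subcommand.parallel(tasks, return_results=True)
# Expected shape, per the mocked playback: [6, Exception('fail')]
print(results)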
Example n. 12
    def test_return_results(self):
        tasklist = self._setup_common()

        tasklist[0].fork_waitfor.expect_call(timeout=None).and_return(0)
        (subcommand.cPickle.load.expect_call(
            tasklist[0].result_pickle).and_return(6))
        tasklist[0].result_pickle.close.expect_call()

        error = Exception('fail')
        tasklist[1].fork_waitfor.expect_call(timeout=None).and_return(1)
        (subcommand.cPickle.load.expect_call(
            tasklist[1].result_pickle).and_return(error))
        tasklist[1].result_pickle.close.expect_call()

        self.assertEquals(subcommand.parallel(tasklist, return_results=True),
                          [6, error])
        self.god.check_playback()
Example n. 13
    def run_once(self, machines, extra_params, cycles):
        VIRT_TYPE = 'qemu'
        VIRT_DIR = data_dir.get_root_dir()
        TEST_DIR = data_dir.get_backend_dir(VIRT_TYPE)
        PROV_DIR = data_dir.get_test_provider_dir('io-github-autotest-qemu')
        SHARED_DIR = os.path.join(VIRT_DIR, 'shared')
        PROV_DIR = os.path.join(PROV_DIR, VIRT_TYPE)

        asset.download_test_provider("io-github-autotest-qemu")
        bootstrap.create_config_files(TEST_DIR, SHARED_DIR, interactive=False)
        bootstrap.create_config_files(TEST_DIR, PROV_DIR, interactive=False)
        bootstrap.create_subtests_cfg(VIRT_TYPE)
        bootstrap.create_guest_os_cfg(VIRT_TYPE)

        sys.path.insert(0, VIRT_DIR)

        CONTROL_MAIN_PART = """
testname = "virt"
bindir = os.path.join(job.testdir, testname)
job.install_pkg(testname, 'test', bindir)

qemu_test_dir = os.path.join(os.environ['AUTODIR'],'tests', 'virt')
sys.path.append(qemu_test_dir)
"""
        logging.info("QEMU test running on hosts %s\n", machines)

        _hosts = {}
        for machine in machines:
            _hosts[machine] = Machines(hosts.create_host(machine))

        for host in _hosts.itervalues():
            host.at = autotest_remote.Autotest(host.host)

        cfg_file = os.path.join(TEST_DIR, "cfg", "multi-host-tests.cfg")
        logging.info("CONFIG FILE: '%s' is used for generating"
                     " configuration." % cfg_file)

        if not os.path.exists(cfg_file):
            specific_subdirs = asset.get_test_provider_subdirs(
                "multi_host_migration")[0]
            orig_cfg_file = os.path.join(specific_subdirs, "cfg",
                                         "multi-host-tests.cfg")
            if os.path.exists(orig_cfg_file):
                shutil.copy(orig_cfg_file, cfg_file)
            else:
                raise error.JobError("Config file %s was not found" % cfg_file)

        # Get test set (dictionary list) from the configuration file
        parser = cartesian_config.Parser()
        parser.parse_file(cfg_file)
        parser.parse_string(extra_params)
        test_dicts = parser.get_dicts()

        ips = []
        for machine in machines:
            host = _hosts[machine]
            ips.append(host.host.ip)

        logging.info("")
        for i, params in enumerate(test_dicts):
            logging.info("Test    %d:  %s" % (i, params.get("shortname")))
        logging.info("")

        test_dicts = parser.get_dicts()

        test_dicts_ar = [utils_params.Params(x) for x in test_dicts]

        if not test_dicts_ar:
            raise error.TestNAError("Impossible to start any test with "
                                    "this configuration.")

        for params in test_dicts_ar:

            params['hosts'] = ips

            for vm in params.get("vms").split():
                for nic in params.get('nics', "").split():
                    params['mac_%s_%s' % (nic, vm)] = generate_mac_address()

            params['master_images_clone'] = "image1"
            params['kill_vm'] = "yes"

            s_host = _hosts[machines[0]]
            s_host.params = params.object_params("host1")
            s_host.params['clone_master'] = "yes"
            s_host.params['hostid'] = ips[0]

            for host_id, machine in enumerate(machines[1:]):
                host = _hosts[machine]
                host.params = params.object_params("host%s" % (host_id + 2))
                params['not_preprocess'] = "yes"
                host.params['clone_master'] = "no"
                host.params['hostid'] = ips[host_id + 1]

            # Report the parameters we've received
            logging.debug("Test parameters:")
            keys = params.keys()
            keys.sort()
            for key in keys:
                logging.debug("    %s = %s", key, params[key])

            for machine in machines:
                host = _hosts[machine]
                host.control = CONTROL_MAIN_PART

            for machine in machines:
                host = _hosts[machine]
                host.control += ("job.run_test('virt', tag='%s', params=%s)" %
                                 (host.params['shortname'], host.params))

            logging.debug('Master control file:\n%s', _hosts[machines[0]].control)
            for machine in machines[1:]:
                host = _hosts[machine]
                logging.debug('Slave control file:\n%s', host.control)

            commands = []

            for machine in machines:
                host = _hosts[machine]
                result_path = os.path.join(self.resultsdir,
                                           host.params["shortname"],
                                           host.host.hostname)
                commands.append(subcommand.subcommand(host.at.run,
                                                      [host.control,
                                                       result_path]))

            try:
                subcommand.parallel(commands)
            except error.AutoservError, e:
                logging.error(e)
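To make the control-file assembly above concrete, this is the text a single host ends up running: the shared CONTROL_MAIN_PART header plus one appended job.run_test line. The shortname and the trimmed params dict are hypothetical; in the test the params value is the host's full Cartesian-config dictionary:

# Illustration only: assembled control text for one hypothetical host.
CONTROL_MAIN_PART = """
testname = "virt"
bindir = os.path.join(job.testdir, testname)
job.install_pkg(testname, 'test', bindir)

qemu_test_dir = os.path.join(os.environ['AUTODIR'],'tests', 'virt')
sys.path.append(qemu_test_dir)
"""

params = {'shortname': 'migrate.default', 'hostid': '192.168.122.10'}
control = CONTROL_MAIN_PART + ("job.run_test('virt', tag='%s', params=%s)" %
                               (params['shortname'], params))
print(control)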
Example n. 14
    def run_once(self, machines, extra_params, cycles):
        VIRT_TYPE = 'qemu'
        VIRT_DIR = data_dir.get_root_dir()
        TEST_DIR = data_dir.get_backend_dir(VIRT_TYPE)
        PROV_DIR = data_dir.get_test_provider_dir('io-github-autotest-qemu')
        SHARED_DIR = os.path.join(VIRT_DIR, 'shared')
        PROV_DIR = os.path.join(PROV_DIR, VIRT_TYPE)

        asset.download_test_provider("io-github-autotest-qemu")
        bootstrap.create_config_files(TEST_DIR, SHARED_DIR, interactive=False)
        bootstrap.create_config_files(TEST_DIR, PROV_DIR, interactive=False)
        bootstrap.create_subtests_cfg(VIRT_TYPE)
        bootstrap.create_guest_os_cfg(VIRT_TYPE)

        sys.path.insert(0, VIRT_DIR)

        CONTROL_MAIN_PART = """
testname = "virt"
bindir = os.path.join(job.testdir, testname)
job.install_pkg(testname, 'test', bindir)

qemu_test_dir = os.path.join(os.environ['AUTODIR'],'tests', 'virt')
sys.path.append(qemu_test_dir)
"""
        logging.info("QEMU test running on hosts %s\n", machines)

        _hosts = {}
        for machine in machines:
            _hosts[machine] = Machines(hosts.create_host(machine))

        cpu_number = 2**31
        for host in _hosts.itervalues():
            host.at = autotest_remote.Autotest(host.host)
            cpu_number = min(host.host.get_num_cpu(), cpu_number)

        cfg_file = os.path.join(TEST_DIR, "cfg", "multi-host-tests.cfg")
        logging.info("CONFIG FILE: '%s' is used for generating"
                     " configuration." % cfg_file)

        if not os.path.exists(cfg_file):
            specific_subdirs = asset.get_test_provider_subdirs("qemu")[0]
            orig_cfg_file = os.path.join(specific_subdirs, "cfg",
                                         "multi-host-tests.cfg")
            if os.path.exists(orig_cfg_file):
                shutil.copy(orig_cfg_file, cfg_file)
            else:
                raise error.JobError("Config file %s was not found" % cfg_file)

        # Get test set (dictionary list) from the configuration file
        parser = cartesian_config.Parser()
        parser.parse_file(cfg_file)
        parser.parse_string(extra_params)
        test_dicts = parser.get_dicts()

        ips = []
        for machine in machines:
            host = _hosts[machine]
            ips.append(host.host.ip)

        machine_hold_vm = machines[0]

        logging.info("")
        for i, params in enumerate(test_dicts):
            logging.info("Test    %d:  %s" % (i, params.get("shortname")))
        logging.info("")

        test_dicts = parser.get_dicts()

        test_dicts_ar = [utils_params.Params(x) for x in test_dicts]

        if not test_dicts_ar:
            raise error.TestNAError("Impossible to start any test with "
                                    "this configuration.")

        keep_macs = {}
        random_cpu_number = random.randint(1, cpu_number)
        for params in test_dicts_ar:

            params['hosts'] = ips
            if params.get("use_randome_smp") == "yes":
                params['smp'] = random_cpu_number

            for vm in params.get("vms").split():
                for nic in params.get('nics', "").split():
                    if 'mac_%s_%s' % (nic, vm) not in keep_macs:
                        keep_macs['mac_%s_%s' %
                                  (nic, vm)] = generate_mac_address()
                    params['mac_%s_%s' % (nic, vm)] = keep_macs['mac_%s_%s' %
                                                                (nic, vm)]

            s_host = _hosts[machine_hold_vm]
            s_host.params = params.object_params("host1")
            s_host.params['clone_master'] = "yes"
            s_host.params['hostid'] = ips[machines.index(machine_hold_vm)]

            for host_id, machine in enumerate(machines):
                if machine != machine_hold_vm:
                    host = _hosts[machine]
                    host_name = "host%s" % (host_id + 2)
                    host.params = params.object_params("host%s" %
                                                       (host_id + 2))
                    params['not_preprocess'] = "yes"
                    host.params['clone_master'] = "no"
                    host.params['hostid'] = ips[host_id]

            # Report the parameters we've received
            logging.debug("Test parameters:")
            keys = params.keys()
            keys.sort()
            for key in keys:
                logging.debug("    %s = %s", key, params[key])

            for machine in machines:
                host = _hosts[machine]
                host.control = CONTROL_MAIN_PART

            if params.get("need_multi_host") == "yes":
                for machine in machines:
                    host = _hosts[machine]
                    host.control += ("job.run_test('virt', tag='%s',"
                                     " params=%s)" %
                                     (host.params['shortname'], host.params))

                logging.debug('Master control file:\n%s',
                              _hosts[machine_hold_vm].control)
                for machine in machines:
                    if machine != machine_hold_vm:
                        host = _hosts[machine]
                        logging.debug('Slave control file:\n%s', host.control)

                commands = []

                for machine in machines:
                    host = _hosts[machine]
                    result_path = os.path.join(self.resultsdir,
                                               host.host.hostname,
                                               host.params["shortname"])
                    cmd = subcommand.subcommand(host.at.run,
                                                [host.control, result_path])
                    commands.append(cmd)
            else:
                host = _hosts[machine_hold_vm]
                result_path = os.path.join(self.resultsdir, host.host.hostname,
                                           host.params["shortname"])
                host.control += ("job.run_test('virt', tag='%s', params=%s)" %
                                 (host.params['shortname'], host.params))
                logging.debug("Run control file:\n %s", host.control)
                commands = [
                    subcommand.subcommand(host.at.run,
                                          [host.control, result_path])
                ]
            try:
                subcommand.parallel(commands)
                if params.get("vm_migrated") == "yes":
                    # This update is based on the logic of the
                    # migration_multi_host test case, which uses machines[0]
                    # as src and machines[1] as dst. It may need updating for
                    # a different design; just keep the machines and ips
                    # lists in the right order for the following tests.
                    machine_hold_vm = machines[1]
                    ip_hold_vm = ips[1]
                    machines.remove(machine_hold_vm)
                    ips.remove(ip_hold_vm)

                    if params.get("random_dst_host") == "yes":
                        my_random = random.SystemRandom()
                        dst_machine = my_random.choice(machines)
                        dst_ip = ips[machines.index(dst_machine)]
                    else:
                        dst_machine = machines[0]
                        dst_ip = ips[0]
                    machines.remove(dst_machine)
                    ips.remove(dst_ip)

                    machines.insert(0, machine_hold_vm)
                    machines.insert(1, dst_machine)
                    ips.insert(0, ip_hold_vm)
                    ips.insert(1, dst_ip)

            except error.AutoservError, e:
                logging.error(e)
Example n. 15
    def run_once(self, machines, extra_params, cycles):
        VIRT_TYPE = 'qemu'
        VIRT_DIR = data_dir.get_root_dir()
        TEST_DIR = data_dir.get_backend_dir(VIRT_TYPE)
        PROV_DIR = data_dir.get_test_provider_dir('io-github-autotest-qemu')
        SHARED_DIR = os.path.join(VIRT_DIR, 'shared')
        PROV_DIR = os.path.join(PROV_DIR, VIRT_TYPE)

        asset.download_test_provider("io-github-autotest-qemu")
        bootstrap.create_config_files(TEST_DIR, SHARED_DIR, interactive=False)
        bootstrap.create_config_files(TEST_DIR, PROV_DIR, interactive=False)
        bootstrap.create_subtests_cfg(VIRT_TYPE)
        bootstrap.create_guest_os_cfg(VIRT_TYPE)

        sys.path.insert(0, VIRT_DIR)

        CONTROL_MAIN_PART = """
testname = "virt"
bindir = os.path.join(job.testdir, testname)
job.install_pkg(testname, 'test', bindir)

qemu_test_dir = os.path.join(os.environ['AUTODIR'],'tests', 'virt')
sys.path.append(qemu_test_dir)
"""
        logging.info("QEMU test running on hosts %s\n", machines)

        _hosts = {}
        for machine in machines:
            _hosts[machine] = Machines(hosts.create_host(machine))

        cpu_number = 2 ** 31
        for host in _hosts.itervalues():
            host.at = autotest_remote.Autotest(host.host)
            cpu_number = min(host.host.get_num_cpu(), cpu_number)

        cfg_file = os.path.join(TEST_DIR, "cfg", "multi-host-tests.cfg")
        logging.info("CONFIG FILE: '%s' is used for generating"
                     " configuration." % cfg_file)

        if not os.path.exists(cfg_file):
            specific_subdirs = asset.get_test_provider_subdirs("qemu")[0]
            orig_cfg_file = os.path.join(specific_subdirs, "cfg",
                                         "multi-host-tests.cfg")
            if os.path.exists(orig_cfg_file):
                shutil.copy(orig_cfg_file, cfg_file)
            else:
                raise error.JobError("Config file %s was not found" % cfg_file)

        # Get test set (dictionary list) from the configuration file
        parser = cartesian_config.Parser()
        parser.parse_file(cfg_file)
        parser.parse_string(extra_params)
        test_dicts = parser.get_dicts()

        ips = []
        for machine in machines:
            host = _hosts[machine]
            ips.append(host.host.ip)

        machine_hold_vm = machines[0]

        logging.info("")
        for i, params in enumerate(test_dicts):
            logging.info("Test    %d:  %s" % (i, params.get("shortname")))
        logging.info("")

        test_dicts = parser.get_dicts()

        test_dicts_ar = [utils_params.Params(x) for x in test_dicts]

        if not test_dicts_ar:
            raise error.TestNAError("Impossible to start any test with "
                                    "this configuration.")

        keep_macs = {}
        random_cpu_number = random.randint(1, cpu_number)
        for params in test_dicts_ar:

            params['hosts'] = ips
            if params.get("use_randome_smp") == "yes":
                params['smp'] = random_cpu_number

            for vm in params.get("vms").split():
                for nic in params.get('nics', "").split():
                    if 'mac_%s_%s' % (nic, vm) not in keep_macs:
                        keep_macs['mac_%s_%s' % (nic, vm)] = generate_mac_address()
                    params['mac_%s_%s' % (nic, vm)] = keep_macs['mac_%s_%s' % (nic, vm)]

            s_host = _hosts[machine_hold_vm]
            s_host.params = params.object_params("host1")
            s_host.params['clone_master'] = "yes"
            s_host.params['hostid'] = ips[machines.index(machine_hold_vm)]

            for host_id, machine in enumerate(machines):
                if machine != machine_hold_vm:
                    host = _hosts[machine]
                    host_name = "host%s" % (host_id + 2)
                    host.params = params.object_params("host%s" % (host_id + 2))
                    params['not_preprocess'] = "yes"
                    host.params['clone_master'] = "no"
                    host.params['hostid'] = ips[host_id]

            # Report the parameters we've received
            logging.debug("Test parameters:")
            keys = params.keys()
            keys.sort()
            for key in keys:
                logging.debug("    %s = %s", key, params[key])

            for machine in machines:
                host = _hosts[machine]
                host.control = CONTROL_MAIN_PART

            if params.get("need_multi_host") == "yes":
                for machine in machines:
                    host = _hosts[machine]
                    host.control += ("job.run_test('virt', tag='%s',"
                                     " params=%s)" %
                                     (host.params['shortname'], host.params))

                logging.debug('Master control file:\n%s',
                               _hosts[machine_hold_vm].control)
                for machine in machines:
                    if machine != machine_hold_vm:
                        host = _hosts[machine]
                        logging.debug('Slave control file:\n%s', host.control)

                commands = []

                for machine in machines:
                    host = _hosts[machine]
                    result_path = os.path.join(self.resultsdir,
                                               host.host.hostname,
                                               host.params["shortname"])
                    cmd = subcommand.subcommand(host.at.run,
                                                [host.control,
                                                 result_path])
                    commands.append(cmd)
            else:
                host = _hosts[machine_hold_vm]
                result_path = os.path.join(self.resultsdir,
                                           host.host.hostname,
                                           host.params["shortname"])
                host.control += ("job.run_test('virt', tag='%s', params=%s)" %
                                 (host.params['shortname'], host.params))
                logging.debug("Run control file:\n %s", host.control)
                commands = [subcommand.subcommand(host.at.run,
                                                  [host.control,
                                                   result_path])]
            try:
                subcommand.parallel(commands)
                if params.get("vm_migrated") == "yes":
                    # This update is based on the logic of the
                    # migration_multi_host test case, which uses machines[0]
                    # as src and machines[1] as dst. It may need updating for
                    # a different design; just keep the machines and ips
                    # lists in the right order for the following tests.
                    machine_hold_vm = machines[1]
                    ip_hold_vm = ips[1]
                    machines.remove(machine_hold_vm)
                    ips.remove(ip_hold_vm)

                    if params.get("random_dst_host") == "yes":
                        my_random = random.SystemRandom()
                        dst_machine = my_random.choice(machines)
                        dst_ip = ips[machines.index(dst_machine)]
                    else:
                        dst_machine = machines[0]
                        dst_ip = ips[0]
                    machines.remove(dst_machine)
                    ips.remove(dst_ip)

                    machines.insert(0, machine_hold_vm)
                    machines.insert(1, dst_machine)
                    ips.insert(0, ip_hold_vm)
                    ips.insert(1, dst_ip)

            except error.AutoservError, e:
                logging.error(e)