Example #1
    def run_once(self, pair, udp, bidirectional, time, stream_list):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a label for the server side tests.
        server_label = 'net_server'

        tagname = "%s_%s" % (pair[0], pair[1])
        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # Ensure the client doesn't have the server label.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IPFilters if they are present.
        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest_remote.Autotest(server)
        client_at = autotest_remote.Autotest(client)

        template = ''.join([
            "job.run_test('iperf', server_ip='%s', client_ip=",
            "'%s', role='%s', udp=%s, bidirectional=%s,",
            "test_time=%d, stream_list=%s, tag='%s')"
        ])

        server_control_file = template % (server.ip, client.ip, 'server', udp,
                                          bidirectional, time, stream_list,
                                          tagname)
        client_control_file = template % (server.ip, client.ip, 'client', udp,
                                          bidirectional, time, stream_list,
                                          tagname)

        server_command = subcommand.subcommand(
            server_at.run, [server_control_file, server.hostname])
        client_command = subcommand.subcommand(
            client_at.run, [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
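
The run_once() above builds two control files from one template and runs them concurrently, one per machine. A minimal sketch of a top-level autoserv control file that could drive such a pair test follows; the wrapper test name 'iperf_pair', the host names and the parameter values are assumptions for illustration only, and the 'job' object is provided by autoserv when a control file runs.

# Hedged sketch of a server control file invoking a pair test like the
# one above. 'iperf_pair' is an assumed wrapper-test name, and the host
# names and parameter values are placeholders.
pair = ['netserver.example.com', 'netclient.example.com']

job.run_test('iperf_pair',           # hypothetical test directory name
             pair=pair,
             udp=False,              # run TCP streams
             bidirectional=True,
             time=60,                # seconds per stream configuration
             stream_list=[1, 2, 4])  # parallel stream counts to sweep
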
Example #2
File: test.py Project: yumingfei/autotest
    def _install(self):
        if not self.host:
            from autotest.server import hosts, autotest_remote
            self.host = hosts.create_host(self.job.machines[0],
                                          auto_monitor=False)
            try:
                tmp_dir = self.host.get_tmp_dir(parent="/tmp/sysinfo")
                self.autotest = autotest_remote.Autotest(self.host)
                self.autotest.install(autodir=tmp_dir)
                self.outputdir = self.host.get_tmp_dir()
            except:
                # if installation fails roll back the host
                try:
                    self.host.close()
                except:
                    logging.exception("Unable to close host %s",
                                      self.host.hostname)
                self.host = None
                self.autotest = None
                raise
        else:
            host = self.host

            # if autotest client dir does not exist, reinstall (it may have
            # been removed by the test code)
            autodir = host.get_autodir()
            if not autodir or not host.path_exists(autodir):
                self.autotest.install(autodir=autodir)

            # if the output dir does not exist, recreate it
            if not host.path_exists(self.outputdir):
                host.run('mkdir -p %s' % self.outputdir)

        return self.host, self.autotest, self.outputdir
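
The method returns a (host, autotest, outputdir) triple that the caller can use to run commands on the remote machine and to fetch collected files. A minimal sketch of one way that triple might be consumed is shown below; the command and the local destination path are placeholders, and Host.run() / Host.get_file() are assumed to be available on the host object as in the other examples.

# Hedged usage sketch for the triple returned by _install(); 'self' is
# assumed to be the same object that defines _install(), and the command
# and local destination path are placeholders.
host, at, outputdir = self._install()

# Collect something on the remote machine into the remote output dir.
host.run('dmesg > %s/dmesg.txt' % outputdir)

# Pull the remote output directory back to a local placeholder path.
host.get_file(outputdir + '/', '/tmp/sysinfo-results/')
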
Example #3
    def _install(self):
        if not self.host:
            from autotest.server import hosts, autotest_remote
            self.host = hosts.create_host(self.job.machines[0],
                                          auto_monitor=False)
            try:
                tmp_dir = self.host.get_tmp_dir(parent="/tmp/sysinfo")
                self.autotest = autotest_remote.Autotest(self.host)
                self.autotest.install(autodir=tmp_dir)
                self.outputdir = self.host.get_tmp_dir()
            except:
                # if installation fails roll back the host
                try:
                    self.host.close()
                except:
                    logging.exception("Unable to close host %s",
                                      self.host.hostname)
                self.host = None
                self.autotest = None
                raise
        else:
            host = self.host

            # if autotest client dir does not exist, reinstall (it may have
            # been removed by the test code)
            autodir = host.get_autodir()
            if not autodir or not host.path_exists(autodir):
                self.autotest.install(autodir=autodir)

            # if the output dir does not exist, recreate it
            if not host.path_exists(self.outputdir):
                host.run('mkdir -p %s' % self.outputdir)

        return self.host, self.autotest, self.outputdir
Example #4
    def run_once(self, pair, buffer, upper_bound, variance):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a platform label for the server side of tests.
        server_label = 'net_server'

        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # If client has the server_label, then swap server and client.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IP Filters if they are enabled.
        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest_remote.Autotest(server)
        client_at = autotest_remote.Autotest(client)

        template = ''.join([
            "job.run_test('netpipe', server_ip='%s', ",
            "client_ip='%s', role='%s', bidirectional=True, ",
            "buffer_size=%d, upper_bound=%d,"
            "perturbation_size=%d)"
        ])

        server_control_file = template % (server.ip, client.ip, 'server',
                                          buffer, upper_bound, variance)
        client_control_file = template % (server.ip, client.ip, 'client',
                                          buffer, upper_bound, variance)

        server_command = subcommand.subcommand(
            server_at.run, [server_control_file, server.hostname])
        client_command = subcommand.subcommand(
            client_at.run, [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
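
For reference, the rendered control-file text produced by the template above is a single job.run_test() call. The standalone snippet below formats the same template with placeholder values; note that the missing space before perturbation_size comes from the original string concatenation and is harmless, since the result is still valid Python.

# Standalone illustration of the template substitution above; the IP
# addresses and numbers are placeholders.
template = ''.join(["job.run_test('netpipe', server_ip='%s', ",
                    "client_ip='%s', role='%s', bidirectional=True, ",
                    "buffer_size=%d, upper_bound=%d,"
                    "perturbation_size=%d)"])

print(template % ('10.0.0.1', '10.0.0.2', 'client', 1048576, 1048576, 17))
# -> job.run_test('netpipe', server_ip='10.0.0.1', client_ip='10.0.0.2',
#    role='client', bidirectional=True, buffer_size=1048576,
#    upper_bound=1048576,perturbation_size=17)
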
Example #5
    def run_once(self, pair, udp, bidirectional, time, stream_list):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a label for the server side tests.
        server_label = 'net_server'

        tagname = "%s_%s" % (pair[0], pair[1])
        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # Ensure the client doesn't have the server label.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IPFilters if they are present.
        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest_remote.Autotest(server)
        client_at = autotest_remote.Autotest(client)

        template = ''.join(["job.run_test('iperf', server_ip='%s', client_ip=",
                            "'%s', role='%s', udp=%s, bidirectional=%s,",
                            "test_time=%d, stream_list=%s, tag='%s')"])

        server_control_file = template % (server.ip, client.ip, 'server', udp,
                                          bidirectional, time, stream_list,
                                          tagname)
        client_control_file = template % (server.ip, client.ip, 'client', udp,
                                          bidirectional, time, stream_list,
                                          tagname)

        server_command = subcommand.subcommand(server_at.run,
                                               [server_control_file, server.hostname])
        client_command = subcommand.subcommand(client_at.run,
                                               [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
Example #6
    def run_once(self, pair, test, time, stream_list, cycles):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a label for the server side tests.
        server_label = 'net_server'

        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # If client has the server_label, then swap server and client.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IPFilters if they are enabled.
        for m in [client, server]:
            status = m.run('/sbin/iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest_remote.Autotest(server)
        client_at = autotest_remote.Autotest(client)

        template = ''.join([
            "job.run_test('netperf2', server_ip='%s', ",
            "client_ip='%s', role='%s', test='%s', ",
            "test_time=%d, stream_list=%s, tag='%s', ", "iterations=%d)"
        ])

        server_control_file = template % (server.ip, client.ip, 'server', test,
                                          time, stream_list, test, cycles)
        client_control_file = template % (server.ip, client.ip, 'client', test,
                                          time, stream_list, test, cycles)

        server_command = subcommand.subcommand(
            server_at.run, [server_control_file, server.hostname])
        client_command = subcommand.subcommand(
            client_at.run, [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('/sbin/iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
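
The two control files are executed concurrently through autotest's subcommand module: each subcommand wraps a callable plus its argument list, and parallel() runs the wrapped calls in forked child processes and waits for all of them. A minimal sketch of the same pattern with a trivial function is shown below; the import path is an assumption based on how the surrounding examples import server-side modules.

# Hedged sketch of the fork-and-wait pattern used above, with a trivial
# function standing in for Autotest.run(). The import path is assumed.
from autotest.server import subcommand

def report(hostname, results_subdir):
    print('would run a control file on %s (results in %s)'
          % (hostname, results_subdir))

cmd_a = subcommand.subcommand(report, ['server-host', 'results/server'])
cmd_b = subcommand.subcommand(report, ['client-host', 'results/client'])

# parallel() starts both children and blocks until they have exited.
subcommand.parallel([cmd_a, cmd_b])
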
Example #7
    def run_once(self, pair, test, time, stream_list, cycles):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a label for the server side tests.
        server_label = 'net_server'

        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # If client has the server_label, then swap server and client.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)


        # Disable IPFilters if they are enabled.
        for m in [client, server]:
            status = m.run('/sbin/iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest_remote.Autotest(server)
        client_at = autotest_remote.Autotest(client)

        template = ''.join(["job.run_test('netperf2', server_ip='%s', ",
                            "client_ip='%s', role='%s', test='%s', ",
                            "test_time=%d, stream_list=%s, tag='%s', ",
                            "iterations=%d)"])

        server_control_file = template % (server.ip, client.ip, 'server', test,
                                          time, stream_list, test, cycles)
        client_control_file = template % (server.ip, client.ip, 'client', test,
                                          time, stream_list, test, cycles)

        server_command = subcommand.subcommand(server_at.run,
                                    [server_control_file, server.hostname])
        client_command = subcommand.subcommand(client_at.run,
                                    [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('/sbin/iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
Example #8
    def run_once(self, pair, buffer, upper_bound, variance):
        print "running on %s and %s\n" % (pair[0], pair[1])

        # Designate a platform label for the server side of tests.
        server_label = 'net_server'

        server = hosts.create_host(pair[0])
        client = hosts.create_host(pair[1])

        # If client has the server_label, then swap server and client.
        platform_label = client.get_platform_label()
        if platform_label == server_label:
            (server, client) = (client, server)

        # Disable IP Filters if they are enabled.
        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.disable_ipfilters()

        server_at = autotest_remote.Autotest(server)
        client_at = autotest_remote.Autotest(client)

        template = ''.join(["job.run_test('netpipe', server_ip='%s', ",
                            "client_ip='%s', role='%s', bidirectional=True, ",
                            "buffer_size=%d, upper_bound=%d,"
                            "perturbation_size=%d)"])

        server_control_file = template % (server.ip, client.ip, 'server',
                                          buffer, upper_bound, variance)
        client_control_file = template % (server.ip, client.ip, 'client',
                                          buffer, upper_bound, variance)

        server_command = subcommand.subcommand(server_at.run,
                                               [server_control_file, server.hostname])
        client_command = subcommand.subcommand(client_at.run,
                                               [client_control_file, client.hostname])

        subcommand.parallel([server_command, client_command])

        for m in [client, server]:
            status = m.run('iptables -L')
            if not status.exit_status:
                m.enable_ipfilters()
Example #9
 def initialize(self, vm_nodes, vm_host,
                force_repair=False,
                kernel="",
                initrd="",
                cmdline="",
                grub="grub"):
     logging.info("Repair nodes initializing...")
     self.grub = grub
     if len(vm_host):
         try:
             self.vm_host = hosts.create_host(vm_host)
         except Exception, e:
             raise error.TestError("failed to create host :" + str(e))
         if not self.vm_host.is_up():
             raise error.TestError("Host is not up")
Example #10
 def initialize(self,
                vm_nodes,
                vm_host,
                force_repair=False,
                kernel="",
                initrd="",
                cmdline="",
                grub="grub"):
     logging.info("Repair nodes initializing...")
     self.grub = grub
     if len(vm_host):
         try:
             self.vm_host = hosts.create_host(vm_host)
         except Exception, e:
             raise error.TestError("failed to create host :" + str(e))
         if not self.vm_host.is_up():
             raise error.TestError("Host is not up")
Example #11
    def _install_clients(self):
        """
        Install autotest_remote on any current job hosts.
        """
        in_use_hosts = set()
        # find hosts in use but not used by us
        for host in self.job.hosts:
            autodir = host.get_autodir()
            if not (autodir and autodir.startswith(PROFILER_TMPDIR)):
                in_use_hosts.add(host.hostname)
        logging.debug('Hosts currently in use: %s', in_use_hosts)

        # determine what valid host objects we already have installed
        profiler_hosts = set()
        for host, at, profiler_dir in list(self.installed_hosts.values()):
            if host.path_exists(profiler_dir):
                profiler_hosts.add(host.hostname)
            else:
                # the profiler was wiped out somehow, drop this install
                logging.warning('The profiler client on %s at %s was deleted',
                                host.hostname, profiler_dir)
                host.close()
                del self.installed_hosts[host.hostname]
        logging.debug('Hosts with profiler clients already installed: %s',
                      profiler_hosts)

        # install autotest_remote on any new hosts in use
        for hostname in in_use_hosts - profiler_hosts:
            host = hosts.create_host(hostname, auto_monitor=False)
            tmp_dir = host.get_tmp_dir(parent=PROFILER_TMPDIR)
            at = autotest_remote.Autotest(host)
            at.install_no_autoserv(autodir=tmp_dir)
            self.installed_hosts[host.hostname] = (host, at, tmp_dir)

        # drop any installs from hosts no longer in job.hosts
        hostnames_to_drop = profiler_hosts - in_use_hosts
        hosts_to_drop = [
            self.installed_hosts[hostname][0] for hostname in hostnames_to_drop
        ]
        for host in hosts_to_drop:
            host.close()
            del self.installed_hosts[host.hostname]
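
The install and cleanup decisions above reduce to set arithmetic over hostnames: hosts that are in use but have no profiler client yet get an install, and hosts that still carry a client but are no longer in use get dropped. A standalone illustration of that bookkeeping with made-up hostnames:

# Standalone illustration of the set bookkeeping in _install_clients();
# the hostnames are made up.
in_use_hosts = {'node1', 'node2', 'node3'}   # hosts the job is using
profiler_hosts = {'node2', 'node4'}          # hosts already installed

to_install = in_use_hosts - profiler_hosts   # {'node1', 'node3'}
to_drop = profiler_hosts - in_use_hosts      # {'node4'}

print('install on: %s' % sorted(to_install))
print('drop from:  %s' % sorted(to_drop))
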
Example #12
class repair_nodes(test.test):
    """
    soft : just restart the vm
    hard : change the boot kernel image
    force: use an external kernel image & initrd to start the vm, and
           install the latest kernel to repair the vm
    """
    version = 1
    vm_host = None
    vm_nodes_list = []
    force_repair = False
    ext_kernel = ""
    ext_initrd = ""
    ext_cmdline_dict = dict()
    grub = "grub"

    def initialize(self,
                   vm_nodes,
                   vm_host,
                   force_repair=False,
                   kernel="",
                   initrd="",
                   cmdline="",
                   grub="grub"):
        logging.info("Repair nodes initializing...")
        self.grub = grub
        if len(vm_host):
            try:
                self.vm_host = hosts.create_host(vm_host)
            except Exception, e:
                raise error.TestError("failed to create host :" + str(e))
            if not self.vm_host.is_up():
                raise error.TestError("Host is not up")

        logging.info("start creating vm nodes...")
        for node in vm_nodes:
            try:
                vm_node = hosts.create_host(node)
            except Exception, e:
                logging.warning("Failed to create.\n" + str(e))
                continue
            self.vm_nodes_list.append(vm_node)
            self.ext_cmdline_dict[vm_node.hostname] = cmdline
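
Because repair_nodes is a server test, the initialize() arguments arrive through job.run_test() in a control file. A hedged sketch of such an invocation follows; the node names, host name, kernel/initrd paths and cmdline are placeholders, and 'job' is provided by autoserv when the control file runs.

# Hedged sketch of a control file invoking the repair_nodes test above;
# all values are placeholders.
job.run_test('repair_nodes',
             vm_nodes=['vm-node-01', 'vm-node-02'],
             vm_host='vm-host-01',
             force_repair=True,              # force: boot an external kernel
             kernel='/tmp/repair/vmlinuz',
             initrd='/tmp/repair/initrd.img',
             cmdline='root=/dev/vda1 ro',
             grub='grub')
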
Example #13
    def _install_clients(self):
        """
        Install autotest_remote on any current job hosts.
        """
        in_use_hosts = set()
        # find hosts in use but not used by us
        for host in self.job.hosts:
            autodir = host.get_autodir()
            if not (autodir and autodir.startswith(PROFILER_TMPDIR)):
                in_use_hosts.add(host.hostname)
        logging.debug('Hosts currently in use: %s', in_use_hosts)

        # determine what valid host objects we already have installed
        profiler_hosts = set()
        for host, at, profiler_dir in self.installed_hosts.values():
            if host.path_exists(profiler_dir):
                profiler_hosts.add(host.hostname)
            else:
                # the profiler was wiped out somehow, drop this install
                logging.warning('The profiler client on %s at %s was deleted',
                                host.hostname, profiler_dir)
                host.close()
                del self.installed_hosts[host.hostname]
        logging.debug('Hosts with profiler clients already installed: %s',
                      profiler_hosts)

        # install autotest_remote on any new hosts in use
        for hostname in in_use_hosts - profiler_hosts:
            host = hosts.create_host(hostname, auto_monitor=False)
            tmp_dir = host.get_tmp_dir(parent=PROFILER_TMPDIR)
            at = autotest_remote.Autotest(host)
            at.install_no_autoserv(autodir=tmp_dir)
            self.installed_hosts[host.hostname] = (host, at, tmp_dir)

        # drop any installs from hosts no longer in job.hosts
        hostnames_to_drop = profiler_hosts - in_use_hosts
        hosts_to_drop = [self.installed_hosts[hostname][0]
                         for hostname in hostnames_to_drop]
        for host in hosts_to_drop:
            host.close()
            del self.installed_hosts[host.hostname]
Example #14
    def run_once(self, machines, extra_params, cycles):
        VIRT_TYPE = 'qemu'
        VIRT_DIR = data_dir.get_root_dir()
        TEST_DIR = data_dir.get_backend_dir(VIRT_TYPE)
        PROV_DIR = data_dir.get_test_provider_dir('io-github-autotest-qemu')
        SHARED_DIR = os.path.join(VIRT_DIR, 'shared')
        PROV_DIR = os.path.join(PROV_DIR, VIRT_TYPE)

        asset.download_test_provider("io-github-autotest-qemu")
        bootstrap.create_config_files(TEST_DIR, SHARED_DIR, interactive=False)
        bootstrap.create_config_files(TEST_DIR, PROV_DIR, interactive=False)
        bootstrap.create_subtests_cfg(VIRT_TYPE)
        bootstrap.create_guest_os_cfg(VIRT_TYPE)

        sys.path.insert(0, VIRT_DIR)

        CONTROL_MAIN_PART = """
testname = "virt"
bindir = os.path.join(job.testdir, testname)
job.install_pkg(testname, 'test', bindir)

qemu_test_dir = os.path.join(os.environ['AUTODIR'],'tests', 'virt')
sys.path.append(qemu_test_dir)
"""
        logging.info("QEMU test running on hosts %s\n", machines)

        _hosts = {}
        for machine in machines:
            _hosts[machine] = Machines(hosts.create_host(machine))

        for host in _hosts.itervalues():
            host.at = autotest_remote.Autotest(host.host)

        cfg_file = os.path.join(TEST_DIR, "cfg", "multi-host-tests.cfg")
        logging.info("CONFIG FILE: '%s' is used for generating"
                     " configuration." % cfg_file)

        if not os.path.exists(cfg_file):
            specific_subdirs = asset.get_test_provider_subdirs(
                "multi_host_migration")[0]
            orig_cfg_file = os.path.join(specific_subdirs, "cfg",
                                         "multi-host-tests.cfg")
            if os.path.exists(orig_cfg_file):
                shutil.copy(orig_cfg_file, cfg_file)
            else:
                raise error.JobError("Config file %s was not found"
                                     % cfg_file)

        # Get test set (dictionary list) from the configuration file
        parser = cartesian_config.Parser()
        parser.parse_file(cfg_file)
        parser.parse_string(extra_params)
        test_dicts = parser.get_dicts()

        ips = []
        for machine in machines:
            host = _hosts[machine]
            ips.append(host.host.ip)

        logging.info("")
        for i, params in enumerate(test_dicts):
            logging.info("Test    %d:  %s" % (i, params.get("shortname")))
        logging.info("")

        test_dicts = parser.get_dicts()

        test_dicts_ar = [utils_params.Params(x) for x in test_dicts]

        if not test_dicts_ar:
            raise error.TestNAError("Impossible to start any test with "
                                    "this configuration.")

        for params in test_dicts_ar:

            params['hosts'] = ips

            for vm in params.get("vms").split():
                for nic in params.get('nics', "").split():
                    params['mac_%s_%s' % (nic, vm)] = generate_mac_address()

            params['master_images_clone'] = "image1"
            params['kill_vm'] = "yes"

            s_host = _hosts[machines[0]]
            s_host.params = params.object_params("host1")
            s_host.params['clone_master'] = "yes"
            s_host.params['hostid'] = ips[0]

            for host_id, machine in enumerate(machines[1:]):
                host = _hosts[machine]
                host.params = params.object_params("host%s" % (host_id + 2))
                params['not_preprocess'] = "yes"
                host.params['clone_master'] = "no"
                host.params['hostid'] = ips[host_id + 1]

            # Report the parameters we've received
            logging.debug("Test parameters:")
            keys = params.keys()
            keys.sort()
            for key in keys:
                logging.debug("    %s = %s", key, params[key])

            for machine in machines:
                host = _hosts[machine]
                host.control = CONTROL_MAIN_PART

            for machine in machines:
                host = _hosts[machine]
                host.control += ("job.run_test('virt', tag='%s', params=%s)" %
                                 (host.params['shortname'], host.params))

            logging.debug('Master control file:\n%s', _hosts[machines[0]].control)
            for machine in machines[1:]:
                host = _hosts[machine]
                logging.debug('Slave control file:\n%s', host.control)

            commands = []

            for machine in machines:
                host = _hosts[machine]
                result_path = os.path.join(self.resultsdir,
                                           host.params["shortname"],
                                           host.host.hostname)
                commands.append(subcommand.subcommand(host.at.run,
                                                      [host.control,
                                                       result_path]))

            try:
                subcommand.parallel(commands)
            except error.AutoservError, e:
                logging.error(e)
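
Each per-host control file generated above is just CONTROL_MAIN_PART with one job.run_test('virt', ...) line appended, and the params dict is embedded directly as its Python repr. The standalone snippet below renders one such control file with placeholder values to make that visible.

# Standalone rendering of one generated per-host control file; the tag
# and the params dict are placeholders, not real test parameters.
CONTROL_MAIN_PART = """
testname = "virt"
bindir = os.path.join(job.testdir, testname)
job.install_pkg(testname, 'test', bindir)

qemu_test_dir = os.path.join(os.environ['AUTODIR'],'tests', 'virt')
sys.path.append(qemu_test_dir)
"""

control = CONTROL_MAIN_PART
control += ("job.run_test('virt', tag='%s', params=%s)" %
            ('migrate.default', {'shortname': 'migrate.default',
                                 'hostid': '10.0.0.1'}))
print(control)
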
Example #15
    def run_once(self, machines, extra_params, cycles):
        VIRT_TYPE = 'qemu'
        VIRT_DIR = data_dir.get_root_dir()
        TEST_DIR = data_dir.get_backend_dir(VIRT_TYPE)
        PROV_DIR = data_dir.get_test_provider_dir('io-github-autotest-qemu')
        SHARED_DIR = os.path.join(VIRT_DIR, 'shared')
        PROV_DIR = os.path.join(PROV_DIR, VIRT_TYPE)

        asset.download_test_provider("io-github-autotest-qemu")
        bootstrap.create_config_files(TEST_DIR, SHARED_DIR, interactive=False)
        bootstrap.create_config_files(TEST_DIR, PROV_DIR, interactive=False)
        bootstrap.create_subtests_cfg(VIRT_TYPE)
        bootstrap.create_guest_os_cfg(VIRT_TYPE)

        sys.path.insert(0, VIRT_DIR)

        CONTROL_MAIN_PART = """
testname = "virt"
bindir = os.path.join(job.testdir, testname)
job.install_pkg(testname, 'test', bindir)

qemu_test_dir = os.path.join(os.environ['AUTODIR'],'tests', 'virt')
sys.path.append(qemu_test_dir)
"""
        logging.info("QEMU test running on hosts %s\n", machines)

        _hosts = {}
        for machine in machines:
            _hosts[machine] = Machines(hosts.create_host(machine))

        cpu_number = 2**31
        for host in _hosts.itervalues():
            host.at = autotest_remote.Autotest(host.host)
            cpu_number = min(host.host.get_num_cpu(), cpu_number)

        cfg_file = os.path.join(TEST_DIR, "cfg", "multi-host-tests.cfg")
        logging.info("CONFIG FILE: '%s' is used for generating"
                     " configuration." % cfg_file)

        if not os.path.exists(cfg_file):
            specific_subdirs = asset.get_test_provider_subdirs("qemu")[0]
            orig_cfg_file = os.path.join(specific_subdirs, "cfg",
                                         "multi-host-tests.cfg")
            if os.path.exists(orig_cfg_file):
                shutil.copy(orig_cfg_file, cfg_file)
            else:
                raise error.JobError("Config file %s was not found"
                                     % cfg_file)

        # Get test set (dictionary list) from the configuration file
        parser = cartesian_config.Parser()
        parser.parse_file(cfg_file)
        parser.parse_string(extra_params)
        test_dicts = parser.get_dicts()

        ips = []
        for machine in machines:
            host = _hosts[machine]
            ips.append(host.host.ip)

        machine_hold_vm = machines[0]

        logging.info("")
        for i, params in enumerate(test_dicts):
            logging.info("Test    %d:  %s" % (i, params.get("shortname")))
        logging.info("")

        test_dicts = parser.get_dicts()

        test_dicts_ar = [utils_params.Params(x) for x in test_dicts]

        if not test_dicts_ar:
            raise error.TestNAError("Impossible to start any test with "
                                    "this configuration.")

        keep_macs = {}
        random_cpu_number = random.randint(1, cpu_number)
        for params in test_dicts_ar:

            params['hosts'] = ips
            if params.get("use_randome_smp") == "yes":
                params['smp'] = random_cpu_number

            for vm in params.get("vms").split():
                for nic in params.get('nics', "").split():
                    if 'mac_%s_%s' % (nic, vm) not in keep_macs:
                        keep_macs['mac_%s_%s' %
                                  (nic, vm)] = generate_mac_address()
                    params['mac_%s_%s' % (nic, vm)] = keep_macs['mac_%s_%s' %
                                                                (nic, vm)]

            s_host = _hosts[machine_hold_vm]
            s_host.params = params.object_params("host1")
            s_host.params['clone_master'] = "yes"
            s_host.params['hostid'] = ips[machines.index(machine_hold_vm)]

            for host_id, machine in enumerate(machines):
                if machine != machine_hold_vm:
                    host = _hosts[machine]
                    host_name = "host%s" % (host_id + 2)
                    host.params = params.object_params("host%s" %
                                                       (host_id + 2))
                    params['not_preprocess'] = "yes"
                    host.params['clone_master'] = "no"
                    host.params['hostid'] = ips[host_id]

            # Report the parameters we've received
            logging.debug("Test parameters:")
            keys = params.keys()
            keys.sort()
            for key in keys:
                logging.debug("    %s = %s", key, params[key])

            for machine in machines:
                host = _hosts[machine]
                host.control = CONTROL_MAIN_PART

            if params.get("need_multi_host") == "yes":
                for machine in machines:
                    host = _hosts[machine]
                    host.control += ("job.run_test('virt', tag='%s',"
                                     " params=%s)" %
                                     (host.params['shortname'], host.params))

                logging.debug('Master control file:\n%s',
                              _hosts[machine_hold_vm].control)
                for machine in machines:
                    if machine != machine_hold_vm:
                        host = _hosts[machine]
                        logging.debug('Slave control file:\n%s', host.control)

                commands = []

                for machine in machines:
                    host = _hosts[machine]
                    result_path = os.path.join(self.resultsdir,
                                               host.host.hostname,
                                               host.params["shortname"])
                    cmd = subcommand.subcommand(host.at.run,
                                                [host.control, result_path])
                    commands.append(cmd)
            else:
                host = _hosts[machine_hold_vm]
                result_path = os.path.join(self.resultsdir, host.host.hostname,
                                           host.params["shortname"])
                host.control += ("job.run_test('virt', tag='%s', params=%s)" %
                                 (host.params['shortname'], host.params))
                logging.debug("Run control file:\n %s", host.control)
                commands = [
                    subcommand.subcommand(host.at.run,
                                          [host.control, result_path])
                ]
            try:
                subcommand.parallel(commands)
                if params.get("vm_migrated") == "yes":
                    # This update is based on the logic of the
                    # migration_multi_host test case, which uses machines[0]
                    # as src and machines[1] as dst. This may need updating
                    # for a different design. Just keep the machines and ips
                    # lists in the right order for the following tests.
                    machine_hold_vm = machines[1]
                    ip_hold_vm = ips[1]
                    machines.remove(machine_hold_vm)
                    ips.remove(ip_hold_vm)

                    if params.get("random_dst_host") == "yes":
                        my_random = random.SystemRandom()
                        dst_machine = my_random.choice(machines)
                        dst_ip = ips[machines.index(dst_machine)]
                    else:
                        dst_machine = machines[0]
                        dst_ip = ips[0]
                    machines.remove(dst_machine)
                    ips.remove(dst_ip)

                    machines.insert(0, machine_hold_vm)
                    machines.insert(1, dst_machine)
                    ips.insert(0, ip_hold_vm)
                    ips.insert(1, dst_ip)

            except error.AutoservError, e:
                logging.error(e)
Example #16
    def run_once(self, machines, extra_params, cycles):
        VIRT_TYPE = 'qemu'
        VIRT_DIR = data_dir.get_root_dir()
        TEST_DIR = data_dir.get_backend_dir(VIRT_TYPE)
        PROV_DIR = data_dir.get_test_provider_dir('io-github-autotest-qemu')
        SHARED_DIR = os.path.join(VIRT_DIR, 'shared')
        PROV_DIR = os.path.join(PROV_DIR, VIRT_TYPE)

        asset.download_test_provider("io-github-autotest-qemu")
        bootstrap.create_config_files(TEST_DIR, SHARED_DIR, interactive=False)
        bootstrap.create_config_files(TEST_DIR, PROV_DIR, interactive=False)
        bootstrap.create_subtests_cfg(VIRT_TYPE)
        bootstrap.create_guest_os_cfg(VIRT_TYPE)

        sys.path.insert(0, VIRT_DIR)

        CONTROL_MAIN_PART = """
testname = "virt"
bindir = os.path.join(job.testdir, testname)
job.install_pkg(testname, 'test', bindir)

qemu_test_dir = os.path.join(os.environ['AUTODIR'],'tests', 'virt')
sys.path.append(qemu_test_dir)
"""
        logging.info("QEMU test running on hosts %s\n", machines)

        _hosts = {}
        for machine in machines:
            _hosts[machine] = Machines(hosts.create_host(machine))

        cpu_number = 2 ** 31
        for host in _hosts.itervalues():
            host.at = autotest_remote.Autotest(host.host)
            cpu_number = min(host.host.get_num_cpu(), cpu_number)

        cfg_file = os.path.join(TEST_DIR, "cfg", "multi-host-tests.cfg")
        logging.info("CONFIG FILE: '%s' is used for generating"
                     " configuration." % cfg_file)

        if not os.path.exists(cfg_file):
            specific_subdirs = asset.get_test_provider_subdirs("qemu")[0]
            orig_cfg_file = os.path.join(specific_subdirs, "cfg",
                                         "multi-host-tests.cfg")
            if os.path.exists(orig_cfg_file):
                shutil.copy(orig_cfg_file, cfg_file)
            else:
                raise error.JobError("Config file %s was not found"
                                     % cfg_file)

        # Get test set (dictionary list) from the configuration file
        parser = cartesian_config.Parser()
        parser.parse_file(cfg_file)
        parser.parse_string(extra_params)
        test_dicts = parser.get_dicts()

        ips = []
        for machine in machines:
            host = _hosts[machine]
            ips.append(host.host.ip)

        machine_hold_vm = machines[0]

        logging.info("")
        for i, params in enumerate(test_dicts):
            logging.info("Test    %d:  %s" % (i, params.get("shortname")))
        logging.info("")

        test_dicts = parser.get_dicts()

        test_dicts_ar = [utils_params.Params(x) for x in test_dicts]

        if not test_dicts_ar:
            raise error.TestNAError("Impossible to start any test with "
                                    "this configuration.")

        keep_macs = {}
        random_cpu_number = random.randint(1, cpu_number)
        for params in test_dicts_ar:

            params['hosts'] = ips
            if params.get("use_randome_smp") == "yes":
                params['smp'] = random_cpu_number

            for vm in params.get("vms").split():
                for nic in params.get('nics', "").split():
                    if 'mac_%s_%s' % (nic, vm) not in keep_macs:
                        keep_macs['mac_%s_%s' % (nic, vm)] = generate_mac_address()
                    params['mac_%s_%s' % (nic, vm)] = keep_macs['mac_%s_%s' % (nic, vm)]

            s_host = _hosts[machine_hold_vm]
            s_host.params = params.object_params("host1")
            s_host.params['clone_master'] = "yes"
            s_host.params['hostid'] = ips[machines.index(machine_hold_vm)]

            for host_id, machine in enumerate(machines):
                if machine != machine_hold_vm:
                    host = _hosts[machine]
                    host_name = "host%s" % (host_id + 2)
                    host.params = params.object_params("host%s" % (host_id + 2))
                    params['not_preprocess'] = "yes"
                    host.params['clone_master'] = "no"
                    host.params['hostid'] = ips[host_id]

            # Report the parameters we've received
            logging.debug("Test parameters:")
            keys = params.keys()
            keys.sort()
            for key in keys:
                logging.debug("    %s = %s", key, params[key])

            for machine in machines:
                host = _hosts[machine]
                host.control = CONTROL_MAIN_PART

            if params.get("need_multi_host") == "yes":
                for machine in machines:
                    host = _hosts[machine]
                    host.control += ("job.run_test('virt', tag='%s',"
                                     " params=%s)" %
                                     (host.params['shortname'], host.params))

                logging.debug('Master control file:\n%s',
                               _hosts[machine_hold_vm].control)
                for machine in machines:
                    if machine != machine_hold_vm:
                        host = _hosts[machine]
                        logging.debug('Slave control file:\n%s', host.control)

                commands = []

                for machine in machines:
                    host = _hosts[machine]
                    result_path = os.path.join(self.resultsdir,
                                               host.host.hostname,
                                               host.params["shortname"])
                    cmd = subcommand.subcommand(host.at.run,
                                                [host.control,
                                                 result_path])
                    commands.append(cmd)
            else:
                host = _hosts[machine_hold_vm]
                result_path = os.path.join(self.resultsdir,
                                           host.host.hostname,
                                           host.params["shortname"])
                host.control += ("job.run_test('virt', tag='%s', params=%s)" %
                                 (host.params['shortname'], host.params))
                logging.debug("Run control file:\n %s", host.control)
                commands = [subcommand.subcommand(host.at.run,
                                                  [host.control,
                                                   result_path])]
            try:
                subcommand.parallel(commands)
                if params.get("vm_migrated") == "yes":
                    # This update is based on the logic of the
                    # migration_multi_host test case, which uses machines[0]
                    # as src and machines[1] as dst. This may need updating
                    # for a different design. Just keep the machines and ips
                    # lists in the right order for the following tests.
                    machine_hold_vm = machines[1]
                    ip_hold_vm = ips[1]
                    machines.remove(machine_hold_vm)
                    ips.remove(ip_hold_vm)

                    if params.get("random_dst_host") == "yes":
                        my_random = random.SystemRandom()
                        dst_machine = my_random.choice(machines)
                        dst_ip = ips[machines.index(dst_machine)]
                    else:
                        dst_machine = machines[0]
                        dst_ip = ips[0]
                    machines.remove(dst_machine)
                    ips.remove(dst_ip)

                    machines.insert(0, machine_hold_vm)
                    machines.insert(1, dst_machine)
                    ips.insert(0, ip_hold_vm)
                    ips.insert(1, dst_ip)

            except error.AutoservError, e:
                logging.error(e)
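
The post-migration bookkeeping above reorders machines and ips in place so that the host currently holding the VM ends up at index 0 and the next destination at index 1. A standalone walk-through of that reordering, using made-up host names and the non-random destination branch:

# Standalone walk-through of the post-migration reordering; host names
# and IPs are made up, and the non-random destination branch is shown.
machines = ['hostA', 'hostB', 'hostC']
ips = ['10.0.0.1', '10.0.0.2', '10.0.0.3']

# After migration, machines[1] holds the VM.
machine_hold_vm = machines[1]                 # 'hostB'
ip_hold_vm = ips[1]                           # '10.0.0.2'
machines.remove(machine_hold_vm)
ips.remove(ip_hold_vm)

# Pick the next destination (the first remaining machine in this branch).
dst_machine = machines[0]                     # 'hostA'
dst_ip = ips[0]                               # '10.0.0.1'
machines.remove(dst_machine)
ips.remove(dst_ip)

# Put the VM holder first and the next destination second.
machines.insert(0, machine_hold_vm)
machines.insert(1, dst_machine)
ips.insert(0, ip_hold_vm)
ips.insert(1, dst_ip)

print(machines)   # ['hostB', 'hostA', 'hostC']
print(ips)        # ['10.0.0.2', '10.0.0.1', '10.0.0.3']
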