def setUp(self):
    """Prepare a mocked two-vm network with node roles and bridged nics."""
    self.vmnet = mock.MagicMock()
    params = utils_params.Params()
    # set each key through __setitem__ so Params-specific handling applies
    for key, value in (("vms", "vm1 vm2"),
                       ("roles", "node1 node2"),
                       ("node1", "vm1"),
                       ("node2", "vm2"),
                       ("nics", "b1 b2"),
                       ("nic_roles", "internet_nic lan_nic"),
                       ("internet_nic", "b1"),
                       ("lan_nic", "b2"),
                       ("mac", "00:00:00:00:00:00"),
                       ("netmask_b1", "255.255.0.0"),
                       ("netmask_b2", "255.255.0.0"),
                       ("ip_b1_vm1", "10.1.0.1"),
                       ("ip_b2_vm1", "172.17.0.1"),
                       ("ip_b1_vm2", "10.2.0.1"),
                       ("ip_b2_vm2", "172.18.0.1"),
                       ("netdst_b1_vm1", "virbr0"),
                       ("netdst_b2_vm1", "virbr1"),
                       ("netdst_b1_vm2", "virbr2"),
                       ("netdst_b2_vm2", "virbr3")):
        params[key] = value
    self.run_params = params
    # mocked environment returning/creating vms via the helper callbacks
    self.env = mock.MagicMock(name='env')
    self.env.get_vm = mock.MagicMock(side_effect=self._get_mock_vm)
    self.env.create_vm = mock.MagicMock(side_effect=self._create_mock_vm)
    self.mock_vms = {}
def test_04_VirtNet(self):
    """
    Populate database with max - 1 mac addresses.

    Creates one single-nic vm per possible last mac byte and verifies
    the generated mac is stored lower-cased and retrievable.
    """
    try:
        os.unlink(self.db_filename)
    except OSError:
        pass
    self.zero_counter(25)
    # setup() method already set LASTBYTE to '-1'
    # NOTE: xrange replaced by range for Python 3 compatibility
    # (the module already uses Python-3-only `__next__` elsewhere)
    for lastbyte in range(0, 0xFF):
        # test_07_VirtNet demands last byte in name and mac match
        vm_name = "vm%d" % lastbyte
        if lastbyte < 16:
            mac = "%s0%x" % (self.mac_prefix, lastbyte)
        else:
            mac = "%s%x" % (self.mac_prefix, lastbyte)
        params = utils_params.Params({
            "nics": "nic1",
            "vms": vm_name,
            "mac_nic1": mac,
        })
        virtnet = utils_net.VirtNet(params, vm_name, vm_name,
                                    self.db_filename)
        virtnet.mac_prefix = self.mac_prefix
        self.assertEqual(virtnet['nic1'].mac, mac)
        self.assertEqual(virtnet.get_mac_address(0), mac)
        # Confirm only lower-case macs are stored
        self.assertEqual(virtnet.get_mac_address(0).lower(),
                         virtnet.get_mac_address(0))
        self.assertEqual(virtnet.mac_list(), [mac])
        self.print_and_inc()
def setUp(self):
    """Reset the dummy test bookkeeping and build a fresh configuration."""
    DummyTestRunning.asserted_tests = []
    DummyTestRunning.fail_switch = False
    config = {}
    config["available_vms"] = {"vm1": "only CentOS\n",
                               "vm2": "only Win10\n",
                               "vm3": "only Ubuntu\n"}
    config["available_restrictions"] = ["leaves", "normal", "minimal"]
    config["param_dict"] = {}
    # vm restriction strings start out identical to the available vms
    config["vm_strs"] = config["available_vms"].copy()
    config["tests_str"] = {}
    config["tests_params"] = utils_params.Params()
    config["vms_params"] = utils_params.Params()
    self.config = config
def __init__(self, methodName='runTest', name=None, params=None,
             base_logdir=None, job=None, runner_queue=None,
             vt_params=None):
    """
    :note: methodName, name, base_logdir, job and runner_queue params
           are inherited from test.Test
    :param params: avocado/multiplexer params stored as
                   `self.avocado_params`.
    :param vt_params: avocado-vt/cartesian_config params stored as
                      `self.params`.
    """
    self.bindir = data_dir.get_root_dir()
    self.virtdir = os.path.join(self.bindir, 'shared')
    self.iteration = 0
    self.resultsdir = None
    self.file_handler = None
    self.background_errors = Queue.Queue()
    super(VirtTest, self).__init__(methodName=methodName, name=name,
                                   params=params,
                                   base_logdir=base_logdir, job=job,
                                   runner_queue=runner_queue)
    self.builddir = os.path.join(self.workdir, 'backends',
                                 vt_params.get("vm_type"))
    self.tmpdir = os.path.dirname(self.workdir)
    # initialize virttest (cartesian_config) params from vt_params
    self.__params = utils_params.Params(vt_params)
    self.debugdir = self.logdir
    self.resultsdir = self.logdir
    # Avocado-vt timeout wins unless the varianter already set one
    self.timeout = vt_params.get("test_timeout", self.timeout)
    utils_misc.set_log_file_dir(self.logdir)
def setUp(self):
    """Reset dummy test state and prepare empty mocked parse arguments."""
    DummyTestRunning.asserted_tests = []
    DummyTestRunning.fail_switch = False
    self.args = mock.MagicMock()
    self.args.param_str = ""
    self.args.vm_strs = {}
    self.run_params = utils_params.Params()
def __init__(self, methodName='runTest', name=None, params=None,
             base_logdir=None, tag=None, job=None, runner_queue=None,
             vt_params=None):
    """
    :note: methodName, name, base_logdir, tag, job and runner_queue
           params are inherited from test.Test
    :param params: avocado/multiplexer params stored as
                   `self.avocado_params`.
    :param vt_params: avocado-vt/cartesian_config params stored as
                      `self.params`.
    """
    # discard the inherited name; it is recomputed from vt_params below
    del name
    options = job.args
    self.bindir = data_dir.get_root_dir()
    self.virtdir = os.path.join(self.bindir, 'shared')
    self.iteration = 0
    name = None
    if options.vt_config:
        name = vt_params.get("shortname")
    elif options.vt_type == 'spice':
        short_name_map_file = vt_params.get("_short_name_map_file")
        if "tests-variants.cfg" in short_name_map_file:
            name = short_name_map_file["tests-variants.cfg"]
    if name is None:
        name = vt_params.get("_short_name_map_file")["subtests.cfg"]
    self.outputdir = None
    self.resultsdir = None
    self.logfile = None
    self.file_handler = None
    self.background_errors = Queue.Queue()
    self.whiteboard = None
    super(VirtTest, self).__init__(methodName=methodName, name=name,
                                   params=params,
                                   base_logdir=base_logdir, tag=tag,
                                   job=job, runner_queue=runner_queue)
    self.builddir = os.path.join(self.workdir, 'backends',
                                 vt_params.get("vm_type"))
    self.tmpdir = os.path.dirname(self.workdir)
    # Move self.params to self.avocado_params and initialize virttest
    # (cartesian_config) params
    self.avocado_params = self.params
    self.params = utils_params.Params(vt_params)
    self.debugdir = self.logdir
    self.resultsdir = self.logdir
    utils_misc.set_log_file_dir(self.logdir)
def __init__(self, methodName='runTest', name=None, params=None,
             base_logdir=None, job=None, runner_queue=None,
             vt_params=None):
    """
    :note: methodName, name, base_logdir, job and runner_queue params
           are inherited from test.Test
    :param params: avocado/multiplexer params stored as
                   `self.avocado_params`.
    :param vt_params: avocado-vt/cartesian_config params stored as
                      `self.params`.
    """
    self.__params_vt = None
    self.__avocado_params = None
    self.bindir = data_dir.get_root_dir()
    self.virtdir = os.path.join(self.bindir, 'shared')
    # self.__params_vt must be initialized after super
    params_vt = utils_params.Params(vt_params)
    # for timeout use Avocado-vt timeout as default but allow
    # overriding from Avocado params (varianter)
    self.timeout = params_vt.get("test_timeout", self.timeout)
    self.iteration = 0
    self.resultsdir = None
    self.file_handler = None
    self.background_errors = error_event.error_events_bus
    # clear existing error events
    self.background_errors.clear()
    super(VirtTest, self).__init__(methodName=methodName, name=name,
                                   params=params,
                                   base_logdir=base_logdir, job=job,
                                   runner_queue=runner_queue)
    self.builddir = os.path.join(self.workdir, 'backends',
                                 vt_params.get("vm_type", ""))
    self.tmpdir = os.path.dirname(self.workdir)
    # Move self.params to self.avocado_params and initialize virttest
    # (cartesian_config) params
    try:
        self.__avocado_params = super(VirtTest, self).params
    except AttributeError:
        # 36LTS set's `self.params` instead of having it as a property
        # which stores the avocado params in `self.__params`
        self.__avocado_params = self.__params
    self.__params_vt = params_vt
    self.debugdir = self.logdir
    self.resultsdir = self.logdir
    utils_misc.set_log_file_dir(self.logdir)
    self.__status = None
def test_07_VirtNet(self):
    """
    Release mac from beginning, middle, and end, re-generate + verify value.
    """
    self.zero_counter(1)
    beginning_params = utils_params.Params({"nics": "nic1 nic2",
                                            "vms": "vm0"})
    middle_params = utils_params.Params({"nics": "nic1 nic2",
                                         "vms": "vm127"})
    end_params = utils_params.Params({"nics": "nic1 nic2",
                                      "vms": "vm255"})
    for params in (beginning_params, middle_params, end_params):
        vm_name = params['vms']
        virtnet = utils_net.VirtNet(params, vm_name, vm_name,
                                    self.db_filename)
        virtnet.mac_prefix = self.mac_prefix
        iface = virtnet['nic1']
        last_db_mac_byte = iface.mac_str_to_int_list(iface.mac)[-1]
        last_vm_name_byte = int(vm_name[2:])
        # Sequential generation from test_04_VirtNet guarantee
        self.assertEqual(last_db_mac_byte, last_vm_name_byte)
        # only try 300 times, guarantees LASTBYTE counter will loop once
        self.assertRaises(utils_net.NetError,
                          virtnet.generate_mac_address, 1, 300)
        virtnet.free_mac_address(0)
        virtnet.free_mac_address(1)
        # regenerate the second nic's mac to verify the mac_index generator
        # catches it and to signify database updated after generation
        virtnet.generate_mac_address(1, 300)
        last_db_mac_byte = virtnet['nic2'].mac_str_to_int_list(
            virtnet['nic2'].mac)[-1]
        self.assertEqual(last_db_mac_byte, last_vm_name_byte)
        self.assertEqual(virtnet.get_mac_address(1), virtnet[1].mac)
        self.print_and_inc()
def __init__(self, **kwargs):
    """
    :note: methodName, name, base_logdir, job/config and runner_queue
           params are inherited from test.Test From the avocado 86 the
           test.Test uses config instead of job instance. Because of the
           compatibility with avocado 82.0 LTS we can't remove the job
           instance. For avocado < 86 job instance is used and for
           avocado=>86 config is used.
    :param params: avocado/multiplexer params stored as
                   `self.avocado_params`.
    :param vt_params: avocado-vt/cartesian_config params stored as
                      `self.params`.
    """
    vt_params = kwargs.pop("vt_params", None)
    self.__params_vt = None
    self.__avocado_params = None
    self.bindir = data_dir.get_root_dir()
    self.virtdir = os.path.join(self.bindir, 'shared')
    # self.__params_vt must be initialized after super
    params_vt = utils_params.Params(vt_params)
    # for timeout use Avocado-vt timeout as default but allow
    # overriding from Avocado params (varianter)
    self.timeout = params_vt.get("test_timeout", self.timeout)
    self.iteration = 0
    self.resultsdir = None
    self.background_errors = error_event.error_events_bus
    # clear existing error events
    self.background_errors.clear()
    kwargs.setdefault("methodName", 'runTest')
    super(VirtTest, self).__init__(**kwargs)
    self.builddir = os.path.join(self.workdir, 'backends',
                                 vt_params.get("vm_type", ""))
    self.tmpdir = os.path.dirname(self.workdir)
    # Move self.params to self.avocado_params and initialize virttest
    # (cartesian_config) params
    try:
        self.__avocado_params = super(VirtTest, self).params
    except AttributeError:
        # 36LTS set's `self.params` instead of having it as a property
        # which stores the avocado params in `self.__params`
        self.__avocado_params = self.__params
    self.__params_vt = params_vt
    self.debugdir = self.logdir
    self.resultsdir = self.logdir
    utils_misc.set_log_file_dir(self.logdir)
    self.__status = None
    self.__exc_info = None
def test_register_vm(self):
    """
    1) Create an env object.
    2) Create a VM and register it from env.
    3) Get the vm back from the env.
    4) Verify that the 2 objects are the same.
    """
    env = utils_env.Env(filename=self.envfilename)
    params = utils_params.Params({"main_vm": 'rhel7-migration'})
    vm_name = params['main_vm']
    original_vm = FakeVm(vm_name, params)
    original_vm.is_alive()
    env.register_vm(vm_name, original_vm)
    retrieved_vm = env.get_vm(vm_name)
    retrieved_vm.is_alive()
    assert original_vm == retrieved_vm
def peek(parser, list_of_keys=None):
    """
    Peek into a parsed dictionary.

    :param parser: parser to get the first variant dictionary from
    :type parser: Parser object
    :param list_of_keys: list of parameters key in the final selection
    :type list_of_keys: [str] or None
    :returns: the first variant dictionary from the prepared parser
    :rtype: Params object
    """
    # use the builtin next() rather than calling __next__() directly
    default_params = next(parser.get_dicts())
    if list_of_keys is None:
        selected_params = default_params
    else:
        selected_params = {key: default_params[key] for key in list_of_keys}
    return utils_params.Params(selected_params)
def __init__(self, queue, runnable):
    """Prepare logging directories and cartesian params for a runnable."""
    self.__vt_params = utils_params.Params(runnable.kwargs)
    self.queue = queue
    # results live under a throwaway temporary directory
    self.tmpdir = tempfile.mkdtemp()
    self.logdir = os.path.join(self.tmpdir, 'results')
    path.init_dir(self.logdir)
    self.logfile = os.path.join(self.logdir, 'debug.log')
    self.log = output.LOG_JOB
    self.log_level = runnable.config.get('job.output.loglevel',
                                         logging.DEBUG)
    self.env_version = utils_env.get_env_version()
    self.iteration = 0
    self.background_errors = error_event.error_events_bus
    # clear existing error events
    self.background_errors.clear()
    self.debugdir = self.logdir
    self.bindir = data_dir.get_root_dir()
    self.virtdir = os.path.join(self.bindir, 'shared')
def test_06_VirtNet(self):
    """
    Generate last possibly mac and verify value.

    DEPENDS ON test_05_VirtNet running first
    """
    self.zero_counter(25)
    # test two nics, second mac generation should fail (pool exhausted)
    params = utils_params.Params({"nics": "nic1 nic2", "vms": "vm255"})
    virtnet = utils_net.VirtNet(params, 'vm255', 'vm255',
                                self.db_filename)
    virtnet.mac_prefix = self.mac_prefix
    # indexing get_mac_address by name instead of position must fail
    self.assertRaises(AttributeError, virtnet.get_mac_address, 'nic1')
    expected_mac = "%s%x" % (self.mac_prefix, 255)
    # This will grab the last available address
    # only try 300 times, guarantees LASTBYTE counter will loop once
    self.assertEqual(virtnet.generate_mac_address(0, 300), expected_mac)
    # This will fail allocation
    self.assertRaises(utils_net.NetError,
                      virtnet.generate_mac_address, 1, 300)
def import_key_params(self, from_node, to_node):
    """
    This will generate own key configuration at the source vm and
    foreign key configuration at the destination vm.

    :param from_node: source node to get the key from (and generate own
                      key configuration on it containing all relevant
                      key information)
    :type from_node: VMNode object
    :param to_node: destination node to import the key to (and generate
                    foreign key configuration on it containing all
                    relevant key information)
    :type to_node: VMNode object
    """
    assert from_node != to_node, "Cannot import key parameters from a vm node to itself"
    end_points = [self.left, self.right]
    if from_node not in end_points:
        raise ValueError(
            "The keys are not imported from any of the tunnel end points %s and %s and "
            "%s is not one of them" % (self.left.name, self.right.name, from_node.name))
    if to_node not in end_points:
        raise ValueError(
            "The keys are not imported to any of the tunnel end points %s and %s and "
            "%s is not one of them" % (self.left.name, self.right.name, to_node.name))
    from_vm, to_vm = from_node.platform, to_node.platform
    own_key_params = utils_params.Params(
        {"vpnconn_own_key_name": "sample-key"})
    from_vm.params.update(own_key_params)

    def get_imported_key_params(from_params):
        # the destination's foreign key name mirrors the source's own key
        to_params = from_params.copy()
        to_params["vpnconn_foreign_key_name"] = from_params[
            "vpnconn_own_key_name"]
        del to_params["vpnconn_own_key_name"]
        return to_params

    foreign_key_params = get_imported_key_params(own_key_params)
    to_vm.params.update(foreign_key_params)
    raise NotImplementedError(
        "Public key authentication is not implemented for any guest OS")
def test_05_VirtNet(self):
    """
    Load max - 1 entries from db, overriding params.

    DEPENDS ON test_04_VirtNet running first
    """
    self.zero_counter(25)
    # second loop forces db load from disk
    # also confirming params merge with db data
    # NOTE: xrange replaced by range for Python 3 compatibility
    for lastbyte in range(0, 0xFF):
        vm_name = "vm%d" % lastbyte
        params = utils_params.Params({"nics": "nic1", "vms": vm_name})
        virtnet = utils_net.VirtNet(params, vm_name, vm_name,
                                    self.db_filename)
        if lastbyte < 16:
            mac = "%s0%x" % (self.mac_prefix, lastbyte)
        else:
            mac = "%s%x" % (self.mac_prefix, lastbyte)
        self.assertEqual(virtnet['nic1'].mac, mac)
        self.assertEqual(virtnet.get_mac_address(0), mac)
        self.print_and_inc()
def external_data_file_defined_by_params(cls, params, root_dir, tag):
    """Link image to an external data file."""
    data_file_enabled = params.get("enable_data_file", "no") == "yes"
    fmt = params.get("image_format", "qcow2")
    if not data_file_enabled:
        return
    # only the qcow2 format supports an external data file
    if fmt != "qcow2":
        raise ValueError("The %s format does not support external "
                         "data file" % fmt)
    size = params["image_size"]
    base_name = os.path.basename(params["image_name"])
    default_path = os.path.join(root_dir, "images",
                                "%s.data_file" % base_name)
    data_file_path = params.get("image_data_file_path", default_path)
    data_file_params = utils_params.Params({
        "image_name": data_file_path,
        "image_format": "raw",
        "image_size": size,
        "image_raw_device": "yes"
    })
    return cls(data_file_params, root_dir, "%s_data_file" % tag)
def test_unregister_vm(self):
    """
    1) Create an env object.
    2) Register 2 vms to the env.
    3) Verify both vms are in the env.
    4) Remove one of those vms.
    5) Verify that the removed vm is no longer in env.
    """
    env = utils_env.Env(filename=self.envfilename)
    params = utils_params.Params({"main_vm": 'rhel7-migration'})
    main_vm = FakeVm(params['main_vm'], params)
    main_vm.is_alive()
    second_vm = FakeVm('vm2', params)
    second_vm.is_alive()
    env.register_vm(params['main_vm'], main_vm)
    env.register_vm('vm2', second_vm)
    assert main_vm in env.get_all_vms()
    assert second_vm in env.get_all_vms()
    env.unregister_vm('vm2')
    assert main_vm in env.get_all_vms()
    assert second_vm not in env.get_all_vms()
def test_get_all_vms(self):
    """
    1) Create an env object.
    2) Create 2 vms and register them in the env.
    3) Create a SyncListenServer and register it in the env.
    4) Verify that the 2 vms are in the output of get_all_vms.
    5) Verify that the sync server is not in the output of get_all_vms.
    """
    env = utils_env.Env(filename=self.envfilename)
    params = utils_params.Params({"main_vm": 'rhel7-migration'})
    first_vm = FakeVm(params['main_vm'], params)
    first_vm.is_alive()
    second_vm = FakeVm('vm2', params)
    second_vm.is_alive()
    env.register_vm(params['main_vm'], first_vm)
    env.register_vm('vm2', second_vm)
    # sync servers must never be listed among the vms
    sync_server = FakeSyncListenServer(port=333)
    env.register_syncserver(333, sync_server)
    all_vms = env.get_all_vms()
    assert first_vm in all_vms
    assert second_vm in all_vms
    assert sync_server not in all_vms
def test_cmp_Virtnet(self):
    """Compare VirtNet instances for equality regardless of nic order."""
    self.zero_counter()
    to_test = 600  # Random generator slows this test way down
    for fakevm1 in self.fakevm_generator():
        to_test -= 1
        if to_test < 1:
            break
        fvm1p = fakevm1.get_params()
        fakevm1.virtnet = utils_net.VirtNet(fvm1p, fakevm1.name,
                                            fakevm1.instance,
                                            self.db_filename)
        if len(fakevm1.virtnet) < 2:
            continue
        fakevm2 = FakeVm(fakevm1.name + "_2", fvm1p)
        fakevm2.virtnet = utils_net.VirtNet(fvm1p, fakevm2.name,
                                            fakevm2.instance,
                                            self.db_filename)
        # Verify nic order doesn't matter
        fvm3p = utils_params.Params(fvm1p.items())  # work on copy
        nic_list = fvm1p.object_params(fakevm1.name).get(
            "nics", fvm1p.get('nics', "")).split()
        random.shuffle(nic_list)
        fvm3p['nics'] = " ".join(nic_list)
        fakevm3 = FakeVm(fakevm1.name + "_3", fvm3p)
        fakevm3.virtnet = utils_net.VirtNet(fvm3p, fakevm3.name,
                                            fakevm3.instance,
                                            self.db_filename)
        self.assertTrue(fakevm1.virtnet == fakevm1.virtnet)
        self.assertTrue(fakevm1.virtnet == fakevm2.virtnet)
        self.assertTrue(fakevm1.virtnet == fakevm3.virtnet)
        self.assertTrue(fakevm2.virtnet == fakevm3.virtnet)
        if len(fakevm1.virtnet) > 1:
            # dropping a nic must break equality in both directions
            del fakevm1.virtnet[0]
            self.assertFalse(fakevm1.virtnet == fakevm2.virtnet)
            self.assertFalse(fakevm1.virtnet == fakevm3.virtnet)
            self.assertTrue(fakevm1.virtnet != fakevm2.virtnet)
            self.assertTrue(fakevm1.virtnet != fakevm3.virtnet)
        self.print_and_inc()
def setUp(self):
    """Prepare a mocked two-vm network without node roles."""
    self.vmnet = mock.MagicMock()
    params = utils_params.Params()
    # set each key through __setitem__ so Params-specific handling applies
    for key, value in (("vms", "vm1 vm2"),
                       ("nics", "b1 b2"),
                       ("nic_roles", "internet_nic lan_nic"),
                       ("internet_nic", "b1"),
                       ("lan_nic", "b2"),
                       ("mac", "00:00:00:00:00:00"),
                       ("netmask_b1", "255.255.0.0"),
                       ("netmask_b2", "255.255.0.0"),
                       ("ip_b1_vm1", "10.1.1.1"),
                       ("ip_b2_vm1", "172.17.1.1"),
                       ("ip_b1_vm2", "10.2.1.1"),
                       ("ip_b2_vm2", "172.18.1.1")):
        params[key] = value
    self.run_params = params
    self.env = mock.MagicMock(name='env')
    self.env.get_vm = mock.MagicMock(side_effect=self._get_mock_vm)
    self.mock_vms = {}
    # inline class definition and instantiation
    self.test = type('obj', (object, ), {'outputdir': ''})()
def setUp(self):
    """
    Runs before every test
    """
    logging.disable(logging.INFO)
    logging.disable(logging.WARNING)
    # MAC generator produces from incrementing byte list
    # at random starting point (class property).
    # make sure it starts counting at zero before every test
    utils_net.VirtIface.LASTBYTE = -1
    # These warnings are annoying during testing.
    # FIX: the original statement was the no-op expression
    # `utils_net.VMNet.DISCARD_WARNINGS - 1`; an in-place decrement
    # was clearly intended.
    utils_net.VMNet.DISCARD_WARNINGS -= 1
    parser = cartesian_config.Parser()
    parser.parse_string(self.nettests_cartesian)
    self.CartesianResult = []
    for d in parser.get_dicts():
        params = utils_params.Params(d)
        self.CartesianResult.append(params)
        # count every vm that defines at least one nic
        for vm_name in params.objects('vms'):
            vm = params.object_params(vm_name)
            nics = vm.get('nics')
            if nics and len(nics.split()) > 0:
                self.db_item_count += 1
def test_save(self):
    """
    1) Verify that calling env.save() with no filename where env
       doesn't specify a filename will throw an EnvSaveError.
    2) Register a VM in environment, save env to a file, recover env
       from that file, get the vm and verify that the instance
       attribute of the 2 objects is the same.
    3) Register a SyncListenServer and don't save env. Restore env from
       file and try to get the syncserver, verify it doesn't work.
    4) Now save env to a file, restore env from file and verify that
       the syncserver can be found there, and that the sync server
       instance attribute is equal to the initial sync server instance.
    """
    env = utils_env.Env()
    self.assertRaises(utils_env.EnvSaveError, env.save, {})
    params = utils_params.Params({"main_vm": 'rhel7-migration'})
    vm1 = FakeVm(params['main_vm'], params)
    vm1.is_alive()
    env.register_vm(params['main_vm'], vm1)
    env.save(filename=self.envfilename)
    restored_env = utils_env.Env(filename=self.envfilename)
    vm2 = restored_env.get_vm(params['main_vm'])
    vm2.is_alive()
    assert vm1.instance == vm2.instance
    sync1 = FakeSyncListenServer(port=222)
    env.register_syncserver(222, sync1)
    # without a save, the sync server must not be present on reload
    stale_env = utils_env.Env(filename=self.envfilename)
    assert stale_env.get_syncserver(222) is None
    env.save(filename=self.envfilename)
    fresh_env = utils_env.Env(filename=self.envfilename)
    sync2 = fresh_env.get_syncserver(222)
    assert sync2.instance == sync1.instance
# NOTE(review): this constructor was pasted with its original line breaks
# collapsed; the code below is kept byte-identical. It builds the complete
# set of "vpnconn_*" cartesian parameters for both tunnel end point nodes
# from the left side's local1/remote1/peer1/auth configuration (the right
# side configuration is derived via self._get_peer_variant), merges them
# with the nodes' existing params, and stores the nodes, their internet
# interfaces and lan netconfigs on the tunnel object.
def __init__(self, name, node1, node2, local1=None, remote1=None, peer1=None, auth=None): """ Construct the full set of required tunnel parameters for a given tunnel left configuration that are not already defined in the parameters of the two vms (left `node1` with right `node2`). :param str name: name of the tunnel :param node1: left side node of the tunnel :type node1: VMNode object :param node2: right side node of the tunnel :type node2: VMNode object :param local1: left local configuration with at least one key 'type' with value 'nic' for left-site (could be used for site-to-site or site-to-point tunnels) or 'internetip' for left-point (for point-to-site or point-to-point tunnels) or 'custom' for left-site or left-point that is not a LAN (e.g. for tunnel forwarding of another tunneled remote net) :type local1: {str, str} :param remote1: left remote configuration with at least one key 'type' with value 'custom' for right-site (could be used for site-to-site or point-to-site tunnels) or 'externalip' for right-point (for site-to-point or point-to-point tunnels) or 'modeconfig' for special right-point (using a ModeConfig connection for a right road warrior) :type remote1: {str, str} :param peer1: left peer configuration with at least one key 'type' with value 'ip' for no NAT along the tunnel (the peer having a public IP) or 'dynip' for a road warrior right end point (the peer is behind NAT and its IP is changing) :type peer1: {str, str} :param auth: authentication configuration with at least one key 'type' with value in "pubkey", "psk", "none" and the rest of the keys providing type details :type auth: {str, str} :raises: :py:class:`ValueError` if some of the supplied configuration is not valid The right side `local2`, `remote2`, `peer2` configuration is determined from the left side. 
If a PSK (pre-shared secret) authentication type is specified, the relevant additional options are `psk` for the secret word, `left_id` and `right_id` for the identification type to be used on each side (either IP for empty id or any user-defined id). """ logging.info("Preparing tunnel parameters for each of %s and %s", node1.name, node2.name) if local1 is None: local1 = {"type": "nic", "nic": "lan_nic"} if remote1 is None: remote1 = {"type": "custom", "nic": "lan_nic"} if peer1 is None: peer1 = {"type": "ip", "nic": "internet_nic"} local2, remote2, peer2 = self._get_peer_variant(local1, remote1, peer1) params = utils_params.Params() # main parameters params["vpnconn_%s_%s" % (name, node1.name)] = name params["vpnconn_%s_%s" % (name, node2.name)] = name params["vpn_side_%s_%s" % (name, node1.name)] = "left" params["vpn_side_%s_%s" % (name, node2.name)] = "right" params["vpnconn_lan_type_%s_%s" % (name, node1.name)] = local1["type"].upper() params["vpnconn_lan_type_%s_%s" % (name, node2.name)] = local2["type"].upper() params["vpnconn_remote_type_%s_%s" % (name, node1.name)] = remote1["type"].upper() params["vpnconn_remote_type_%s_%s" % (name, node2.name)] = remote2["type"].upper() if local1["type"] == "nic": netconfig1 = node1.interfaces[node1.params[local1.get( "nic", "lan_nic")]].netconfig params["vpnconn_lan_net_%s_%s" % (name, node1.name)] = netconfig1.net_ip params["vpnconn_lan_netmask_%s_%s" % (name, node1.name)] = netconfig1.netmask params["vpnconn_remote_net_%s_%s" % (name, node2.name)] = netconfig1.net_ip params["vpnconn_remote_netmask_%s_%s" % (name, node2.name)] = netconfig1.netmask elif local1["type"] == "internetip": netconfig1 = None elif local1["type"] == "custom": # "custom" configuration does no guarantee pre-existing netconfig like "nic" # so create an address/netmask only netconfig to match against for compatibility netconfig1 = VMNetconfig() netconfig1.net_ip = local1["lnet"] netconfig1.netmask = local1["lmask"] params["vpnconn_lan_net_%s_%s" % 
(name, node1.name)] = local1["lnet"] params["vpnconn_lan_netmask_%s_%s" % (name, node1.name)] = local1["lmask"] else: raise ValueError( "Invalid choice of left local type '%s', must be one of" " 'nic', 'internetip', 'custom'" % local1["type"]) if remote1["type"] == "custom": if local1["type"] == "custom": netconfig2 = VMNetconfig() netconfig2.net_ip = local1["rnet"] netconfig2.netmask = local1["rmask"] params["vpnconn_lan_net_%s_%s" % (name, node2.name)] = local1["rnet"] params["vpnconn_lan_netmask_%s_%s" % (name, node2.name)] = local1["rmask"] else: netconfig2 = node2.interfaces[node2.params[remote1.get( "nic", "lan_nic")]].netconfig params["vpnconn_lan_net_%s_%s" % (name, node2.name)] = netconfig2.net_ip params["vpnconn_lan_netmask_%s_%s" % (name, node2.name)] = netconfig2.netmask params["vpnconn_remote_net_%s_%s" % (name, node1.name)] = netconfig2.net_ip params["vpnconn_remote_netmask_%s_%s" % (name, node1.name)] = netconfig2.netmask elif remote1["type"] == "externalip": netconfig2 = None elif remote1["type"] == "modeconfig": netconfig2 = None params["vpnconn_remote_modeconfig_ip_%s_%s" % (name, node1.name)] = remote1["modeconfig_ip"] else: raise ValueError( "Invalid choice of left remote type '%s', must be one of" " 'custom', 'externalip', or 'modeconfig'" % remote1["type"]) # road warrior parameters params["vpnconn_peer_type_%s_%s" % (name, node1.name)] = peer1["type"].upper() if peer1["type"] == "ip": interface2 = node2.interfaces[node2.params[peer1.get( "nic", "internet_nic")]] params["vpnconn_peer_ip_%s_%s" % (name, node1.name)] = interface2.ip params["vpnconn_activation_%s_%s" % (name, node1.name)] = "ALWAYS" elif peer1["type"] == "dynip": interface2 = node2.interfaces[node2.params[peer1.get( "nic", "internet_nic")]] params["vpnconn_activation_%s_%s" % (name, node1.name)] = "PASSIVE" else: raise ValueError( "Invalid choice of left peer type '%s', must be one of" " 'ip', 'dynip'" % peer1["type"]) params["vpnconn_peer_type_%s_%s" % (name, node2.name)] = 
peer2["type"].upper() interface1 = node1.interfaces[node1.params[peer2.get( "nic", "internet_nic")]] params["vpnconn_peer_ip_%s_%s" % (name, node2.name)] = interface1.ip params["vpnconn_activation_%s_%s" % (name, node2.name)] = "ALWAYS" # authentication parameters if auth is None: params["vpnconn_key_type_%s" % name] = "NONE" elif auth["type"] == "pubkey": params["vpnconn_key_type_%s" % name] = "PUBLIC" elif auth["type"] == "psk": params["vpnconn_key_type_%s" % name] = "PSK" psk = auth["psk"] left_id = auth["left_id"] left_id_type = "IP" if left_id == "" else "CUSTOM" right_id = auth["right_id"] right_id_type = "IP" if right_id == "" else "CUSTOM" params["vpnconn_psk_%s" % name] = psk params["vpnconn_psk_foreign_id_%s_%s" % (name, node1.name)] = right_id params["vpnconn_psk_foreign_id_type_%s_%s" % (name, node1.name)] = right_id_type params["vpnconn_psk_own_id_%s_%s" % (name, node1.name)] = left_id params["vpnconn_psk_own_id_type_%s_%s" % (name, node1.name)] = left_id_type params["vpnconn_psk_foreign_id_%s_%s" % (name, node2.name)] = left_id params["vpnconn_psk_foreign_id_type_%s_%s" % (name, node2.name)] = left_id_type params["vpnconn_psk_own_id_%s_%s" % (name, node2.name)] = right_id params["vpnconn_psk_own_id_type_%s_%s" % (name, node2.name)] = right_id_type else: raise ValueError( "Invalid choice of authentication type '%s', must be one of" " 'pubkey', 'psk', or 'none'" % auth["type"]) # overwrite the base vpn parameters with other already defined tunnel parameters params1 = params.object_params(node1.name) params2 = params.object_params(node2.name) params1.update(node1.params) params2.update(node2.params) node1.params = params1 node2.params = params2 self._params = params self._left = node1 self._left_iface = interface1 self._left_net = netconfig1 self._right = node2 self._right_iface = interface2 self._right_net = netconfig2 self._name = name logging.info("Produced tunnel from parameters is %s", self)
def setUp(self):
    """Provide each test with a fresh Params built from BASE_DICT."""
    self.params = utils_params.Params(BASE_DICT)
# NOTE(review): pasted with its original line breaks collapsed; the code is
# kept byte-identical. It runs one cartesian test: resolves the env file,
# collects subtest dirs (generic, multi-host-migration and vm_type specific
# providers), loads the module(s) for params["type"], preprocesses the env,
# runs each entrypoint, then postprocesses, aborting the whole job when
# abort_on_error == "yes".
# NOTE(review): `keys = params.keys()` followed by `keys.sort()` only works
# on Python 2 — dict views have no .sort() on Python 3; presumably this is
# legacy Python 2 code, confirm before porting (use `sorted(params)`).
def run_once(self, params): # Convert params to a Params object params = utils_params.Params(params) # If a dependency test prior to this test has failed, let's fail # it right away as TestNA. if params.get("dependency_failed") == 'yes': raise exceptions.TestSkipError("Test dependency failed") # Report virt test version logging.info(version.get_pretty_version_info()) # Report the parameters we've received and write them as keyvals logging.debug("Test parameters:") keys = params.keys() keys.sort() for key in keys: logging.debug(" %s = %s", key, params[key]) self.write_test_keyval({key: params[key]}) # Set the log file dir for the logging mechanism used by kvm_subprocess # (this must be done before unpickling env) utils_misc.set_log_file_dir(self.debugdir) # Open the environment file custom_env_path = params.get("custom_env_path", "") if custom_env_path: env_path = custom_env_path else: env_path = params.get("vm_type") env_filename = os.path.join(self.bindir, "backends", env_path, params.get("env", "env")) env = utils_env.Env(env_filename, self.env_version) other_subtests_dirs = params.get("other_tests_dirs", "") test_passed = False t_type = None try: try: try: subtest_dirs = [] bin_dir = self.bindir for d in other_subtests_dirs.split(): # Replace split char. d = os.path.join(*d.split("/")) subtestdir = os.path.join(bin_dir, d, "tests") if not os.path.isdir(subtestdir): raise exceptions.TestError("Directory %s not" " exist." 
% (subtestdir)) subtest_dirs += data_dir.SubdirList(subtestdir, bootstrap.test_filter) # Verify if we have the correspondent source file for it for generic_subdir in asset.get_test_provider_subdirs('generic'): subtest_dirs += data_dir.SubdirList(generic_subdir, bootstrap.test_filter) for multi_host_migration_subdir in asset.get_test_provider_subdirs( 'multi_host_migration'): subtest_dirs += data_dir.SubdirList(multi_host_migration_subdir, bootstrap.test_filter) for specific_subdir in asset.get_test_provider_subdirs(params.get("vm_type")): subtest_dirs += data_dir.SubdirList(specific_subdir, bootstrap.test_filter) subtest_dir = None # Get the test routine corresponding to the specified # test type logging.debug("Searching for test modules that match " "'type = %s' and 'provider = %s' " "on this cartesian dict", params.get("type"), params.get("provider", None)) t_types = params.get("type").split() provider = params.get("provider", None) if provider is not None: subtest_dirs = [ d for d in subtest_dirs if provider in d] # Make sure we can load provider_lib in tests for s in subtest_dirs: if os.path.dirname(s) not in sys.path: sys.path.insert(0, os.path.dirname(s)) test_modules = {} for t_type in t_types: for d in subtest_dirs: module_path = os.path.join(d, "%s.py" % t_type) if os.path.isfile(module_path): subtest_dir = d break if subtest_dir is None: msg = ("Could not find test file %s.py on tests" "dirs %s" % (t_type, subtest_dirs)) raise exceptions.TestError(msg) # Load the test module f, p, d = imp.find_module(t_type, [subtest_dir]) test_modules[t_type] = imp.load_module(t_type, f, p, d) f.close() # Preprocess try: params = env_process.preprocess(self, params, env) finally: env.save() # Run the test function for t_type in t_types: test_module = test_modules[t_type] run_func = utils_misc.get_test_entrypoint_func( t_type, test_module) try: run_func(self, params, env) self.verify_background_errors() finally: env.save() test_passed = True error_message = 
funcatexit.run_exitfuncs(env, t_type) if error_message: raise exceptions.TestWarn("funcatexit failed with: %s" % error_message) except Exception as e: if t_type is not None: error_message = funcatexit.run_exitfuncs(env, t_type) if error_message: logging.error(error_message) logging.error("Test failed: %s: %s", e.__class__.__name__, e) try: env_process.postprocess_on_error( self, params, env) finally: env.save() raise finally: # Postprocess try: try: env_process.postprocess(self, params, env) except Exception as e: if test_passed: raise logging.error("Exception raised during " "postprocessing: %s", e) finally: env.save() except Exception as e: if params.get("abort_on_error") != "yes": raise # Abort on error logging.info("Aborting job (%s)", e) if params.get("vm_type") == "qemu": for vm in env.get_all_vms(): if vm.is_dead(): continue logging.info("VM '%s' is alive.", vm.name) for m in vm.monitors: logging.info( "'%s' has a %s monitor unix socket at: %s", vm.name, m.protocol, m.filename) logging.info( "The command line used to start '%s' was:\n%s", vm.name, vm.make_create_command()) raise exceptions.JobError("Abort requested (%s)" % e)
def run_once(self, params):
    """
    Entry point for a single virt test iteration.

    Locates the test module(s) named by the ``type`` parameter under the
    known test directories, preprocesses the environment, runs each
    ``run_<type>()`` function, and postprocesses — always persisting the
    environment file, even on failure.

    :param params: dict of test parameters (converted to utils_params.Params).
    :raise error.TestNAError: if a dependency test already failed.
    :raise error.TestError: if a test module cannot be found or a test
                            directory does not exist.
    :raise error.JobError: when ``abort_on_error`` is set and the test failed.
    """
    # Convert params to a Params object so we get its get()/str helpers.
    params = utils_params.Params(params)

    # If a dependency test prior to this test has failed, let's fail
    # it right away as TestNA.
    if params.get("dependency_failed") == 'yes':
        raise error.TestNAError("Test dependency failed")

    # Report the parameters we've received and write them as keyvals.
    # sorted() is used instead of list.sort() so this also works where
    # keys() returns a view rather than a list.
    logging.debug("Test parameters:")
    for key in sorted(params.keys()):
        logging.debug(" %s = %s", key, params[key])
        self.write_test_keyval({key: params[key]})

    # Set the log file dir for the logging mechanism used by kvm_subprocess
    # (this must be done before unpickling env)
    utils_misc.set_log_file_dir(self.debugdir)

    # Open the environment file
    env_filename = os.path.join(self.bindir, params.get("vm_type"),
                                params.get("env", "env"))
    env = utils_env.Env(env_filename, self.env_version)

    test_passed = False

    try:
        try:
            try:
                subtest_dirs = []
                tests_dir = self.job.testdir

                # Extra, user-supplied test dirs ("a/b" -> tests_dir/a/b/tests)
                other_subtests_dirs = params.get("other_tests_dirs", "")
                for d in other_subtests_dirs.split():
                    # Replace split char so paths work on any OS.
                    d = os.path.join(*d.split("/"))
                    subtestdir = os.path.join(tests_dir, d, "tests")
                    if not os.path.isdir(subtestdir):
                        raise error.TestError("Directory %s does not exist."
                                              % subtestdir)
                    subtest_dirs.append(subtestdir)

                # Verify if we have the correspondent source file for it
                virt_dir = os.path.dirname(self.virtdir)
                subtest_dirs.append(os.path.join(virt_dir, "tests"))
                subtest_dirs.append(os.path.join(self.bindir,
                                                 params.get("vm_type"),
                                                 "tests"))

                # Get the test routine corresponding to the specified
                # test type
                t_types = params.get("type").split()
                test_modules = []
                for t_type in t_types:
                    # Reset for every type, otherwise a leftover match from a
                    # previous type would mask a missing module and we would
                    # get a raw ImportError instead of the TestError below.
                    subtest_dir = None
                    for d in subtest_dirs:
                        module_path = os.path.join(d, "%s.py" % t_type)
                        if os.path.isfile(module_path):
                            subtest_dir = d
                            break
                    if subtest_dir is None:
                        msg = ("Could not find test file %s.py on tests "
                               "dirs %s" % (t_type, subtest_dirs))
                        raise error.TestError(msg)
                    # Load the test module
                    f, p, d = imp.find_module(t_type, [subtest_dir])
                    test_modules.append(
                        (t_type, imp.load_module(t_type, f, p, d)))
                    f.close()

                # Preprocess; always save env even when preprocess raises.
                try:
                    env_process.preprocess(self, params, env)
                finally:
                    env.save()

                # Run the test function(s) in the declared order.
                for t_type, test_module in test_modules:
                    msg = "Running function: %s.run_%s()" % (t_type, t_type)
                    logging.info(msg)
                    run_func = getattr(test_module, "run_%s" % t_type)
                    try:
                        run_func(self, params, env)
                        self.verify_background_errors()
                    finally:
                        env.save()
                test_passed = True

            except Exception as e:
                logging.error("Test failed: %s: %s",
                              e.__class__.__name__, e)
                # Best-effort error postprocessing before re-raising.
                try:
                    env_process.postprocess_on_error(self, params, env)
                finally:
                    env.save()
                raise

        finally:
            # Postprocess
            try:
                try:
                    env_process.postprocess(self, params, env)
                except Exception as e:
                    # A postprocess failure only fails a passing test;
                    # otherwise it would mask the original error.
                    if test_passed:
                        raise
                    logging.error("Exception raised during "
                                  "postprocessing: %s", e)
            finally:
                env.save()

    except Exception as e:
        if params.get("abort_on_error") != "yes":
            raise
        # Abort on error
        logging.info("Aborting job (%s)", e)
        if params.get("vm_type") == "qemu":
            # Log debugging hints (monitors, command line) for live VMs
            # before aborting the whole job.
            for vm in env.get_all_vms():
                if vm.is_dead():
                    continue
                logging.info("VM '%s' is alive.", vm.name)
                for m in vm.monitors:
                    logging.info("'%s' has a %s monitor unix socket at: %s",
                                 vm.name, m.protocol, m.filename)
                logging.info("The command line used to start '%s' was:\n%s",
                             vm.name, vm.make_qemu_command())
        raise error.JobError("Abort requested (%s)" % e)