def reload(self):
    """
    Causes all threads to be terminated in preparation for running again
    """
    # Stop all source/destination threads, re-arm the shared terminate
    # event, and discard accumulated reports by replacing the datastore
    # with a fresh, empty one.
    self.stop_threads()
    self.terminate_event.clear()
    self.datastore = Datastore()
def test_get_nonexistant_item_raises_keyerror(self):
    """get() must raise KeyError for a key that was never stored."""
    # assertRaises replaces the hand-rolled try/except/return/fail pattern:
    # it is the idiomatic unittest form and produces a clearer failure
    # message when no exception is raised.
    datastore = Datastore()
    with self.assertRaises(KeyError):
        datastore.get("NONEXISTANT")
def test_get_returns_correct_item(self):
    """get() returns the value stored under the requested key (the value
    is round-tripped through pickle per the store's contract)."""
    store = Datastore()
    # Seed the internal dict directly so only get() is exercised.
    with patch.dict(store._datastore, test_item=sentinel.test_item_value):
        retrieved = store.get("test_item")
        self.assertEqual(retrieved, sentinel.test_item_value)
def test_get_existing_item_with_default_returns_item(self):
    """When the key exists, the stored value wins over any supplied
    default."""
    store = Datastore()
    # Make deepcopy a pass-through so the sentinel survives retrieval.
    self.mock_copy.deepcopy.side_effect = lambda value: value
    with patch.dict(store._datastore, test_item=sentinel.test_value):
        retrieved = store.get("test_item", default=sentinel.default_value)
        self.assertEqual(retrieved, sentinel.test_value)
def test_mapping_has_no_hostname_when_unavailible(self, virt):
    """Hypervisors must have name=None when the libvirt capabilities XML
    exposes no hostname."""
    config = self.create_config('test', None, type='libvirt', server='abc://server/test')
    datastore = Datastore()
    virt.return_value.getCapabilities.return_value = LIBVIRT_CAPABILITIES_NO_HOSTNAME_XML
    virt.return_value.getType.return_value = "LIBVIRT_TYPE"
    virt.return_value.getVersion.return_value = "VERSION 1337"
    self.run_virt(config, datastore)
    result = datastore.get(config.name)
    for host in result.association['hypervisors']:
        # assertIsNone gives a useful failure message, unlike
        # assertTrue(x is None) which only reports "False is not true".
        self.assertIsNone(host.name)
def test_mapping_has_no_hostname_when_unavailible(self, virt):
    """Hypervisors must have name=None when the libvirt capabilities XML
    exposes no hostname."""
    config = Config('test', 'libvirt', server='abc://server/test')
    datastore = Datastore()
    virt.return_value.getCapabilities.return_value = LIBVIRT_CAPABILITIES_NO_HOSTNAME_XML
    virt.return_value.getType.return_value = "LIBVIRT_TYPE"
    virt.return_value.getVersion.return_value = "VERSION 1337"
    self.run_virt(config, datastore)
    result = datastore.get(config.name)
    for host in result.association['hypervisors']:
        # assertIsNone gives a useful failure message, unlike
        # assertTrue(x is None) which only reports "False is not true".
        self.assertIsNone(host.name)
def test_mapping_hypervisor_has_system_uuid(self, virt):
    """Each mapped hypervisor carries the dmi.system.uuid fact parsed from
    the libvirt capabilities XML."""
    config = self.create_config('test', None, type='libvirt', server='abc://server/test')
    store = Datastore()
    libvirt_conn = virt.return_value
    libvirt_conn.getCapabilities.return_value = LIBVIRT_CAPABILITIES_XML
    libvirt_conn.getType.return_value = "LIBVIRT_TYPE"
    libvirt_conn.getVersion.return_value = "VERSION 1337"
    self.run_virt(config, store)
    report = store.get(config.name)
    for hypervisor in report.association['hypervisors']:
        self.assertEqual(hypervisor.facts['dmi.system.uuid'], 'this-is-uuid')
def test_mapping_has_hostname_when_availible(self, virt):
    """Hypervisors take the hostname reported by libvirt when one is
    available."""
    config = self.create_config('test', None, type='libvirt', server='abc://server/test')
    datastore = Datastore()
    virt.return_value.getCapabilities.return_value = LIBVIRT_CAPABILITIES_XML
    virt.return_value.getType.return_value = "LIBVIRT_TYPE"
    virt.return_value.getVersion.return_value = "VERSION 1337"
    virt.return_value.getHostname.return_value = "test_host"
    self.run_virt(config, datastore)
    result = datastore.get(config.name)
    for host in result.association['hypervisors']:
        # assertEqual reports both values on failure, unlike
        # assertTrue(a == b) which only says "False is not true".
        self.assertEqual(host.name, "test_host")
def test_read_status_bad_source_credentials(self, virt):
    """A status run whose connectivity check raises VirtError must still
    send exactly one StatusReport, with server=None and the error text in
    the source message."""
    config = self.create_config('test', None, type='libvirt')
    virt.return_value.getCapabilities.return_value = LIBVIRT_CAPABILITIES_XML
    virt.return_value.getType.return_value = "LIBVIRT_TYPE"
    virt.return_value.getVersion.return_value = "VERSION 1337"
    v = Virt.from_config(self.logger, config, Datastore(), interval=DefaultInterval)
    # Force a single synchronous status pass instead of the event loop.
    v._terminate_event = Event()
    v._interval = 3600
    v._oneshot = True
    v._createEventLoop = Mock()
    v.status = True
    v._send_data = Mock()
    v.statusConfirmConnection = Mock()
    v.statusConfirmConnection.side_effect = VirtError(
        "Incorrect domain/username/password")
    v.run()

    v._send_data.assert_called_once_with(data_to_send=ANY)
    # assertIsInstance / assertIsNone replace the weaker
    # assertTrue(isinstance(...)) / assertEqual(..., None) forms and give
    # clearer diagnostics on failure.
    self.assertIsInstance(
        v._send_data.mock_calls[0].kwargs['data_to_send'], StatusReport)
    self.assertIsNone(
        v._send_data.mock_calls[0].kwargs['data_to_send'].data['source']
        ['server'])
    self.assertEqual(
        v._send_data.mock_calls[0].kwargs['data_to_send'].data['source']
        ['message'], "Incorrect domain/username/password.")
def test_pending_vm(self):
    """A pending (not yet scheduled) VM must not show up as a guest of
    its node."""
    client = Mock()
    client.get_nodes.return_value = self.nodes()
    client.get_vms.return_value = self.pending_vms()

    config = self.create_config(name='test', wrapper=None, type='kubevirt',
                                owner='owner', kubeconfig='/etc/hosts')
    with patch.dict('os.environ', {'KUBECONFIG': '/dev/null'}):
        kubevirt = Virt.from_config(self.logger, config, Datastore())
        kubevirt._client = client

        expected = Hypervisor(
            hypervisorId='52c01ad890e84b15a1be4be18bd64ecd',
            name='main',
            guestIds=[],  # the pending VM contributes no guest entry
            facts={
                Hypervisor.CPU_SOCKET_FACT: '2',
                Hypervisor.HYPERVISOR_TYPE_FACT: 'qemu',
                Hypervisor.SYSTEM_UUID_FACT: '52c01ad890e84b15a1be4be18bd64ecd',
                Hypervisor.HYPERVISOR_VERSION_FACT: 'v1.9.1+a0ce1bc657',
            })
        actual = kubevirt.getHostGuestMapping()['hypervisors'][0]
        self.assertEqual(expected.toDict(), actual.toDict())
def test_getHostGuestMapping_with_hm(self):
    """With hypervisor_id='hostname' the mapping identifies hypervisors by
    node hostname and lists the running guest."""
    client = Mock()
    client.get_nodes.return_value = self.nodes()
    client.get_vms.return_value = self.vms()

    config = self.create_config(name='test', wrapper=None, type='kubevirt',
                                owner='owner', kubeconfig='/etc/hosts',
                                hypervisor_id='hostname')
    with patch.dict('os.environ', {'KUBECONFIG': '/dev/null'}):
        kubevirt = Virt.from_config(self.logger, config, Datastore())
        kubevirt._client = client

        expected = Hypervisor(
            hypervisorId='minikube',
            name='master',
            guestIds=[
                Guest(
                    'f83c5f73-5244-4bd1-90cf-02bac2dda608',
                    kubevirt.CONFIG_TYPE,
                    Guest.STATE_RUNNING,
                )
            ],
            facts={
                Hypervisor.CPU_SOCKET_FACT: '2',
                Hypervisor.HYPERVISOR_TYPE_FACT: 'qemu',
                Hypervisor.HYPERVISOR_VERSION_FACT: 'v1.9.1+a0ce1bc657',
            }
        )
        actual = kubevirt.getHostGuestMapping()['hypervisors'][0]
        self.assertEqual(expected.toDict(), actual.toDict())
def run_virt(self, config, datastore=None):
    """Drive a virt backend built from *config* through one one-shot
    cycle, storing results in *datastore* (a fresh Datastore if None)."""
    backend = Virt.from_config(self.logger, config, datastore or Datastore())
    backend._terminate_event = Event()
    backend._interval = 3600
    backend._oneshot = True
    backend._createEventLoop = Mock()
    backend._run()
def setUp(self, is_pc=False):
    """Build an AHV virt backend; is_pc toggles Prism Central mode."""
    config = self.create_config(
        name='test', wrapper=None, type='ahv', server='10.10.10.10',
        username='******', password='******', owner='owner',
        prism_central=is_pc)
    self.ahv = Virt.from_config(self.logger, config, Datastore(),
                                interval=DefaultInterval)
def test_milicpu(self):
    """Nodes reporting CPU capacity in millicores are rounded to whole
    sockets in the CPU socket fact."""
    client = Mock()
    client.get_nodes.return_value = self.new_nodes()
    client.get_vms.return_value = self.vms()

    config = self.create_config(name='test', wrapper=None, type='kubevirt',
                                owner='owner', kubeconfig='/etc/hosts')
    with patch.dict('os.environ', {'KUBECONFIG': '/dev/null'}):
        kubevirt = Virt.from_config(self.logger, config, Datastore())
        kubevirt._client = client

        expected = Hypervisor(
            hypervisorId='52c01ad890e84b15a1be4be18bd64ecd',
            name='main',
            guestIds=[
                Guest(
                    'f83c5f73-5244-4bd1-90cf-02bac2dda608',
                    kubevirt.CONFIG_TYPE,
                    Guest.STATE_RUNNING,
                )
            ],
            facts={
                Hypervisor.CPU_SOCKET_FACT: '7',
                Hypervisor.HYPERVISOR_TYPE_FACT: 'qemu',
                Hypervisor.SYSTEM_UUID_FACT: '52c01ad890e84b15a1be4be18bd64ecd',
                Hypervisor.HYPERVISOR_VERSION_FACT: 'v1.18.0-rc.1',
            })
        actual = kubevirt.getHostGuestMapping()['hypervisors'][0]
        self.assertEqual(expected.toDict(), actual.toDict())
def test_get_uses_lock(self):
    # Ensure that the store's lock is held while get() touches the
    # internal datastore: no access before the lock is entered, exactly
    # one read (and no writes) by the time it is released.
    #
    # These assertions assume the lock is used as a context manager and
    # that it is acquired on __enter__ and released on __exit__ by the
    # calling thread, so we hook side effects onto those two methods.
    datastore, mock_internal_datastore = self._mock_test_data(
        Datastore(), test_item=sentinel.test_value)

    def assert_internal_store_unchanged(*args, **kwargs):
        # Runs on lock acquisition: the internal datastore must not have
        # been touched yet.
        mock_internal_datastore.__getitem__.assert_not_called()
        mock_internal_datastore.__setitem__.assert_not_called()

    def assert_internal_store_accessed(*args, **kwargs):
        # Runs on lock release: exactly one read of "test_item" must have
        # happened while the lock was held, and no writes.
        mock_internal_datastore.__getitem__.assert_called_once_with(
            "test_item")
        mock_internal_datastore.__setitem__.assert_not_called()
    self.mock_lock.__enter__.side_effect = assert_internal_store_unchanged
    self.mock_lock.__exit__.side_effect = assert_internal_store_accessed
    datastore.get("test_item")
    # Finally, verify the lock was actually entered and exited once each;
    # without this the side effects above might never have run.
    self.mock_lock.__enter__.assert_called_once()
    self.mock_lock.__exit__.assert_called_once()
def test_status(self, cfg, _, kube_client):
    """A status run must report the configured server in the sent
    StatusReport."""
    cfg.return_value = Config()
    kube_client.get_nodes = Mock(return_value=self.nodes())
    # Match the get_nodes setup above: replace the method itself so that
    # get_vms() yields the vm list. The previous
    # "get_vms.return_value = Mock(return_value=self.vms())" made
    # get_vms() return a factory Mock instead of the vms.
    kube_client.get_vms = Mock(return_value=self.vms())
    self.config = self.create_config(name='test', wrapper=None,
                                     type='kubevirt', owner='owner',
                                     kubeconfig='/etc/hosts',
                                     kubeversion='version',
                                     hypervisor_id='hostname')
    self.config['server'] = 'kubeserver'
    with patch.dict('os.environ', {'KUBECONFIG': '/dev/null'}):
        kubevirt = Virt.from_config(self.logger, self.config, Datastore())
        kubevirt.status = True
        kubevirt._send_data = Mock()
        self.run_once(kubevirt)

        kubevirt._send_data.assert_called_once_with(data_to_send=ANY)
        # assertIsInstance gives clearer diagnostics than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(
            kubevirt._send_data.mock_calls[0].kwargs['data_to_send'],
            StatusReport)
        self.assertEqual(
            kubevirt._send_data.mock_calls[0].kwargs['data_to_send'].
            data['source']['server'], self.config['server'])
def test_oneshot(self, mock_client):
    """One oneshot pass stores a report whose config name/values and
    association match what getHostGuestMapping produced."""
    expected_assoc = '"well formed HostGuestMapping"'
    expected_report = HostGuestAssociationReport(self.esx.config, expected_assoc)

    update_set = Mock()
    update_set.version = 'some_new_version_string'
    update_set.truncated = False
    mock_client.return_value.service.WaitForUpdatesEx.return_value = update_set

    store = Datastore()
    self.esx.applyUpdates = Mock()
    self.esx.getHostGuestMapping = Mock(return_value=expected_assoc)
    self.run_once(store)

    stored_report = store.get(self.esx.config.name)
    self.assertEqual(expected_report.config.name, stored_report.config.name)
    self.assertEqual(expected_report.config._values,
                     stored_report.config._values)
    self.assertEqual(expected_report._assoc, stored_report._assoc)
def setUp(self):
    """Create a kubevirt backend with KUBECONFIG pointed at /dev/null so
    no real cluster config is read."""
    config = self.create_config(name='test', wrapper=None, type='kubevirt',
                                owner='owner', env='env')
    with patch.dict('os.environ', {'KUBECONFIG': '/dev/null'}):
        self.kubevirt = Virt.from_config(self.logger, config, Datastore())
def test_empty_kubeconfig(self):
    """Without an explicit kubeconfig the default ~/.kube/config path is
    used."""
    config = self.create_config(name='test', wrapper=None, type='kubevirt',
                                owner='owner')
    virt_backend = Virt.from_config(self.logger, config, Datastore())
    self.assertEqual("~/.kube/config", virt_backend._path)
def test_oneshot(self, mock_client):
    """One oneshot pass stores a report whose config hash and association
    match what getHostGuestMapping produced."""
    expected_assoc = '"well formed HostGuestMapping"'
    expected_report = HostGuestAssociationReport(self.esx.config, expected_assoc)

    update_set = Mock()
    update_set.version = 'some_new_version_string'
    update_set.truncated = False
    mock_client.return_value.service.WaitForUpdatesEx.return_value = update_set

    store = Datastore()
    self.esx.applyUpdates = Mock()
    self.esx.getHostGuestMapping = Mock(return_value=expected_assoc)
    self.run_once(store)

    stored_report = store.get(self.esx.config.name)
    self.assertEqual(expected_report.config.hash, stored_report.config.hash)
    self.assertEqual(expected_report._assoc, stored_report._assoc)
def setUp(self):
    """Create a RHEV-M backend whose password contains a non-ASCII
    character, then precompute its API URLs."""
    config = self.create_config(name='test', wrapper=None, type='rhevm',
                                server='localhost', username='******',
                                password=u'1€345678', owner='owner')
    self.rhevm = Virt.from_config(self.logger, config, Datastore())
    self.rhevm.build_urls()
def run_once(self, kubevirt, datastore=None):
    """Run kubevirt through a single oneshot cycle, reporting into
    *datastore* (a Mock speced on Datastore if not supplied)."""
    kubevirt.dest = datastore if datastore is not None else Mock(spec=Datastore())
    kubevirt._terminate_event = Event()
    kubevirt._oneshot = True
    kubevirt._interval = 0
    kubevirt._run()
def run_once(self, datastore=None):
    """Run ESX through a single oneshot cycle, reporting into
    *datastore* (a Mock speced on Datastore if not supplied)."""
    # The original assigned self.esx._oneshot = True twice (once before
    # the datastore setup and once after); a single assignment suffices.
    if datastore is None:
        datastore = Mock(spec=Datastore())
    self.esx.dest = datastore
    self.esx._terminate_event = Event()
    self.esx._oneshot = True
    self.esx._interval = 0
    self.esx._run()
def test_put_uses_pickle_dumps(self):
    """put() must serialize the value via pickle.dumps before storing
    it."""
    key = "test_item"
    value = "test_value"
    datastore, internal_ds = self.mock_test_data(Datastore(),
                                                 test_item=value)

    datastore.put(key, value)

    self.mock_pickle.dumps.assert_called_with(value)
    # The serialized form — not the raw value — must land in the store.
    internal_ds.__setitem__.assert_called_with(
        key, self.mock_pickle.dumps.return_value)
def test_put_uses_deepcopy(self):
    # Ensure that put stores the return value of copy.deepcopy.
    # (The previous comment said "pickle.dumps", but this test exercises
    # the deepcopy path.)
    test_item = "test_value"
    test_key = "test_item"
    datastore, mock_internal_ds = self._mock_test_data(Datastore(),
                                                       test_item=test_item)
    datastore.put(test_key, test_item)
    self.mock_copy.deepcopy.assert_called_with(test_item)
    # sentinel.deep_copy_value_1 is presumably the value the mocked
    # deepcopy returns for its first call (configured in
    # _mock_test_data) — verify against that helper.
    expected_value = sentinel.deep_copy_value_1
    mock_internal_ds.__setitem__.assert_called_with(
        test_key, expected_value)
def run_once(self, datastore=None):
    """Run the generic virt backend through a single oneshot cycle,
    reporting into *datastore* (a Mock speced on Datastore if not
    supplied)."""
    # The original assigned self.virt._oneshot = True twice (once before
    # the datastore setup and once after); a single assignment suffices.
    if datastore is None:
        datastore = Mock(spec=Datastore())
    self.virt.dest = datastore
    self.virt._terminate_event = Event()
    self.virt._oneshot = True
    self.virt._interval = 0
    self.virt.run()
def setUp(self):
    """Create a RHEV-M backend pinned to API major version 3 and
    precompute its API URLs."""
    config = self.create_config(name='test', wrapper=None, type='rhevm',
                                server='localhost', username='******',
                                password='******', owner='owner', env='env')
    self.rhevm = Virt.from_config(self.logger, config, Datastore())
    self.rhevm.major_version = '3'
    self.rhevm.build_urls()
def test_status_bad_source_credentials(self, cfg, _, kube_client):
    """When the connectivity check raises VirtError, the StatusReport must
    carry the configured server and the failure message."""
    cfg.return_value = Config()
    kube_client.get_nodes = Mock(return_value=self.nodes())
    # Match the get_nodes setup above: replace the method itself so that
    # get_vms() yields the vm list. The previous
    # "get_vms.return_value = Mock(return_value=self.vms())" made
    # get_vms() return a factory Mock instead of the vms.
    kube_client.get_vms = Mock(return_value=self.vms())
    self.config = self.create_config(name='test', wrapper=None,
                                     type='kubevirt', owner='owner',
                                     kubeconfig='/etc/hosts',
                                     kubeversion='version',
                                     hypervisor_id='hostname')
    self.config['server'] = 'kubeserver'
    with patch.dict('os.environ', {'KUBECONFIG': '/dev/null'}):
        kubevirt = Virt.from_config(self.logger, self.config, Datastore())
        kubevirt.status = True
        kubevirt._send_data = Mock()
        kubevirt.statusConfirmConnection = Mock()
        kubevirt.statusConfirmConnection.side_effect = VirtError(
            "Incorrect domain/username/password")
        # Run a single synchronous status pass.
        kubevirt.dest = Mock(spec=Datastore())
        kubevirt._terminate_event = Event()
        kubevirt._oneshot = True
        kubevirt._interval = 0
        kubevirt.run()

        kubevirt._send_data.assert_called_once_with(data_to_send=ANY)
        # assertIsInstance gives clearer diagnostics than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(
            kubevirt._send_data.mock_calls[0].kwargs['data_to_send'],
            StatusReport)
        self.assertEqual(
            kubevirt._send_data.mock_calls[0].kwargs['data_to_send'].
            data['source']['server'], self.config['server'])
        self.assertEqual(
            kubevirt._send_data.mock_calls[0].kwargs['data_to_send'].
            data['source']['message'],
            "Incorrect domain/username/password.")
def __init__(self, logger, options):
    """
    Executor class provides bridge between virtualization supervisor and
    Subscription Manager.

    logger - logger instance
    options - options for virt-who, parsed from command line arguments
    """
    self.logger = logger
    self.options = options
    # Shared event used to signal all source/destination threads to stop.
    self.terminate_event = Event()
    self.virts = []
    self.destinations = []

    # Queue for getting events from virt backends
    self.datastore = Datastore()
    self.reloading = False

    self.dest_to_source_mapper = DestinationToSourceMapper(options)
    # Log every configured source so startup problems are easy to trace.
    for name, config in self.dest_to_source_mapper.configs:
        logger.info("Using config named '%s'" % name)
def setUp(self):
    """Create a Xen backend with the default polling interval."""
    config = self.create_config(name='test', wrapper=None, type='xen',
                                server='localhost', username='******',
                                password='******', owner='owner', env='env')
    self.xen = Virt.from_config(self.logger, config, Datastore(),
                                interval=DefaultInterval)
def test_version_override(self, cfg, _):
    """An explicit kubeversion in the config overrides the detected API
    version."""
    requested_version = 'v1alpha3'
    cfg.return_value = Config()
    config = self.create_config(name='test', wrapper=None, type='kubevirt',
                                owner='owner', kubeconfig='/etc/hosts',
                                kubeversion=requested_version,
                                hypervisor_id='hostname')
    kubevirt = Virt.from_config(self.logger, config, Datastore())
    kubevirt.prepare()
    self.assertEqual(requested_version, kubevirt._version)
def test_insecure(self, cfg, _):
    """An empty 'insecure' option must be treated as false."""
    cfg.return_value = Config()
    config = self.create_config(name='test', wrapper=None, type='kubevirt',
                                owner='owner', kubeconfig='/etc/hosts',
                                kubeversion='v1alpha3',
                                hypervisor_id='hostname', insecure='')
    kubevirt = Virt.from_config(self.logger, config, Datastore())
    kubevirt.prepare()
    self.assertFalse(kubevirt._insecure)
def test_get_nonexistant_item_with_default_returns_default(self):
    # Ensures the default is returned if there is one provided and the
    # key does not exist
    datastore = Datastore()
    result = datastore.get("NONEXISTANT", default=sentinel.default_value)
    # assertEqual reports both values on failure, unlike
    # assertTrue(a == b) which only says "False is not true".
    self.assertEqual(result, sentinel.default_value)
class Executor(object):
    def __init__(self, logger, options):
        """
        Executor class provides bridge between virtualization supervisor and
        Subscription Manager.

        logger - logger instance
        options - options for virt-who, parsed from command line arguments
        """
        self.logger = logger
        self.options = options
        # Shared event used to signal all worker threads to stop.
        self.terminate_event = Event()
        self.virts = []
        self.destinations = []

        # Queue for getting events from virt backends
        self.datastore = Datastore()
        self.reloading = False

        self.dest_to_source_mapper = DestinationToSourceMapper(options)
        # Log every configured source so startup problems are easy to trace.
        for name, config in self.dest_to_source_mapper.configs:
            logger.info("Using config named '%s'" % name)

    def _create_virt_backends(self):
        """
        Create virts list with virt backend threads.

        Configs that cannot be turned into a backend are logged and skipped
        instead of aborting startup.
        """
        virts = []
        for name, config in self.dest_to_source_mapper.configs:
            try:
                virt = Virt.from_config(
                    self.logger, config, self.datastore,
                    terminate_event=self.terminate_event,
                    interval=self.options[VW_GLOBAL]['interval'],
                    oneshot=self.options[VW_GLOBAL]['oneshot'])
            except Exception as e:
                self.logger.error('Unable to use configuration "%s": %s',
                                  name, str(e))
                continue
            virts.append(virt)
        return virts

    def _create_destinations(self):
        """
        Create a list of destination threads, one per known destination.

        dest_to_source_mapper.dests should already include all destinations
        we want created at this time; this method makes no assumptions of
        creating defaults of any kind.

        @return: list of destination thread objects
        @rtype: list
        """
        dests = []
        for info in self.dest_to_source_mapper.dests:
            source_keys = self.dest_to_source_mapper.dest_to_sources_map[info]
            # Give each destination a unique thread/logger name.
            info.name = "destination_%s" % hash(info)
            logger = log.getLogger(name=info.name)
            manager = Manager.fromInfo(logger, self.options, info)
            dest_class = info_to_destination_class[type(info)]
            dest = dest_class(config=info, logger=logger,
                              source_keys=source_keys,
                              options=self.options,
                              source=self.datastore,
                              dest=manager,
                              terminate_event=self.terminate_event,
                              interval=self.options[VW_GLOBAL]['interval'],
                              oneshot=self.options[VW_GLOBAL]['oneshot'])
            dests.append(dest)
        return dests

    @staticmethod
    def wait_on_threads(threads, max_wait_time=None, kill_on_timeout=False):
        """
        Wait for each of the threads in the list to be terminated

        @param threads: A list of IntervalThread objects to wait on
        @type threads: list

        @param max_wait_time: An optional max amount of seconds to wait
        @type max_wait_time: int

        @param kill_on_timeout: An optional arg that, if truthy and
        max_wait_time is defined and exceeded, cause this method to attempt
        to terminate and join the threads given it.
        @type kill_on_timeout: bool

        @return: A list of threads that have not quit yet. Without a
        max_wait_time this list is always empty (or we are stuck waiting).
        With a max_wait_time this list will include those threads that have
        not quit yet.
        @rtype: list
        """
        delta_time = 1.0  # seconds slept per polling iteration
        total_waited = 0
        threads_not_terminated = list(threads)
        while len(threads_not_terminated) > 0:
            if max_wait_time is not None and total_waited > max_wait_time:
                if kill_on_timeout:
                    Executor.terminate_threads(threads_not_terminated)
                    return []
                return threads_not_terminated
            # Rebuild the list instead of calling remove() while iterating:
            # mutating a list during iteration skips the element following
            # each removal, delaying detection of terminated threads.
            threads_not_terminated = [
                thread for thread in threads_not_terminated
                if not thread.is_terminated()
            ]
            if not threads_not_terminated:
                break
            time.sleep(delta_time)
            if max_wait_time is not None:
                # We slept delta_time seconds, so account for exactly that.
                # (The old "1 * 1.0/delta_time" divided instead of
                # multiplying and was only correct for delta_time == 1.0.)
                total_waited += delta_time
        return threads_not_terminated

    @staticmethod
    def terminate_threads(threads):
        """Stop each given thread and join those that were started."""
        for thread in threads:
            thread.stop()
            # A thread that was never started has no ident and cannot be
            # joined.
            if thread.ident:
                thread.join()

    def run_oneshot(self):
        """
        Run every source exactly once, then either return the collected
        reports (when --print is requested) or hand them to the
        destination threads for a single send.

        @raise ExitRequest: when no usable sources or destinations exist
        """
        # Start all sources
        self.virts = self._create_virt_backends()
        if len(self.virts) == 0:
            err = "virt-who can't be started: no suitable virt backend found"
            self.logger.error(err)
            raise ExitRequest(code=1, message=err)

        self.destinations = self._create_destinations()
        if len(self.destinations) == 0:
            err = "virt-who can't be started: no suitable destinations found"
            self.logger.error(err)
            raise ExitRequest(code=1, message=err)

        for thread in self.virts:
            thread.start()

        Executor.wait_on_threads(self.virts)

        if self.options[VW_GLOBAL]['print']:
            to_print = {}
            for source in self.dest_to_source_mapper.sources:
                try:
                    report = self.datastore.get(source)
                    config = report.config
                    to_print[config.name] = report
                except KeyError:
                    self.logger.info('Unable to retrieve report for source '
                                     '\"%s\" for printing' % source)
            return to_print

        for thread in self.destinations:
            thread.start()

        Executor.wait_on_threads(self.destinations)

    def run(self):
        """
        Run sources and destinations on their configured interval until a
        destination thread terminates, then request exit.

        @raise ExitRequest: always, either on startup failure (code=1) or
        normal termination (code=0)
        """
        self.logger.debug("Starting infinite loop with %d seconds interval",
                          self.options[VW_GLOBAL]['interval'])

        # Need to update the dest to source mapping of the
        # dest_to_source_mapper object here because of the way that main
        # reads the config from the command line
        # TODO: Update dests to source map on addition or removal of configs
        self.dest_to_source_mapper.update_dest_to_source_map()

        # Start all sources
        self.virts = self._create_virt_backends()
        if len(self.virts) == 0:
            err = "virt-who can't be started: no suitable virt backend found"
            self.logger.error(err)
            raise ExitRequest(code=1, message=err)

        self.destinations = self._create_destinations()
        if len(self.destinations) == 0:
            err = "virt-who can't be started: no suitable destinations found"
            self.logger.error(err)
            raise ExitRequest(code=1, message=err)

        for thread in self.virts:
            thread.start()

        for thread in self.destinations:
            thread.start()

        # Interruptibly wait on the other threads to be terminated
        self.wait_on_threads(self.destinations)
        raise ExitRequest(code=0)

    def stop_threads(self):
        """Signal every worker thread to stop and join them all."""
        self.terminate_event.set()
        self.terminate_threads(self.virts)
        self.terminate_threads(self.destinations)

    def terminate(self):
        """Shut virt-who down completely and drop all per-run state."""
        self.logger.debug("virt-who is shutting down")
        self.stop_threads()
        self.virts = []
        self.destinations = []
        self.datastore = None

    def reload(self):
        """
        Causes all threads to be terminated in preparation for running again
        """
        self.stop_threads()
        self.terminate_event.clear()
        self.datastore = Datastore()