def test_status_error(self):
    """Status mode: a VirtError raised from _run must surface as a StatusReport
    whose source message carries the error text."""
    section = VirtConfigSection('test', None)
    section.update(
        type='virt',
        server='localhost',
        username='******',
        password='******',
        owner='owner',
    )
    # No dest given here
    self.virt = Virt(self.logger, section, None, interval=60)
    self.virt.status = True
    self.virt._send_data = Mock()
    self.virt._run = Mock(side_effect=VirtError('unable to connect to source'))
    self.run_once()

    self.virt._send_data.assert_called_once_with(data_to_send=ANY)
    report = self.virt._send_data.mock_calls[0].kwargs['data_to_send']
    self.assertTrue(isinstance(report, StatusReport))
    self.assertEqual(report.data['source']['message'],
                     'unable to connect to source.')
def test_read_hypervisor(self):
    """Fake virt in hypervisor mode: the host/guest mapping contains exactly
    the hypervisor and guest described in HYPERVISOR_JSON.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual``.
    """
    with open(self.hypervisor_file, "w") as f:
        f.write(HYPERVISOR_JSON)

    with open(self.config_file, "w") as f:
        f.write("""
[test]
type=fake
is_hypervisor=true
file=%s
""" % self.hypervisor_file)

    manager = ConfigManager(self.logger, self.config_dir)
    self.assertEqual(len(manager.configs), 1)
    virt = Virt.fromConfig(self.logger, manager.configs[0])
    self.assertEqual(type(virt), FakeVirt)

    mapping = virt.getHostGuestMapping()
    self.assertTrue("hypervisors" in mapping)
    hypervisors = mapping["hypervisors"]
    self.assertEqual(len(hypervisors), 1)

    hypervisor = hypervisors[0]
    self.assertEqual(type(hypervisor), Hypervisor)
    self.assertEqual(hypervisor.hypervisorId,
                     "60527517-6284-7593-6AAB-75BF2A6375EF")
    self.assertEqual(len(hypervisor.guestIds), 1)

    guest = hypervisor.guestIds[0]
    self.assertEqual(guest.uuid, "07ED8178-95D5-4244-BC7D-582A54A48FF8")
    self.assertEqual(guest.state, 1)
def setUp(self, is_pc=False):
    """Build an AHV Virt backend from a canned config.

    is_pc toggles the prism_central option of the source config.
    """
    cfg = self.create_config(
        name='test',
        wrapper=None,
        type='ahv',
        server='10.10.10.10',
        username='******',
        password='******',
        owner='owner',
        prism_central=is_pc,
    )
    self.ahv = Virt.from_config(self.logger, cfg, Datastore(),
                                interval=DefaultInterval)
# NOTE(review): method name has a typo ("staus"); renaming would change the
# test ID reported by the runner — confirm before fixing.
def test_staus(self):
    """Status mode with a fake non-hypervisor backend: the StatusReport
    reports its source server as None."""
    with open(self.hypervisor_file, "w") as f:
        f.write(NON_HYPERVISOR_JSON)
    with open(self.config_file, "w") as f:
        f.write(f"""
[test]
type=fake
is_hypervisor=false
file={self.hypervisor_file}""")

    effective_config = init_config({}, config_dir=self.config_dir)
    mapper = DestinationToSourceMapper(effective_config)
    self.assertEqual(len(mapper.configs), 1)

    self.fake = Virt.from_config(self.logger, mapper.configs[0][1], None)
    self.fake.status = True
    self.fake._send_data = Mock()
    self.run_once()

    self.fake._send_data.assert_called_once_with(data_to_send=ANY)
    report = self.fake._send_data.mock_calls[0].kwargs['data_to_send']
    self.assertTrue(isinstance(report, StatusReport))
    self.assertEqual(report.data['source']['server'], None)
def test_read_hypervisor(self):
    """Fake virt in hypervisor mode (with owner/env set): the mapping holds
    exactly the hypervisor and guest from HYPERVISOR_JSON."""
    with open(self.hypervisor_file, "w") as f:
        f.write(HYPERVISOR_JSON)
    with open(self.config_file, "w") as f:
        f.write("""
[test]
type=fake
is_hypervisor=true
owner=taylor
env=swift
file=%s
""" % self.hypervisor_file)

    effective_config = init_config({}, {}, config_dir=self.config_dir)
    mapper = DestinationToSourceMapper(effective_config)
    self.assertEqual(len(mapper.configs), 1)

    virt = Virt.from_config(self.logger, mapper.configs[0][1], None)
    self.assertEqual(type(virt), FakeVirt)

    mapping = virt.getHostGuestMapping()
    self.assertTrue("hypervisors" in mapping)
    hosts = mapping["hypervisors"]
    self.assertEqual(len(hosts), 1)

    host = hosts[0]
    self.assertEqual(type(host), Hypervisor)
    self.assertEqual(host.hypervisorId,
                     "60527517-6284-7593-6AAB-75BF2A6375EF")
    self.assertEqual(len(host.guestIds), 1)

    guest = host.guestIds[0]
    self.assertEqual(guest.uuid, "07ED8178-95D5-4244-BC7D-582A54A48FF8")
    self.assertEqual(guest.state, 1)
def test_getHostGuestMapping(self):
    """Kubevirt mapping: one hypervisor ('master') with a single running
    guest and the expected facts."""
    client = Mock()
    client.get_nodes.return_value = self.nodes()
    client.get_vms.return_value = self.vms()

    config = self.create_config(name='test', wrapper=None, type='kubevirt',
                                owner='owner', kubeconfig='/etc/hosts')
    with patch.dict('os.environ', {'KUBECONFIG': '/dev/null'}):
        kubevirt = Virt.from_config(self.logger, config, Datastore())
        kubevirt._client = client

    expected = Hypervisor(
        hypervisorId='52c01ad890e84b15a1be4be18bd64ecd',
        name='master',
        guestIds=[
            Guest(
                'f83c5f73-5244-4bd1-90cf-02bac2dda608',
                kubevirt.CONFIG_TYPE,
                Guest.STATE_RUNNING,
            )
        ],
        facts={
            Hypervisor.CPU_SOCKET_FACT: '2',
            Hypervisor.HYPERVISOR_TYPE_FACT: 'qemu',
            Hypervisor.HYPERVISOR_VERSION_FACT: 'v1.9.1+a0ce1bc657',
        },
    )
    actual = kubevirt.getHostGuestMapping()['hypervisors'][0]
    self.assertEqual(expected.toDict(), actual.toDict())
def test_getHostGuestMapping_with_hm(self):
    """Kubevirt mapping with hypervisor_id=hostname: the hypervisorId becomes
    the node hostname ('minikube') instead of the machine UUID."""
    client = Mock()
    client.get_nodes.return_value = self.nodes()
    client.get_vms.return_value = self.vms()

    config = self.create_config(name='test', wrapper=None, type='kubevirt',
                                owner='owner', kubeconfig='/etc/hosts',
                                hypervisor_id='hostname')
    with patch.dict('os.environ', {'KUBECONFIG': '/dev/null'}):
        kubevirt = Virt.from_config(self.logger, config, Datastore())
        kubevirt._client = client

    expected = Hypervisor(
        hypervisorId='minikube',
        name='master',
        guestIds=[
            Guest(
                'f83c5f73-5244-4bd1-90cf-02bac2dda608',
                kubevirt.CONFIG_TYPE,
                Guest.STATE_RUNNING,
            )
        ],
        facts={
            Hypervisor.CPU_SOCKET_FACT: '2',
            Hypervisor.HYPERVISOR_TYPE_FACT: 'qemu',
            Hypervisor.HYPERVISOR_VERSION_FACT: 'v1.9.1+a0ce1bc657',
        },
    )
    actual = kubevirt.getHostGuestMapping()['hypervisors'][0]
    self.assertEqual(expected.toDict(), actual.toDict())
def test_pending_vm(self):
    """Kubevirt with only pending VMs: the hypervisor is reported with an
    empty guest list."""
    client = Mock()
    client.get_nodes.return_value = self.nodes()
    client.get_vms.return_value = self.pending_vms()

    config = self.create_config(name='test', wrapper=None, type='kubevirt',
                                owner='owner', kubeconfig='/etc/hosts')
    with patch.dict('os.environ', {'KUBECONFIG': '/dev/null'}):
        kubevirt = Virt.from_config(self.logger, config, Datastore())
        kubevirt._client = client

    expected = Hypervisor(
        hypervisorId='52c01ad890e84b15a1be4be18bd64ecd',
        name='main',
        guestIds=[],
        facts={
            Hypervisor.CPU_SOCKET_FACT: '2',
            Hypervisor.HYPERVISOR_TYPE_FACT: 'qemu',
            Hypervisor.SYSTEM_UUID_FACT: '52c01ad890e84b15a1be4be18bd64ecd',
            Hypervisor.HYPERVISOR_VERSION_FACT: 'v1.9.1+a0ce1bc657',
        })
    actual = kubevirt.getHostGuestMapping()['hypervisors'][0]
    self.assertEqual(expected.toDict(), actual.toDict())
def test_milicpu(self):
    """Kubevirt node reporting CPU in millicores: the socket fact is the
    rounded core count ('7')."""
    client = Mock()
    client.get_nodes.return_value = self.new_nodes()
    client.get_vms.return_value = self.vms()

    config = self.create_config(name='test', wrapper=None, type='kubevirt',
                                owner='owner', kubeconfig='/etc/hosts')
    with patch.dict('os.environ', {'KUBECONFIG': '/dev/null'}):
        kubevirt = Virt.from_config(self.logger, config, Datastore())
        kubevirt._client = client

    expected = Hypervisor(
        hypervisorId='52c01ad890e84b15a1be4be18bd64ecd',
        name='main',
        guestIds=[
            Guest(
                'f83c5f73-5244-4bd1-90cf-02bac2dda608',
                kubevirt.CONFIG_TYPE,
                Guest.STATE_RUNNING,
            )
        ],
        facts={
            Hypervisor.CPU_SOCKET_FACT: '7',
            Hypervisor.HYPERVISOR_TYPE_FACT: 'qemu',
            Hypervisor.SYSTEM_UUID_FACT: '52c01ad890e84b15a1be4be18bd64ecd',
            Hypervisor.HYPERVISOR_VERSION_FACT: 'v1.18.0-rc.1',
        })
    actual = kubevirt.getHostGuestMapping()['hypervisors'][0]
    self.assertEqual(expected.toDict(), actual.toDict())
def test_status(self, cfg, _, kube_client):
    """Status mode for kubevirt: the StatusReport carries the configured
    server name."""
    cfg.return_value = Config()
    kube_client.get_nodes = Mock(return_value=self.nodes())
    # NOTE(review): unlike the get_nodes line above, this makes get_vms()
    # return a Mock wrapping vms() rather than vms() itself — presumably
    # irrelevant in status mode, but confirm the intent.
    kube_client.get_vms.return_value = Mock(return_value=self.vms())

    self.config = self.create_config(name='test', wrapper=None,
                                     type='kubevirt', owner='owner',
                                     kubeconfig='/etc/hosts',
                                     kubeversion='version',
                                     hypervisor_id='hostname')
    self.config['server'] = 'kubeserver'
    with patch.dict('os.environ', {'KUBECONFIG': '/dev/null'}):
        kubevirt = Virt.from_config(self.logger, self.config, Datastore())

    kubevirt.status = True
    kubevirt._send_data = Mock()
    self.run_once(kubevirt)

    kubevirt._send_data.assert_called_once_with(data_to_send=ANY)
    report = kubevirt._send_data.mock_calls[0].kwargs['data_to_send']
    self.assertTrue(isinstance(report, StatusReport))
    self.assertEqual(report.data['source']['server'], self.config['server'])
def run_virt(self, config, datastore=None):
    """Run a virt backend synchronously, once, with its event loop stubbed out."""
    backend = Virt.from_config(self.logger, config, datastore or Datastore())
    backend._terminate_event = Event()
    backend._interval = 3600
    backend._oneshot = True
    backend._createEventLoop = Mock()
    backend._run()
def test_read_status_bad_source_credentials(self, virt):
    """A VirtError raised by statusConfirmConnection must end up, with a
    trailing period, in the status report's source message."""
    config = self.create_config('test', None, type='libvirt')
    virt.return_value.getCapabilities.return_value = LIBVIRT_CAPABILITIES_XML
    virt.return_value.getType.return_value = "LIBVIRT_TYPE"
    virt.return_value.getVersion.return_value = "VERSION 1337"

    v = Virt.from_config(self.logger, config, Datastore(),
                         interval=DefaultInterval)
    v._terminate_event = Event()
    v._interval = 3600
    v._oneshot = True
    v._createEventLoop = Mock()
    v.status = True
    v._send_data = Mock()
    v.statusConfirmConnection = Mock(
        side_effect=VirtError("Incorrect domain/username/password"))
    v.run()

    v._send_data.assert_called_once_with(data_to_send=ANY)
    report = v._send_data.mock_calls[0].kwargs['data_to_send']
    self.assertTrue(isinstance(report, StatusReport))
    self.assertEqual(report.data['source']['server'], None)
    self.assertEqual(report.data['source']['message'],
                     "Incorrect domain/username/password.")
def run_virt(self, config, datastore=None):
    """Run a virt backend synchronously, once, with its event loop stubbed out.

    Same as the sibling helper but passes an explicit polling interval.
    """
    backend = Virt.from_config(self.logger, config,
                               datastore or Datastore(),
                               interval=DefaultInterval)
    backend._terminate_event = Event()
    backend._interval = 3600
    backend._oneshot = True
    backend._createEventLoop = Mock()
    backend._run()
def test_empty_kubeconfig(self):
    """Without an explicit kubeconfig option, the path defaults to
    ~/.kube/config."""
    config = self.create_config(name='test', wrapper=None, type='kubevirt',
                                owner='owner')
    kubevirt = Virt.from_config(self.logger, config, Datastore())
    self.assertEqual("~/.kube/config", kubevirt._path)
def setUp(self):
    """Build a kubevirt backend with KUBECONFIG pointed at /dev/null."""
    cfg = self.create_config(name='test', wrapper=None, type='kubevirt',
                             owner='owner', env='env')
    with patch.dict('os.environ', {'KUBECONFIG': '/dev/null'}):
        self.kubevirt = Virt.from_config(self.logger, cfg, Datastore())
def setUp(self):
    """Build a RHEV-M backend; the password contains a non-ASCII character
    to exercise encoding of credentials."""
    cfg = self.create_config(name='test', wrapper=None, type='rhevm',
                             server='localhost', username='******',
                             password=u'1€345678', owner='owner')
    self.rhevm = Virt.from_config(self.logger, cfg, Datastore())
    self.rhevm.build_urls()
def check_virt_connection(self, config):
    """Try a one-shot sync against the configured virt source and return a
    list of human-readable error strings (empty list means success).

    NOTE(review): this is Python-2-era code (``StringIO.StringIO``,
    ``socket.error``); under Python 3 ``p.communicate()`` returns bytes,
    which would break the ``re.search`` below — confirm the target runtime.
    """
    queue = Queue()
    event = Event()
    errors = []
    virt = Virt.fromConfig(self.logger, config)
    # Collect subprocess stderr/stdout in a temp file so it can be read back
    # and appended to the error list after the connection attempt.
    setattr(virt, 'extra_errors',
            tempfile.NamedTemporaryFile(prefix='vit-who-error'))

    # vdsm subprocess will output to stdout, so redirect it to a tempfile
    def _getLocalVdsName(tsPath):
        # Extract the CN from the local vdsm certificate; '0' on failure.
        p = subprocess.Popen(
            ['/usr/bin/openssl', 'x509', '-noout', '-subject', '-in',
             '%s/certs/vdsmcert.pem' % tsPath],
            stderr=virt.extra_errors, stdout=virt.extra_errors,
            close_fds=True)
        out, err = p.communicate()
        if p.returncode != 0:
            return '0'
        return re.search('/CN=([^/$\n]+)', out).group(1)

    if isinstance(virt, Vdsm):
        # Replace the backend's CN lookup so its output is captured too.
        virt._getLocalVdsName = _getLocalVdsName
    try:
        # Prevent any warning messages to be printed out to the screen. For example:
        # certificate warning. Print them to the log instead.
        out = StringIO.StringIO()
        orig_stdout = sys.stdout
        orig_stderr = sys.stderr
        sys.stdout = out
        sys.stderr = out
        # Perform a one shot report request to test the connection
        virt.start_sync(queue, event, None, True)
    except (VirtError, socket.error) as e:
        errors.append(repr(e))
        # Pull anything the redirected subprocesses wrote into the temp file.
        virt.extra_errors.seek(0)
        more_errors = virt.extra_errors.read()
        if more_errors:
            errors.append(more_errors)
        # Prepend a friendlier hint when the failure was a refused connection.
        for error in errors:
            if re.search(r'Connection refused', error, re.I):
                errors = ["Please make sure the server port is open."
                          ] + errors
                break
    finally:
        # Always restore stdout/stderr and release the temp file.
        self.logger.info(out.getvalue())
        sys.stdout = orig_stdout
        sys.stderr = orig_stderr
        virt.extra_errors.close()
        virt.extra_errors = None
    return errors
def setUp(self):
    """Build a RHEV-M backend pinned to API major version 3."""
    cfg = self.create_config(name='test', wrapper=None, type='rhevm',
                             server='localhost', username='******',
                             password='******', owner='owner', env='env')
    self.rhevm = Virt.from_config(self.logger, cfg, Datastore())
    self.rhevm.major_version = '3'
    self.rhevm.build_urls()
def setUp(self):
    """Build a Xen backend from a canned config."""
    cfg = self.create_config(name='test', wrapper=None, type='xen',
                             server='localhost', username='******',
                             password='******', owner='owner', env='env')
    self.xen = Virt.from_config(self.logger, cfg, Datastore(),
                                interval=DefaultInterval)
class TestVirtStatus(TestBase):
    """Tests for the status-reporting path of the generic Virt backend."""

    def test_status_error(self):
        """A VirtError raised from _run must surface as a StatusReport whose
        source message carries the error text."""
        config_values = {
            'type': 'virt',
            'server': 'localhost',
            'username': '******',
            'password': '******',
            'owner': 'owner',
        }
        config = VirtConfigSection('test', None)
        config.update(**config_values)
        # No dest given here
        self.virt = Virt(self.logger, config, None, interval=60)
        self.virt.status = True
        self.virt._send_data = Mock()
        self.virt._run = Mock(
            side_effect=VirtError('unable to connect to source'))
        self.run_once()

        self.virt._send_data.assert_called_once_with(data_to_send=ANY)
        report = self.virt._send_data.mock_calls[0].kwargs['data_to_send']
        self.assertTrue(isinstance(report, StatusReport))
        self.assertEqual(report.data['source']['message'],
                         'unable to connect to source.')

    def run_once(self, datastore=None):
        """Run generic virt in oneshot mode.

        Fix: the original assigned ``self.virt._oneshot = True`` twice;
        the duplicate assignment has been removed.
        """
        if datastore is None:
            datastore = Mock(spec=Datastore())
        self.virt.dest = datastore
        self.virt._terminate_event = Event()
        self.virt._oneshot = True
        self.virt._interval = 0
        self.virt.run()
def test_version_override(self, cfg, _):
    """The kubeversion option overrides the API version used by the backend."""
    version = 'v1alpha3'
    cfg.return_value = Config()
    config = self.create_config(name='test', wrapper=None, type='kubevirt',
                                owner='owner', kubeconfig='/etc/hosts',
                                kubeversion=version,
                                hypervisor_id='hostname')
    kubevirt = Virt.from_config(self.logger, config, Datastore())
    kubevirt.prepare()
    self.assertEqual(version, kubevirt._version)
def test_insecure(self, cfg, _):
    """An empty-string insecure option is treated as falsy after prepare()."""
    cfg.return_value = Config()
    config = self.create_config(name='test', wrapper=None, type='kubevirt',
                                owner='owner', kubeconfig='/etc/hosts',
                                kubeversion='v1alpha3',
                                hypervisor_id='hostname', insecure='')
    kubevirt = Virt.from_config(self.logger, config, Datastore())
    kubevirt.prepare()
    self.assertFalse(kubevirt._insecure)
def _create_virt_backends(self):
    """Create virts list with virt backend threads.

    Configurations that fail to build a backend are logged and skipped.
    """
    virts = []
    for name, config in self.dest_to_source_mapper.configs:
        try:
            backend = Virt.from_config(
                self.logger,
                config,
                self.datastore,
                terminate_event=self.terminate_event,
                interval=self.options[VW_GLOBAL]['interval'],
                oneshot=self.options[VW_GLOBAL]['oneshot'],
            )
        except Exception as e:
            self.logger.error('Unable to use configuration "%s": %s',
                              name, str(e))
            continue
        virts.append(backend)
    return virts
def test_read_non_hypervisor_from_hypervisor(self):
    """Reading hypervisor-shaped JSON with is_hypervisor=false must raise
    VirtError from listDomains.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual``.
    """
    with open(self.hypervisor_file, "w") as f:
        f.write(HYPERVISOR_JSON)

    with open(self.config_file, "w") as f:
        f.write("""
[test]
type=fake
is_hypervisor=false
file=%s
""" % self.hypervisor_file)

    manager = ConfigManager(self.logger, self.config_dir)
    self.assertEqual(len(manager.configs), 1)
    virt = Virt.fromConfig(self.logger, manager.configs[0])
    self.assertEqual(type(virt), FakeVirt)
    self.assertRaises(VirtError, virt.listDomains)
def test_read_hypervisor_from_non_hypervisor(self):
    """Reading non-hypervisor JSON with is_hypervisor=true must raise
    VirtError from getHostGuestMapping.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual``.
    """
    with open(self.hypervisor_file, "w") as f:
        f.write(NON_HYPERVISOR_JSON)

    with open(self.config_file, "w") as f:
        f.write("""
[test]
type=fake
is_hypervisor=true
file=%s
""" % self.hypervisor_file)

    manager = ConfigManager(self.logger, self.config_dir)
    self.assertEqual(len(manager.configs), 1)
    virt = Virt.from_config(self.logger, manager.configs[0], None)
    self.assertEqual(type(virt), FakeVirt)
    self.assertRaises(VirtError, virt.getHostGuestMapping)
def _create_virt_backends(self):
    """Create virts list with virt backend threads.

    A configuration whose backend cannot be constructed is logged and
    skipped; the remaining backends are returned.
    """
    backends = []
    globals_ = self.options[VW_GLOBAL]
    for name, config in self.dest_to_source_mapper.configs:
        try:
            virt = Virt.from_config(self.logger, config, self.datastore,
                                    terminate_event=self.terminate_event,
                                    interval=globals_['interval'],
                                    oneshot=globals_['oneshot'])
        except Exception as e:
            self.logger.error('Unable to use configuration "%s": %s',
                              name, str(e))
            continue
        backends.append(virt)
    return backends
def test_read_non_hypervisor(self):
    """Fake virt in non-hypervisor mode: listDomains returns the single
    guest from NON_HYPERVISOR_JSON.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual``.
    """
    with open(self.hypervisor_file, "w") as f:
        f.write(NON_HYPERVISOR_JSON)

    with open(self.config_file, "w") as f:
        f.write("""
[test]
type=fake
is_hypervisor=false
file=%s
""" % self.hypervisor_file)

    manager = ConfigManager(self.logger, self.config_dir)
    self.assertEqual(len(manager.configs), 1)
    virt = Virt.fromConfig(self.logger, manager.configs[0])
    self.assertEqual(type(virt), FakeVirt)

    guests = virt.listDomains()
    self.assertEqual(len(guests), 1)
    guest = guests[0]
    self.assertEqual(guest.uuid, "9f06a84d-5f56-4e7e-be0c-937b3c1924d7")
    self.assertEqual(guest.state, 1)
def test_read_non_hypervisor(self):
    """Fake virt in non-hypervisor mode (new config stack): listDomains
    returns the single guest from NON_HYPERVISOR_JSON."""
    with open(self.hypervisor_file, "w") as f:
        f.write(NON_HYPERVISOR_JSON)
    with open(self.config_file, "w") as f:
        f.write("""
[test]
type=fake
is_hypervisor=false
file=%s
""" % self.hypervisor_file)

    effective_config = init_config({}, {}, config_dir=self.config_dir)
    mapper = DestinationToSourceMapper(effective_config)
    self.assertEqual(len(mapper.configs), 1)

    virt = Virt.from_config(self.logger, mapper.configs[0][1], None)
    self.assertEqual(type(virt), FakeVirt)

    domains = virt.listDomains()
    self.assertEqual(len(domains), 1)
    domain = domains[0]
    self.assertEqual(domain.uuid, "9f06a84d-5f56-4e7e-be0c-937b3c1924d7")
    self.assertEqual(domain.state, 1)
def test_status_bad_source_credentials(self, cfg, _, kube_client):
    """Status mode for kubevirt: a VirtError from statusConfirmConnection
    shows up in the report message, and the server name is preserved."""
    cfg.return_value = Config()
    kube_client.get_nodes = Mock(return_value=self.nodes())
    # NOTE(review): unlike the get_nodes line above, this makes get_vms()
    # return a Mock wrapping vms() rather than vms() itself — presumably
    # irrelevant in status mode, but confirm the intent.
    kube_client.get_vms.return_value = Mock(return_value=self.vms())

    self.config = self.create_config(name='test', wrapper=None,
                                     type='kubevirt', owner='owner',
                                     kubeconfig='/etc/hosts',
                                     kubeversion='version',
                                     hypervisor_id='hostname')
    self.config['server'] = 'kubeserver'
    with patch.dict('os.environ', {'KUBECONFIG': '/dev/null'}):
        kubevirt = Virt.from_config(self.logger, self.config, Datastore())

    kubevirt.status = True
    kubevirt._send_data = Mock()
    kubevirt.statusConfirmConnection = Mock(
        side_effect=VirtError("Incorrect domain/username/password"))
    kubevirt.dest = Mock(spec=Datastore())
    kubevirt._terminate_event = Event()
    kubevirt._oneshot = True
    kubevirt._interval = 0
    kubevirt.run()

    kubevirt._send_data.assert_called_once_with(data_to_send=ANY)
    report = kubevirt._send_data.mock_calls[0].kwargs['data_to_send']
    self.assertTrue(isinstance(report, StatusReport))
    self.assertEqual(report.data['source']['server'], self.config['server'])
    self.assertEqual(report.data['source']['message'],
                     "Incorrect domain/username/password.")
def run(self):
    """Main loop: start all virt backends, collect their reports from the
    queue, and send (or queue) them until terminated.

    Raises ReloadRequest when a 'reload' sentinel arrives on the queue.
    In print/oneshot mode returns the dict of queued reports.

    Fix: the cleanup at the end assigned ``self.virt = []`` (a typo),
    leaving ``self.virts`` still holding the stopped backends; it now
    clears ``self.virts``, the attribute used throughout this method.
    """
    self.reloading = False
    if not self.options.oneshot:
        self.logger.debug(
            "Starting infinite loop with %d seconds interval",
            self.options.interval)

    # Queue for getting events from virt backends
    if self.queue is None:
        self.queue = Queue()

    # Run the virtualization backends
    self.virts = []
    for config in self.configManager.configs:
        try:
            logger = log.getLogger(config=config)
            virt = Virt.fromConfig(logger, config)
        except Exception as e:
            self.logger.error('Unable to use configuration "%s": %s',
                              config.name, str(e))
            continue
        # Run the process
        virt.start(self.queue, self.terminate_event, self.options.interval,
                   self.options.oneshot)
        self.virts.append(virt)

    # This set is used both for oneshot mode and to bypass rate-limit
    # when virt-who is starting
    self.oneshot_remaining = set(virt.config.name for virt in self.virts)

    if len(self.virts) == 0:
        err = "virt-who can't be started: no suitable virt backend found"
        self.logger.error(err)
        exit(1, err)

    # queued reports depend on OrderedDict feature that if key exists
    # when setting an item, it will remain in the same order
    self.queued_reports.clear()

    # Clear last reports, we need to resend them when reloaded
    self.last_reports_hash.clear()

    # List of reports that are being processed by server
    self.reports_in_progress = []

    # Send the first report immediately
    self.send_after = time.time()

    while not self.terminate_event.is_set():
        if self.reports_in_progress:
            # Check sent report status regularly
            timeout = 1
        elif time.time() > self.send_after:
            if self.queued_reports:
                # Reports are queued and we can send them right now,
                # don't wait in queue
                timeout = 0
            else:
                # No reports in progress or queued and we can send report
                # immediately, we can wait for report as long as we want
                timeout = 3600
        else:
            # We can't send report right now, wait till we can
            timeout = max(1, self.send_after - time.time())

        # Wait for incoming report from virt backend or for timeout
        try:
            report = self.queue.get(block=True, timeout=timeout)
        except Empty:
            report = None
        except IOError:
            continue

        # Read rest of the reports from the queue in order to remove
        # obsoleted reports from same virt
        while True:
            if isinstance(report, ErrorReport):
                if self.options.oneshot:
                    # Don't hang on the failed backend
                    try:
                        self.oneshot_remaining.remove(report.config.name)
                    except KeyError:
                        pass
                self.logger.warn(
                    'Unable to collect report for config "%s"',
                    report.config.name)
            elif isinstance(report, AbstractVirtReport):
                if self.last_reports_hash.get(report.config.name,
                                              None) == report.hash:
                    self.logger.info(
                        'Report for config "%s" hasn\'t changed, not sending',
                        report.config.name)
                else:
                    if report.config.name in self.oneshot_remaining:
                        # Send the report immediately
                        self.oneshot_remaining.remove(report.config.name)
                        if not self.options.print_:
                            self.send_report(report.config.name, report)
                        else:
                            self.queued_reports[report.config.name] = report
                    else:
                        self.queued_reports[report.config.name] = report
            elif report in ['exit', 'reload']:
                # Reload and exit reports takes priority, do not process
                # any other reports
                break

            # Get next report from queue
            try:
                report = self.queue.get(block=False)
            except Empty:
                break

        if report == 'exit':
            break
        elif report == 'reload':
            self.stop_virts()
            raise ReloadRequest()

        self.check_reports_state()

        if not self.reports_in_progress and self.queued_reports \
                and time.time() > self.send_after:
            # No report is processed, send next one
            if not self.options.print_:
                self.send_current_report()

        if self.options.oneshot and not self.oneshot_remaining \
                and not self.reports_in_progress:
            break

    self.queue = None
    self.stop_virts()
    # Fixed typo: was `self.virt = []`
    self.virts = []
    if self.options.print_:
        return self.queued_reports
def setUp(self):
    """Build a RHEV-M backend pinned to API major version 3; the password
    contains a non-ASCII character to exercise credential encoding."""
    cfg = self.create_config(name='test', wrapper=None, type='rhevm',
                             server='localhost', username='******',
                             password=u'1€345678', owner='owner', env='env')
    self.rhevm = Virt.from_config(self.logger, cfg, Datastore())
    self.rhevm.major_version = '3'
    self.rhevm.build_urls()
def run(self):
    """Main loop: start all virt backends, collect their reports from the
    queue, and send (or queue) them until terminated.

    Raises ReloadRequest when a 'reload' sentinel arrives on the queue.
    In print/oneshot mode returns the dict of queued reports.

    Fix: the cleanup at the end assigned ``self.virt = []`` (a typo),
    leaving ``self.virts`` still holding the stopped backends; it now
    clears ``self.virts``, the attribute used throughout this method.
    """
    self.reloading = False
    if not self.options.oneshot:
        self.logger.debug(
            "Starting infinite loop with %d seconds interval",
            self.options.interval)

    # Queue for getting events from virt backends
    if self.queue is None:
        self.queue = Queue()

    # Run the virtualization backends
    self.virts = []
    for config in self.configManager.configs:
        try:
            logger = log.getLogger(config=config)
            virt = Virt.fromConfig(logger, config)
        except Exception as e:
            self.logger.error('Unable to use configuration "%s": %s',
                              config.name, str(e))
            continue
        # Run the process
        virt.start(self.queue, self.terminate_event, self.options.interval,
                   self.options.oneshot)
        self.virts.append(virt)

    # This set is used both for oneshot mode and to bypass rate-limit
    # when virt-who is starting
    self.oneshot_remaining = set(virt.config.name for virt in self.virts)

    if len(self.virts) == 0:
        err = "virt-who can't be started: no suitable virt backend found"
        self.logger.error(err)
        exit(1, err)

    # queued reports depend on OrderedDict feature that if key exists
    # when setting an item, it will remain in the same order
    self.queued_reports.clear()

    # Clear last reports, we need to resend them when reloaded
    self.last_reports_hash.clear()

    # List of reports that are being processed by server
    self.reports_in_progress = []

    # Send the first report immediately
    self.send_after = time.time()

    while not self.terminate_event.is_set():
        if self.reports_in_progress:
            # Check sent report status regularly
            timeout = 1
        elif time.time() > self.send_after:
            if self.queued_reports:
                # Reports are queued and we can send them right now,
                # don't wait in queue
                timeout = 0
            else:
                # No reports in progress or queued and we can send report
                # immediately, we can wait for report as long as we want
                timeout = 3600
        else:
            # We can't send report right now, wait till we can
            timeout = max(1, self.send_after - time.time())

        # Wait for incoming report from virt backend or for timeout
        try:
            report = self.queue.get(block=True, timeout=timeout)
        except Empty:
            report = None
        except IOError:
            continue

        # Read rest of the reports from the queue in order to remove
        # obsoleted reports from same virt
        while True:
            if isinstance(report, ErrorReport):
                if self.options.oneshot:
                    # Don't hang on the failed backend
                    try:
                        self.oneshot_remaining.remove(report.config.name)
                    except KeyError:
                        pass
                self.logger.warn(
                    'Unable to collect report for config "%s"',
                    report.config.name)
            elif isinstance(report, AbstractVirtReport):
                if self.last_reports_hash.get(report.config.name,
                                              None) == report.hash:
                    self.logger.info(
                        'Report for config "%s" hasn\'t changed, not sending',
                        report.config.name)
                else:
                    if report.config.name in self.oneshot_remaining:
                        # Send the report immediately
                        self.oneshot_remaining.remove(report.config.name)
                        if not self.options.print_:
                            self.send_report(report.config.name, report)
                        else:
                            self.queued_reports[report.config.name] = report
                    else:
                        self.queued_reports[report.config.name] = report
            elif report in ['exit', 'reload']:
                # Reload and exit reports takes priority, do not process
                # any other reports
                break

            # Get next report from queue
            try:
                report = self.queue.get(block=False)
            except Empty:
                break

        if report == 'exit':
            break
        elif report == 'reload':
            self.stop_virts()
            raise ReloadRequest()

        self.check_reports_state()

        if not self.reports_in_progress and self.queued_reports \
                and time.time() > self.send_after:
            # No report is processed, send next one
            if not self.options.print_:
                self.send_current_report()

        if self.options.oneshot and not self.oneshot_remaining \
                and not self.reports_in_progress:
            break

    self.queue = None
    self.stop_virts()
    # Fixed typo: was `self.virt = []`
    self.virts = []
    if self.options.print_:
        return self.queued_reports