def dequeue(queuename):
    """Pop the next work item from the named etcd queue.

    Returns a (jobname, workitem) tuple on success, or (None, None) when
    the queue lock could not be acquired.
    """
    try:
        result = etcd.dequeue(queuename)
    except exceptions.LockException:
        # We didn't acquire the lock, we should just try again later. This probably
        # indicates congestion.
        return None, None
    return result
def test_dequeue_delete(self, m_delete, m_put, m_get_lock, m_get_prefix):
    """A delete work item dequeues with the right job name and task list."""
    jobname, workitem = etcd.dequeue('node01')
    self.assertEqual('somejob', jobname)

    want = [tasks.DeleteInstanceTask('fake_uuid')]
    got = workitem['tasks']

    # Check both multiset equality and exact ordering of the tasks.
    self.assertCountEqual(want, got)
    self.assertSequenceEqual(want, got)
def dequeue_work_item(self, queue_name, processing_callback):
    """Attempt to dequeue one item from queue_name and start a worker for it.

    Returns False when we are already too loaded or the queue is empty.
    NOTE(review): the success path falls off the end and returns None, not
    True -- looks unintentional, confirm before relying on the return value.
    """
    # Refuse new work once workers occupy more than half the CPUs.
    overloaded = len(self.workers) > self.present_cpus / 2
    if overloaded:
        return False

    jobname, workitem = etcd.dequeue(queue_name)
    if not workitem:
        return False

    self.start_workitem(processing_callback, (jobname, workitem), 'worker')
def test_dequeue_image_fetch(self, m_delete, m_put, m_get_lock, m_get_prefix):
    """An image fetch work item dequeues with the right job name and tasks."""
    jobname, workitem = etcd.dequeue('node01')
    self.assertEqual('1631269187.890441', jobname)

    want = [tasks.FetchImageTask('http://whoknows', 'fake_uuid')]
    got = workitem['tasks']

    # Check both multiset equality and exact ordering of the tasks.
    self.assertCountEqual(want, got)
    self.assertSequenceEqual(want, got)
def test_dequeue_start(self, m_delete, m_put, m_get_lock, m_get_prefix):
    """A start-instance work item dequeues with the right job name and tasks."""
    jobname, workitem = etcd.dequeue('node01')
    self.assertEqual('1631269187.890441', jobname)

    want = [tasks.StartInstanceTask('fake_uuid')]
    got = workitem['tasks']

    # Check both multiset equality and exact ordering of the tasks.
    self.assertCountEqual(want, got)
    self.assertSequenceEqual(want, got)
def test_dequeue_multi(self, m_delete, m_put, m_get_lock, m_get_prefix):
    """A multi-task work item dequeues with all tasks present and in order."""
    jobname, workitem = etcd.dequeue('node01')
    self.assertEqual('somejob', jobname)

    want = [
        tasks.PreflightInstanceTask('diff_uuid'),
        tasks.StartInstanceTask('fake_uuid'),
        tasks.ErrorInstanceTask('fake_uuid'),
    ]
    got = workitem['tasks']

    # Check both multiset equality and exact ordering of the tasks.
    self.assertCountEqual(want, got)
    self.assertSequenceEqual(want, got)
def run(self):
    """Gather resource statistics and publish them until asked to exit.

    Statistics are published both as prometheus gauges and into etcd
    (with a 120s TTL). An update happens either on demand -- a job
    appears on this node's metrics queue -- or when the scheduler cache
    timeout has elapsed since the last update.
    """
    LOG.info('Starting')
    gauges = {
        'updated_at': Gauge('updated_at', 'The last time metrics were updated')
    }
    # Hoisted loop invariant: the per-node metrics queue name.
    metrics_queue = '%s-metrics' % config.NODE_NAME
    last_metrics = 0

    def update_metrics():
        # NOTE: the original declared "global last_metrics" here, which was
        # dead code -- this closure never reads or assigns last_metrics (the
        # caller updates it after each call), and "global" would have bound a
        # module-level name rather than run()'s local ("nonlocal" would be
        # needed for that). Removed.
        stats = _get_stats()
        for metric in stats:
            if metric not in gauges:
                gauges[metric] = Gauge(metric, '')
            gauges[metric].set(stats[metric])

        etcd.put(
            'metrics', config.NODE_NAME, None,
            {
                'fqdn': config.NODE_NAME,
                'timestamp': time.time(),
                'metrics': stats
            }, ttl=120)
        gauges['updated_at'].set_to_current_time()

    while not self.exit.is_set():
        try:
            jobname, _ = etcd.dequeue(metrics_queue)
            if jobname:
                # Rate limit on-demand updates to at most one every two
                # seconds, but always resolve the job.
                if time.time() - last_metrics > 2:
                    update_metrics()
                    last_metrics = time.time()
                etcd.resolve(metrics_queue, jobname)
            else:
                self.exit.wait(0.2)

            # Refresh anyway once the scheduler cache would have gone stale.
            if time.time() - last_metrics > config.SCHEDULER_CACHE_TIMEOUT:
                update_metrics()
                last_metrics = time.time()
        except Exception as e:
            util_general.ignore_exception('resource statistics', e)
def _process_network_node_workitems(self):
    """Drain the network node queue, dispatching each work item by type.

    Runs until the queue is empty or we are asked to exit. Every dequeued
    job is resolved, even when processing raises.
    """
    while not self.exit.is_set():
        jobname, workitem = etcd.dequeue('networknode')
        if not workitem:
            return

        try:
            log_ctx = LOG.with_field('workitem', workitem)
            # isinstance() is the idiomatic (and equivalent) spelling of the
            # original's NetworkTask.__subclasscheck__(type(workitem)).
            if isinstance(workitem, NetworkTask):
                self._process_network_workitem(log_ctx, workitem)
            elif isinstance(workitem, NetworkInterfaceTask):
                self._process_networkinterface_workitem(
                    log_ctx, workitem)
            else:
                raise exceptions.UnknownTaskException(
                    'Network workitem was not decoded: %s' % workitem)
        finally:
            # Always resolve the job so it isn't retried forever.
            etcd.resolve('networknode', jobname)