Пример #1
0
    def run(self):
        """Main loop: dequeue work items and farm them out to subprocesses.

        Concurrency is capped at half of the CPUs libvirt reports as
        present; finished workers are reaped on every pass.
        """
        LOG.info('Starting Queues')

        libvirt = util.get_libvirt()
        conn = libvirt.open(None)
        present_cpus, _, _ = conn.getCPUMap()

        # Allow at most one worker per two present CPUs.
        max_workers = present_cpus / 2
        workers = []

        while True:
            try:
                # Reap any workers whose processes have exited.
                finished = [w for w in workers if not w.is_alive()]
                for w in finished:
                    w.join(1)
                    workers.remove(w)

                workitem = None
                if len(workers) < max_workers:
                    jobname, workitem = db.dequeue(config.NODE_NAME)

                if not workitem:
                    # Nothing to do; back off briefly to avoid busy looping.
                    time.sleep(0.2)
                    continue

                worker = multiprocessing.Process(
                    target=handle, args=(jobname, workitem,),
                    name='%s-worker' % daemon.process_name('queues'))
                worker.start()
                workers.append(worker)

            except Exception as e:
                # Best effort: log and keep the loop alive.
                util.ignore_exception(daemon.process_name('queues'), e)
Пример #2
0
    def _process_network_node_workitems(self):
        """Process one work item from the 'networknode' queue.

        Dequeues a single task, validates it, dispatches on its concrete
        type (deploy / update DHCP / remove DHCP), and always resolves
        the job in the queue so it is not redelivered.

        Raises:
            exceptions.UnknownTaskException: if the dequeued item is not
                a NetworkTask subclass (i.e. it failed to decode).
        """
        jobname, workitem = db.dequeue('networknode')
        try:
            if not workitem:
                # Queue was empty; back off briefly to avoid busy looping.
                time.sleep(0.2)
                return

            log_ctx = LOG.withField('workitem', workitem)
            # isinstance() is the idiomatic (and equivalent) form of the
            # previous direct NetworkTask.__subclasscheck__(type(...)) call.
            if not isinstance(workitem, NetworkTask):
                raise exceptions.UnknownTaskException(
                    'Network workitem was not decoded: %s' % workitem)

            n = net.from_db(workitem.network_uuid())
            if not n:
                log_ctx.withNetwork(workitem.network_uuid()).warning(
                    'Received work item for non-existent network')
                return

            # NOTE(mikal): there's really nothing stopping us from processing a bunch
            # of these jobs in parallel with a pool of workers, but I am not sure its
            # worth the complexity right now. Are we really going to be changing
            # networks that much?
            if isinstance(workitem, DeployNetworkTask):
                try:
                    n.create()
                    n.ensure_mesh()
                    db.add_event('network', workitem.network_uuid(),
                                 'network node', 'deploy', None, None)
                except exceptions.DeadNetwork as e:
                    # The network was torn down underneath us; log and move on.
                    log_ctx.withField('exception', e).warning(
                        'DeployNetworkTask on dead network')

            elif isinstance(workitem, UpdateDHCPNetworkTask):
                try:
                    n.create()
                    n.ensure_mesh()
                    n.update_dhcp()
                    db.add_event('network', workitem.network_uuid(),
                                 'network node', 'update dhcp', None, None)
                except exceptions.DeadNetwork as e:
                    log_ctx.withField('exception', e).warning(
                        'UpdateDHCPNetworkTask on dead network')

            elif isinstance(workitem, RemoveDHCPNetworkTask):
                n.remove_dhcp()
                db.add_event('network', workitem.network_uuid(),
                             'network node', 'remove dhcp', None, None)

        finally:
            # Always resolve the job, even on error, so it is not retried
            # forever.
            if jobname:
                db.resolve('networknode', jobname)
Пример #3
0
    def run(self):
        """Gather resource metrics and publish them as Prometheus gauges.

        Metrics are refreshed when a job arrives on this node's metrics
        queue (rate limited to at most once every two seconds), and are
        additionally refreshed whenever they are older than
        SCHEDULER_CACHE_TIMEOUT so they never go stale.
        """
        logutil.info(None, 'Starting')
        gauges = {
            'updated_at': Gauge('updated_at', 'The last time metrics were updated')
        }

        last_metrics = 0

        def update_metrics():
            # NOTE: the previous "global last_metrics" declaration here was
            # a bug -- it named a module-level global that does not exist,
            # not the enclosing function's local. last_metrics is only read
            # and assigned by the outer loop, so no declaration is needed.
            stats = _get_stats()
            for metric in stats:
                # Lazily create a gauge the first time a metric appears.
                if metric not in gauges:
                    gauges[metric] = Gauge(metric, '')
                gauges[metric].set(stats[metric])

            db.update_metrics_bulk(stats)
            logutil.info(None, 'Updated metrics')
            gauges['updated_at'].set_to_current_time()

        # The queue name is loop invariant, so compute it once.
        queue_name = '%s-metrics' % config.parsed.get('NODE_NAME')

        while True:
            try:
                jobname, _ = db.dequeue(queue_name)
                if jobname:
                    # Rate limit queue-driven refreshes to one per two
                    # seconds.
                    if time.time() - last_metrics > 2:
                        update_metrics()
                        last_metrics = time.time()
                    db.resolve(queue_name, jobname)
                else:
                    time.sleep(0.2)

                # Refresh anyway if the metrics have gone stale.
                if time.time() - last_metrics > config.parsed.get('SCHEDULER_CACHE_TIMEOUT'):
                    update_metrics()
                    last_metrics = time.time()

            except Exception as e:
                # Best effort: log and keep the loop alive.
                util.ignore_exception('resource statistics', e)