Example #1
    def rebuilder_pass(self, **kwargs):
        """Run one full rebuild pass.

        Spawns one greenthread that drains the retry queue and
        ``self.nworkers`` rebuild workers, then feeds the work queue from
        the main thread and blocks until both queues are fully processed.

        :returns: True if the pass completed without any error.
        """
        self.start_time = self.last_report = time.time()
        self.log_report('START', force=True)

        # +1 pool slot for the retry-queue reader greenthread.
        with ContextPool(self.nworkers + 1) as pool:
            # spawn one worker for the retry queue
            rqueue = eventlet.Queue(self.nworkers)
            pool.spawn(self._read_retry_queue, rqueue, **kwargs)

            # Spawn workers to rebuild. The queue is bounded so the
            # producer cannot run arbitrarily far ahead of the consumers.
            queue = eventlet.Queue(self.nworkers * 10)
            for i in range(self.nworkers):
                worker = self._create_worker(**kwargs)
                pool.spawn(worker.rebuilder_pass,
                           i,
                           queue,
                           retry_queue=rqueue,
                           **kwargs)

            # fill the queue (with the main thread)
            self._fill_queue(queue, **kwargs)

            # block until all items are rebuilt
            queue.join()
            # block until the retry queue is empty
            rqueue.join()

        self.log_report('DONE', force=True)
        return self.total_errors == 0
Example #2
    def __init__(self, conf, tool):
        """Set up the local worker pool and its communication queues."""
        super(_LocalDispatcher, self).__init__(conf, tool)

        nb_workers = int_value(
            self.conf.get('workers'), self.tool.DEFAULT_WORKERS)
        self.max_items_per_second = int_value(
            self.conf.get('items_per_second'),
            self.tool.DEFAULT_ITEM_PER_SECOND)
        # Bounded task queue: at most two pending items per worker.
        self.queue_workers = eventlet.Queue(nb_workers * 2)
        self.queue_reply = eventlet.Queue()

        # One worker per configured slot, all sharing the same queues.
        self.workers = [
            self.tool.create_worker(self.queue_workers, self.queue_reply)
            for _ in range(nb_workers)]
Example #3
    def __init__(self, conf, accounts=None, **kwargs):
        """Prepare the account rebuilder state and its account client."""
        super(AccountRebuilder, self).__init__(conf, **kwargs)

        # input: optional iterable of accounts to process
        self.accounts = accounts

        # Accounts whose counters still need refreshing, and the queue
        # collecting the ones already refreshed.
        self._accounts_to_refresh = set()
        self._accounts_refreshed = eventlet.Queue()

        self.account_client = AccountClient(self.conf, logger=self.logger)
Example #4
    def __init__(self, conf, tool):
        """Set up the local worker pool and its communication queues."""
        super(_LocalDispatcher, self).__init__(conf, tool)

        concurrency = int_value(
            self.conf.get('concurrency'), self.tool.DEFAULT_CONCURRENCY)
        self.max_items_per_second = int_value(
            self.conf.get('items_per_second'),
            self.tool.DEFAULT_ITEM_PER_SECOND)
        # When rate-limited, buffer at most 5 seconds' worth of items;
        # otherwise fall back to a large per-worker buffer.
        queue_size = (self.max_items_per_second * 5
                      if self.max_items_per_second > 0
                      else concurrency * 1024)
        self.queue_workers = eventlet.Queue(queue_size)
        self.queue_reply = eventlet.Queue()

        # One worker per concurrency slot, all sharing the same queues.
        self.workers = [
            self.tool.create_worker(self.queue_workers, self.queue_reply)
            for _ in range(concurrency)]
Example #5
    def rebuilder_pass(self, **kwargs):
        """Run one full rebuild pass.

        Spawns one greenthread that drains the retry queue and
        ``self.concurrency`` rebuild workers, then feeds the work queue
        from the main thread and blocks until both queues are fully
        processed.

        :returns: True if the pass completed without any error.
        """
        self.start_time = self.last_report = time.time()
        self.log_report('START', force=True)

        # +1 pool slot for the retry-queue reader greenthread.
        with ContextPool(self.concurrency + 1) as pool:
            # spawn one worker for the retry queue
            rqueue = eventlet.Queue(self.concurrency)
            pool.spawn(self._read_retry_queue, rqueue, **kwargs)

            # Spawn workers to rebuild. The queue is bounded so the
            # producer cannot run arbitrarily far ahead of the consumers.
            queue = eventlet.Queue(self.concurrency * 10)
            for i in range(self.concurrency):
                worker = self._create_worker(**kwargs)
                pool.spawn(worker.rebuilder_pass,
                           i,
                           queue,
                           retry_queue=rqueue,
                           **kwargs)

            # fill the queue (with the main thread)
            try:
                self._fill_queue(queue, **kwargs)
            except Exception as exc:
                # Only report as a failure if we were not asked to stop;
                # an interrupted fill during shutdown is expected.
                if self.running:
                    self.logger.error("Failed to fill queue: %s", exc)
                    self.success = False

            # block until all items are rebuilt
            queue.join()
            # block until the retry queue is empty
            rqueue.join()

        self.log_report('DONE', force=True)
        return self.success and self.total_errors == 0
Example #6
    def __init__(self, conf, beanstalkd_addr=None, logger=None):
        """Initialize counters, signal handlers and optional beanstalkd I/O."""
        self.conf = conf
        self.logger = logger or get_logger(self.conf)
        self.namespace = conf['namespace']
        self.success = True

        # exit gracefully on interruption or termination
        self.running = True
        for sig in (signal.SIGINT, signal.SIGTERM):
            signal.signal(sig, self.exit_gracefully)

        # counters
        self.items_processed = 0
        self.total_items_processed = 0
        self.errors = 0
        self.total_errors = 0
        self.total_expected_items = None

        # report
        self.start_time = 0
        self.last_report = 0
        self.report_interval = int_value(
            self.conf.get('report_interval'), self.DEFAULT_REPORT_INTERVAL)

        # dispatcher
        self.dispatcher = None

        # Input and retry channels: both are wired only when a
        # beanstalkd address was provided.
        self.beanstalkd = None
        self.retryer = None
        self.retry_queue = None
        if beanstalkd_addr:
            tube = (self.conf.get('beanstalkd_worker_tube')
                    or self.DEFAULT_BEANSTALKD_WORKER_TUBE)
            self.beanstalkd = BeanstalkdListener(
                beanstalkd_addr, tube, self.logger)
            self.retryer = BeanstalkdSender(
                self.beanstalkd.addr, self.beanstalkd.tube, self.logger)
            self.retry_queue = eventlet.Queue()
        self.retry_delay = int_value(self.conf.get('retry_delay'),
                                     self.DEFAULT_RETRY_DELAY)
Example #7
    def rebuilder_pass(self, **kwargs):
        """Run one rebuild pass and log an aggregated report.

        Spawns ``self.nworkers`` workers sharing a bounded work queue,
        feeds the queue from the main thread, waits for completion, then
        sums the per-worker counters into a single report line.
        """
        start_time = time.time()

        rebuild_workers = list()
        with ContextPool(self.nworkers) as pool:
            # Bounded queue keeps the producer from racing too far ahead.
            queue = eventlet.Queue(self.nworkers * 10)

            # spawn workers to rebuild
            for index in range(self.nworkers):
                worker = self._create_worker(**kwargs)
                rebuild_workers.append(worker)
                pool.spawn(worker.rebuilder_pass, index, queue)

            # the main thread acts as the producer
            self._fill_queue(queue, **kwargs)

            # block until every queued item has been processed
            queue.join()

        # Aggregate per-worker counters into a single report.
        passes = errors = 0
        total_items_processed = 0
        waiting_time = rebuilder_time = 0
        info = self._init_info(**kwargs)
        for worker in rebuild_workers:
            passes += worker.passes
            errors += worker.errors
            total_items_processed += worker.total_items_processed
            waiting_time += worker.waiting_time
            rebuilder_time += worker.rebuilder_time
            info = self._compute_info(worker, info, **kwargs)

        end_time = time.time()
        # Guard against a zero-length pass to avoid division by zero.
        elapsed = (end_time - start_time) or 0.000001
        self.logger.info(
            self._get_report(start_time, end_time, passes, errors,
                             waiting_time, rebuilder_time, elapsed,
                             total_items_processed, info, **kwargs))