Example #1
 def __init__(self, size_or_pool=1000):
     if isinstance(size_or_pool, GreenPool):
         self.pool = size_or_pool
     else:
         self.pool = GreenPool(size_or_pool)
     self.waiters = queue.LightQueue()
     self.counter = 0
Example #2
 def __init__(self, conf, logger=None):
     """
     :param conf: configuration object obtained from ConfigParser
     :param logger: logging object
     """
     self.conf = conf
     self.logger = PrefixLoggerAdapter(
         logger or get_logger(conf, log_route='object-replicator'), {})
     self.devices_dir = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.bind_ip = conf.get('bind_ip', '0.0.0.0')
     self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
     self.port = None if self.servers_per_port else \
         int(conf.get('bind_port', 6200))
     self.concurrency = int(conf.get('concurrency', 1))
     self.replicator_workers = int(conf.get('replicator_workers', 0))
     self.stats_interval = int(conf.get('stats_interval', '300'))
     self.ring_check_interval = int(conf.get('ring_check_interval', 15))
     self.next_check = time.time() + self.ring_check_interval
     self.replication_cycle = random.randint(0, 9)
     self.partition_times = []
     self.interval = int(
         conf.get('interval') or conf.get('run_pause') or 30)
     self.rsync_timeout = int(
         conf.get('rsync_timeout', DEFAULT_RSYNC_TIMEOUT))
     self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
     self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
     self.rsync_compress = config_true_value(
         conf.get('rsync_compress', 'no'))
     self.rsync_module = conf.get('rsync_module', '').rstrip('/')
     if not self.rsync_module:
         self.rsync_module = '{replication_ip}::object'
     self.http_timeout = int(conf.get('http_timeout', 60))
     self.recon_cache_path = conf.get('recon_cache_path',
                                      '/var/cache/swift')
     self.rcache = os.path.join(self.recon_cache_path, "object.recon")
     self._next_rcache_update = time.time() + self.stats_interval
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.node_timeout = float(conf.get('node_timeout', 10))
     self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
     self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
     self.default_headers = {
         'Content-Length': '0',
         'user-agent': 'object-replicator %s' % os.getpid()
     }
     self.rsync_error_log_line_length = \
         int(conf.get('rsync_error_log_line_length', 0))
     self.handoffs_first = config_true_value(
         conf.get('handoffs_first', False))
     self.handoff_delete = config_auto_int_value(
         conf.get('handoff_delete', 'auto'), 0)
     if any((self.handoff_delete, self.handoffs_first)):
         self.logger.warning('Handoff only mode is not intended for normal '
                             'operation, please disable handoffs_first and '
                             'handoff_delete before the next '
                             'normal rebalance')
     self.is_multiprocess_worker = None
     self._df_router = DiskFileRouter(conf, self.logger)
     self._child_process_reaper_queue = queue.LightQueue()
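
Nearly every option above follows the same pattern: read a string from conf with a default, then coerce it. A minimal sketch of that pattern, with a plain dict standing in for the ConfigParser-backed conf object (hypothetical; the real replicator needs far more wiring than this):

def read_options(conf):
    # conf only needs a ConfigParser-style get(key, default)
    concurrency = int(conf.get('concurrency', 1))
    bind_ip = conf.get('bind_ip', '0.0.0.0')
    # 'interval' falls back to the legacy 'run_pause' name, then to 30
    interval = int(conf.get('interval') or conf.get('run_pause') or 30)
    return concurrency, bind_ip, interval

print(read_options({'concurrency': '4'}))  # (4, '0.0.0.0', 30)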
Example #3
 def Run(self, context, *args, **kwargs):
     '''Set up queues and start the fetchers.'''
     if self._input_queue is None:
         LOG.info(_("Manager %(cname)s inputqueue is None"),
                  {'cname': self.__class__.__name__})
         raise RuntimeError('input queue is not set')
     for fetcher in self.fetcher_managers:
         fetcher.m_output_queue = self.output_queue
         fetcher.m_input_queue = queue.LightQueue(CONF.max_queue_size)
         context.tg.add_thread(fetcher.Run, context, *args, **kwargs)
     while True:
         if self._input_queue.empty():
             greenthread.sleep(3)
             continue
         crawldoc = self._input_queue.get()
         fetch_id = self.dispatcher.dispatch(crawldoc)
         print(fetch_id)
         print(len(self.fetcher_managers))
         LOG.debug(
             _("Prepare dispatch to %(fetch_num)s, crawldoc: %(crawldoc)s"),
             {'fetch_num': self.fetcher_managers[fetch_id].m_id,
              'crawldoc': crawldoc})
         # TODO: a hung fetcher will hang dispatch; if its queue is full,
         # try moving the queue-front element to the queue end.
         self.fetcher_managers[fetch_id].wait_for_inputqueue_ready()
         self.fetcher_managers[fetch_id].m_input_queue.put(crawldoc)
Example #4
def perform_requests(operator, nodes, element, cfg, inputdata):
    cryptit = cfg.decrypt
    cfg.decrypt = True
    configdata = cfg.get_node_attributes(nodes, _configattributes)
    cfg.decrypt = cryptit
    resultdata = queue.LightQueue()
    livingthreads = set([])
    for node in nodes:
        livingthreads.add(
            _ipmiworkers.spawn(perform_request, operator, node, element,
                               configdata, inputdata, cfg, resultdata))
    while livingthreads:
        try:
            datum = resultdata.get(timeout=10)
            while datum:
                if datum != 'Done':
                    yield datum
                datum = resultdata.get_nowait()
        except queue.Empty:
            pass
        for t in list(livingthreads):
            if t.dead:
                livingthreads.discard(t)
    try:
        # drain queue if a thread put something on the queue and died
        while True:
            datum = resultdata.get_nowait()
            if datum != 'Done':
                yield datum
    except queue.Empty:
        pass
Example #5
 def __init__(self, max_workers=1000):
     assert int(max_workers) > 0, 'Max workers must be greater than zero'
     self._max_workers = int(max_workers)
     self._pool = greenpool.GreenPool(self._max_workers)
     self._work_queue = queue.LightQueue()
     self._shutdown_lock = threading.RLock()
     self._shutdown = False
Example #6
 def __init__(self, max_workers=1000):
     assert EVENTLET_AVAILABLE, 'eventlet is needed to use GreenExecutor'
     assert int(max_workers) > 0, 'Max workers must be greater than zero'
     self._max_workers = int(max_workers)
     self._pool = greenpool.GreenPool(self._max_workers)
     self._work_queue = queue.LightQueue()
     self._shutdown_lock = threading.RLock()
     self._shutdown = False
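
Examples #5 and #6 pair a bounded GreenPool with an unbounded LightQueue of pending work. A minimal sketch of running tasks through such a pool (assuming only eventlet; the executors' submit/shutdown methods are not shown above):

from eventlet import greenpool

def task(n):
    return n * n

pool = greenpool.GreenPool(2)  # at most two tasks run concurrently
# GreenPool.spawn returns a GreenThread; wait() blocks for its result
threads = [pool.spawn(task, n) for n in range(4)]
print([t.wait() for t in threads])  # [0, 1, 4, 9]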
Example #7
 def __init__(self, conf, msg_id, timeout, connection_pool):
     self._msg_id = msg_id
     self._timeout = timeout or conf.rpc_response_timeout
     self._reply_proxy = connection_pool.reply_proxy
     self._done = False
     self._got_ending = False
     self._conf = conf
     self._dataqueue = queue.LightQueue()
     # Add this caller to the reply proxy's call_waiters
     self._reply_proxy.add_call_waiter(self, self._msg_id)
Example #8
def perform_requests(operator, nodes, element, cfg, inputdata):
    cryptit = cfg.decrypt
    cfg.decrypt = True
    configdata = cfg.get_node_attributes(nodes, _configattributes)
    cfg.decrypt = cryptit
    resultdata = queue.LightQueue()
    pendingnum = len(nodes)
    for node in nodes:
        _ipmiworkers.spawn_n(
            perform_request, operator, node, element, configdata, inputdata,
            cfg, resultdata)
    while pendingnum:
        datum = resultdata.get()
        if datum == 'Done':
            pendingnum -= 1
        else:
            yield datum
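
Examples #4 and #8 rely on the same convention: each worker pushes its results onto the shared LightQueue and then a 'Done' sentinel, so the consumer can count completions. A minimal sketch of the worker side (hypothetical worker function; perform_request itself is not shown):

import eventlet
from eventlet import queue

def worker(node, results):
    # stand-in for perform_request: produce results, then signal completion
    results.put('result-for-%s' % node)
    results.put('Done')

results = queue.LightQueue()
nodes = ['n1', 'n2', 'n3']
pending = len(nodes)
for node in nodes:
    eventlet.spawn_n(worker, node, results)
while pending:
    datum = results.get()  # blocks until some worker puts an item
    if datum == 'Done':
        pending -= 1       # one worker finished
    else:
        print(datum)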
Example #9
def retrieve(nodes, element, configmanager, inputdata):
    results = queue.LightQueue()
    workers = set([])
    if element == ['power', 'state']:
        for node in nodes:
            yield msg.PowerState(node=node, state='on')
        return
    elif element == ['health', 'hardware']:
        _run_method(retrieve_health, workers, results, configmanager, nodes,
                    element)
    elif element[:3] == ['inventory', 'hardware', 'all']:
        _run_method(retrieve_inventory, workers, results, configmanager, nodes,
                    element)
    elif element[:3] == ['inventory', 'firmware', 'all']:
        _run_method(retrieve_firmware, workers, results, configmanager, nodes,
                    element)
    elif element == ['sensors', 'hardware', 'all']:
        _run_method(list_sensors, workers, results, configmanager, nodes,
                    element)
    elif element[:3] == ['sensors', 'hardware', 'all']:
        _run_method(retrieve_sensors, workers, results, configmanager, nodes,
                    element)
    else:
        for node in nodes:
            yield msg.ConfluentNodeError(node, 'Not Implemented')
        return
    while workers:
        try:
            datum = results.get(timeout=10)
            while datum:
                yield datum
                datum = results.get_nowait()
        except queue.Empty:
            pass
        eventlet.sleep(0.001)
        for t in list(workers):
            if t.dead:
                workers.discard(t)
    try:
        while True:
            datum = results.get_nowait()
            if datum:
                yield datum
    except queue.Empty:
        pass
Example #10
def retrieve(nodes, element, configmanager, inputdata):
    results = queue.LightQueue()
    workers = set([])
    if element == ['power', 'state']:
        for node in nodes:
            yield msg.PowerState(node=node, state='on')
        return
    elif element == ['health', 'hardware']:
        creds = configmanager.get_node_attributes(
            nodes,
            ['secret.hardwaremanagementuser',
             'secret.hardwaremanagementpassword'],
            decrypt=True)
        for node in nodes:
            workers.add(
                eventlet.spawn(retrieve_health, configmanager, creds, node,
                               results))
    else:
        for node in nodes:
            yield msg.ConfluentNodeError(node, 'Not Implemented')
        return
    while workers:
        try:
            datum = results.get(timeout=10)
            while datum:
                yield datum
                datum = results.get_nowait()
        except queue.Empty:
            pass
        eventlet.sleep(0.001)
        for t in list(workers):
            if t.dead:
                workers.discard(t)
    try:
        while True:
            datum = results.get_nowait()
            if datum:
                yield datum
    except queue.Empty:
        pass
Example #11
    def __init__(self, min_size=0, max_size=4, order_as_stack=False, create=None):
        """*order_as_stack* governs the ordering of the items in the free pool.
        If ``False`` (the default), the free items collection (of items that
        were created and were put back in the pool) acts as a round-robin,
        giving each item approximately equal utilization.  If ``True``, the
        free pool acts as a FILO stack, which preferentially re-uses items that
        have most recently been used.
        """
        self.min_size = min_size
        self.max_size = max_size
        self.order_as_stack = order_as_stack
        self.current_size = 0
        self.channel = queue.LightQueue(0)
        self.free_items = collections.deque()
        if create is not None:
            self.create = create

        for x in range(min_size):
            self.current_size += 1
            self.free_items.append(self.create())
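
The round-robin versus FILO behavior described in the docstring comes down to which end of the free_items deque a returned item goes back to. A small illustration with a hypothetical put-back helper (only __init__ is shown above, so the real put logic is assumed):

import collections

def put_back(free_items, item, order_as_stack):
    # hypothetical helper mirroring what a put() on this pool would do
    if order_as_stack:
        free_items.appendleft(item)  # FILO: reuse the hottest item first
    else:
        free_items.append(item)      # round-robin: even utilization

free_items = collections.deque(['a', 'b', 'c'])
item = free_items.popleft()          # a get() takes from the front: 'a'
put_back(free_items, item, order_as_stack=False)
print(free_items[0])                 # 'b' -- 'a' went to the back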
Example #12
 def __init__(self, size_or_pool):
     super(GreenMap, self).__init__(size_or_pool)
     self.waiters = queue.LightQueue(maxsize=self.pool.size)
Example #13
 def __init__(self):
     """Create a :class:`SequentialEventletHandler` instance"""
     self.callback_queue = green_queue.LightQueue()
     self.completion_queue = green_queue.LightQueue()
     self._workers = []
     self._started = False
Example #14
 def init_pool(self):
     self.pool = queue.LightQueue(0)
Example #15
 def subscribe(self, tenant):
     sub = queue.LightQueue()
     self._subscribers.setdefault(tenant, set()).add(sub)
     return sub
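
Example #15 only shows the subscribe side: each subscriber gets its own LightQueue. A hedged sketch of the matching publish loop (hypothetical class; the publisher is not shown above):

from eventlet import queue

class Hub(object):
    def __init__(self):
        self._subscribers = {}

    def subscribe(self, tenant):
        sub = queue.LightQueue()
        self._subscribers.setdefault(tenant, set()).add(sub)
        return sub

    def publish(self, tenant, event):
        # fan the event out to every subscriber queue for this tenant
        for sub in self._subscribers.get(tenant, ()):
            sub.put(event)

hub = Hub()
inbox = hub.subscribe('acme')
hub.publish('acme', 'volume-created')
print(inbox.get())  # 'volume-created'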