Example 1
def delete_cached(task_id, broker=None):
    """
    Delete a task from the cache backend
    """
    if not broker:
        broker = get_broker()
    broker.cache.delete('{}:{}'.format(broker.list_key, task_id))
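A minimal usage sketch, assuming a configured Django project with django-q installed and that delete_cached is importable from django_q.tasks; the task id is hypothetical:

# Drop a single cached result by its task id (hypothetical id).
from django_q.tasks import delete_cached

delete_cached('a1b2c3d4')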
Example 2
def __init__(self,
             stop_event,
             start_event,
             broker=None,
             timeout=Conf.TIMEOUT,
             start=True):
    # Make sure we catch signals for the pool
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    self.pid = current_process().pid
    self.parent_pid = get_ppid()
    self.name = current_process().name
    self.broker = broker or get_broker()
    self.reincarnations = 0
    self.tob = datetime.now()
    self.stop_event = stop_event
    self.start_event = start_event
    self.pool_size = Conf.WORKERS
    self.pool = []
    self.timeout = timeout
    self.task_queue = Queue(
        maxsize=Conf.QUEUE_LIMIT) if Conf.QUEUE_LIMIT else Queue()
    self.result_queue = Queue()
    self.event_out = Event()
    self.monitor = None
    self.pusher = None
    if start:
        self.start()
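A construction sketch, assuming this is the Sentinel from django_q.cluster and that the caller supplies the multiprocessing primitives it coordinates on:

# Hypothetical setup: build a Sentinel without starting its pool.
from multiprocessing import Event

from django_q.cluster import Sentinel

stop_event = Event()   # set by the owner to request shutdown
start_event = Event()  # set by the sentinel once the pool is running
sentinel = Sentinel(stop_event, start_event, start=False)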
Example 3
def pusher(task_queue, event, broker=None):
    if not broker:
        broker = get_broker()
    name = current_process().name
    pid = current_process().pid
    logger.info('{} pushing tasks at {}'.format(name, pid))
    while 1:
        try:
            task_set = broker.dequeue()
        except BaseException as e:
            logger.error(e)
            # broker probably crashed. Let the sentinel handle it.
            sleep(10)
            break
        if task_set:
            for task in task_set:
                try:
                    task = signing.PickleSerializer.loads(task)
                except (TypeError, KeyError, signing.BadSerializer) as e:
                    logger.error(e)
                    continue
                task_queue.put(task)
            logger.debug('queueing from {}'.format(broker.list_key))
        else:
            sleep(.1)
        if event.is_set():
            break
    logger.info('{} stopped pushing tasks'.format(name))
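Since pusher runs an endless dequeue loop, it is meant to live in its own process; a wiring sketch, assuming the same multiprocessing primitives the Sentinel passes in:

from multiprocessing import Event, Process, Queue

task_queue = Queue()
event = Event()
p = Process(target=pusher, args=(task_queue, event))
p.start()
# ... later: ask the pusher to exit after its current cycle
event.set()
p.join()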
Example 4
def __init__(self, broker=None):
    self.broker = broker or get_broker()
    self.sentinel = None
    self.stop_event = None
    self.start_event = None
    self.pid = current_process().pid
    self.host = socket.gethostname()
    self.timeout = Conf.TIMEOUT
    signal.signal(signal.SIGTERM, self.sig_handler)
    signal.signal(signal.SIGINT, self.sig_handler)
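A usage sketch, assuming this __init__ belongs to the Cluster class in django_q.cluster, which also exposes start() and stop():

from django_q.cluster import Cluster

q = Cluster()
q.start()
# ... serve tasks ...
q.stop()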
Example 5
def delete_group_cached(group_id, broker=None):
    """
    Delete a group from the cache backend
    """
    if not broker:
        broker = get_broker()
    group_key = '{}:{}:keys'.format(broker.list_key, group_id)
    group_list = broker.cache.get(group_key)
    broker.cache.delete_many(group_list)
    broker.cache.delete(group_key)
Example 6
def get(cluster_id, broker=None):
    """
    Get the current status for the cluster
    :param cluster_id: id of the cluster
    :return: Stat or Status
    """
    if not broker:
        broker = get_broker()
    pack = broker.get_stat(Stat.get_key(cluster_id))
    if pack:
        try:
            return signing.PickleSerializer.loads(pack)
        except signing.BadSerializer:
            return None
    return Status(cluster_id)
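A lookup sketch with a hypothetical cluster id; the status attribute is populated in Stat's __init__ (Example 10), and the bare Status fallback signals a cluster that reported nothing:

stat = Stat.get('37455')  # hypothetical cluster id
print(stat.status)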
Example 7
def get_all(broker=None):
    """
    Get the status for all currently running clusters with the same prefix
    and secret key.
    :return: list of type Stat
    """
    if not broker:
        broker = get_broker()
    stats = []
    packs = broker.get_stats('{}:*'.format(Conf.Q_STAT)) or []
    for pack in packs:
        try:
            stats.append(signing.PickleSerializer.loads(pack))
        except signing.BadSerializer:
            continue
    return stats
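An iteration sketch, using only attributes these snippets assign (sentinel, status and workers are set in Example 10):

for stat in Stat.get_all():
    print(stat.sentinel, stat.status, len(stat.workers))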
Example 8
def result_cached(task_id, wait=0, broker=None):
    """
    Return the result from the cache backend
    """
    if not broker:
        broker = get_broker()
    start = time.time()
    while 1:
        r = broker.cache.get('{}:{}'.format(broker.list_key, task_id))
        if r:
            task = signing.PickleSerializer.loads(r)
            delete_cached(task_id, broker)
            return task['success'], task['result']
        if (time.time() - start) * 1000 >= wait >= 0:
            break
        time.sleep(0.01)
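A polling sketch; note that this variant returns a (success, result) tuple and evicts the cache entry, or None once the wait expires (task id hypothetical):

res = result_cached('a1b2c3d4', wait=500)  # wait up to 500 ms
if res:
    success, result = res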
Example 9
def count_group_cached(group_id, failures=False, broker=None):
    """
    Count the results in a group in the cache backend
    """
    if not broker:
        broker = get_broker()
    group_list = broker.cache.get('{}:{}:keys'.format(broker.list_key, group_id))
    if group_list:
        if not failures:
            return len(group_list)
        failure_count = 0
        for task_key in group_list:
            task = signing.SignedPackage.loads(broker.cache.get(task_key))
            if not task['success']:
                failure_count += 1
        return failure_count
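A counting sketch against a hypothetical group id: the default call counts all cached results, while failures=True counts only the failed ones:

total = count_group_cached('my-group')
failed = count_group_cached('my-group', failures=True)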
Example 10
def __init__(self, sentinel):
    super(Stat, self).__init__(sentinel.parent_pid or sentinel.pid)
    self.broker = sentinel.broker or get_broker()
    self.tob = sentinel.tob
    self.reincarnations = sentinel.reincarnations
    self.sentinel = sentinel.pid
    self.status = sentinel.status()
    self.done_q_size = 0
    self.task_q_size = 0
    if Conf.QSIZE:
        self.done_q_size = sentinel.result_queue.qsize()
        self.task_q_size = sentinel.task_queue.qsize()
    if sentinel.monitor:
        self.monitor = sentinel.monitor.pid
    if sentinel.pusher:
        self.pusher = sentinel.pusher.pid
    self.workers = [w.pid for w in sentinel.pool]
Example 11
def async(func, *args, **kwargs):
    """Queue a task for the cluster."""
    keywords = kwargs.copy()
    opt_keys = ('group', 'save', 'sync', 'cached', 'priority', 'chain', 'broker', 'uid')
    d_options = keywords.pop('d_options', None)
    # get an id
    tag = uuid()
    # build the task package
    task = {'id': tag[1],
            'name': tag[0],
            'func': func,
            'args': args}
    # push optionals
    for key in opt_keys:
        if d_options and key in d_options:
            task[key] = d_options[key]
        elif key in keywords:
            task[key] = keywords.pop(key)
    # broker
    broker = task.pop('broker', get_broker())
    # group
    if task.get('uid', False):
        task['id'] = task['uid']
    # overrides
    if 'cached' not in task and Conf.CACHED:
        task['cached'] = Conf.CACHED
    if 'sync' not in task and Conf.SYNC:
        task['sync'] = Conf.SYNC
    if 'priority' not in task or task['priority'] is None:
        task['priority'] = Conf.PRIORITY
    # finalize
    task['kwargs'] = keywords
    task['started'] = datetime.now()
    # sign it
    pack = signing.PickleSerializer.dumps(task)
    # sync
    if task.get('sync', False):
        return _sync(pack)
    # push it
    ret = broker.enqueue(task['id'], pack, task['priority'])
    logger.debug('Pushed {}'.format(task['id']))
    return ret, task['id']
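A usage sketch following the pattern in the django-q docs; note that this variant returns both the broker's enqueue return value and the task id, and that the name async clashes with the keyword introduced in Python 3.7:

# Queue math.copysign(2, -2) on the cluster and keep the task id.
ret, task_id = async('math.copysign', 2, -2)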
Example 12
def monitor(result_queue, broker=None):
    """
    Gets finished tasks from the result queue and saves them to DB or Cache
    :type result_queue: multiprocessing.Queue
    """
    if not broker:
        broker = get_broker()
    name = current_process().name
    pid = current_process().pid
    logger.info('{} monitoring at {}'.format(name, pid))
    for task in iter(result_queue.get, 'STOP'):
        ackid = task.get('id', None)
        if ackid:
            broker.acknowledge(ackid)
        # save the result
        save_cached(task, broker)
        # log the result
        if task['success']:
            logger.debug("Processed [{}]".format(task['name']))
        else:
            logger.error("Failed [{}] - {}".format(task['name'],
                                                   task['result']))
    logger.info("{} stopped monitoring results".format(name))
Example 13
def result_group_cached(group_id, failures=False, wait=0, count=None, broker=None):
    """
    Return a list of results for a task group from the cache backend
    """
    if not broker:
        broker = get_broker()
    start = time.time()
    if count:
        while 1:
            if count_group_cached(group_id) == count or wait and (time.time() - start) * 1000 >= wait >= 0:
                break
            time.sleep(0.01)
    while 1:
        group_list = broker.cache.get('{}:{}:keys'.format(broker.list_key, group_id))
        if group_list:
            result_list = []
            for task_key in group_list:
                task = signing.SignedPackage.loads(broker.cache.get(task_key))
                if task['success'] or failures:
                    result_list.append(task['result'])
            return result_list
        if (time.time() - start) * 1000 >= wait >= 0:
            break
        time.sleep(0.01)
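A sketch with a hypothetical group id: block until ten results are cached (or the wait expires), then collect them:

results = result_group_cached('my-group', count=10, wait=1000)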
Example 14
def queue_size(broker=None):
    """
    Return the current size of the broker queue
    """
    if not broker:
        broker = get_broker()
    return broker.queue_size()
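A trivial sketch: the return value is the broker's backlog, i.e. tasks enqueued but not yet picked up by a pusher:

pending = queue_size()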