Example #1
def monitor(result_queue, broker=None):
    """
    Gets finished tasks from the result queue and saves them to Django
    :type result_queue: multiprocessing.Queue
    """
    if not broker:
        broker = get_broker()
    name = current_process().name
    logger.info(_("{} monitoring at {}").format(name, current_process().pid))
    for task in iter(result_queue.get, 'STOP'):
        # acknowledge
        ack_id = task.pop('ack_id', False)
        if ack_id:
            broker.acknowledge(ack_id)
        # save the result
        if task.get('cached', False):
            save_cached(task, broker)
        else:
            save_task(task, broker)
        # log the result
        if task['success']:
            logger.info(_("Processed [{}]").format(task['name']))
        else:
            logger.error(
                _("Failed [{}] - {}").format(task['name'], task['result']))
    logger.info(_("{} stopped monitoring results").format(name))
Example #2
def save_task(task):
    """
    Saves the task package to Django or the cache
    """
    # SAVE LIMIT < 0 : Don't save success
    if not task.get("save", Conf.SAVE_LIMIT > 0) and task["success"]:
        return
    # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
    db.close_old_connections()
    try:
        if task["success"] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
            Success.objects.last().delete()
        Task.objects.create(
            id=task["id"],
            name=task["name"],
            func=task["func"],
            hook=task.get("hook"),
            args=task["args"],
            kwargs=task["kwargs"],
            started=task["started"],
            stopped=task["stopped"],
            result=task["result"],
            group=task.get("group"),
            success=task["success"],
        )
    except Exception as e:
        logger.error(e)
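The pruning guard above is a chained comparison; here is the same logic sketched with plain integers (the values are illustrative, not Conf defaults):

def should_prune(save_limit, success_count):
    # 0 < save_limit <= success_count: prune only when a positive limit
    # is set and the Success table has reached it
    return 0 < save_limit <= success_count

assert should_prune(250, 250)        # at the limit: delete the oldest row
assert not should_prune(250, 100)    # under the limit: keep everything
assert not should_prune(0, 10 ** 6)  # SAVE_LIMIT 0: no pruning
assert not should_prune(-1, 10)      # negative: successes are skipped earlier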
Example #3
def save_task(task):
    """
    Saves the task package to Django
    """
    # SAVE LIMIT < 0 : Don't save success
    if Conf.SAVE_LIMIT < 0 and task['success']:
        return
    # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
    if task['success'] and 0 < Conf.SAVE_LIMIT < Success.objects.count():
        Success.objects.last().delete()

    try:
        Task.objects.create(id=task['id'],
                            name=task['name'],
                            func=task['func'],
                            hook=task.get('hook'),
                            args=task['args'],
                            kwargs=task['kwargs'],
                            started=task['started'],
                            stopped=task['stopped'],
                            result=task['result'],
                            group=task.get('group'),
                            success=task['success'])
    except Exception as e:
        logger.error(e)
Example #4
def monitor(result_queue, broker=None):
    """
    Gets finished tasks from the result queue and saves them to Django
    :type result_queue: multiprocessing.Queue
    """
    if not broker:
        broker = get_broker()
    name = current_process().name
    logger.info(_("{} monitoring at {}").format(name, current_process().pid))
    for task in iter(result_queue.get, "STOP"):
        # acknowledge
        ack_id = task.pop("ack_id", False)
        if ack_id:
            broker.acknowledge(ack_id)
        # save the result
        if task.get("cached", False):
            save_cached(task, broker)
        else:
            save_task(task)
        # log the result
        if task["success"]:
            logger.info(_("Processed [{}]").format(task["name"]))
        else:
            logger.error(_("Failed [{}] - {}").format(task["name"], task["result"]))
    logger.info(_("{} stopped monitoring results").format(name))
Example #5
def pusher(task_queue, event, list_key=Conf.Q_LIST, r=redis_client):
    """
    Pulls tasks off the Redis List and puts them in the task queue
    :type task_queue: multiprocessing.Queue
    :type event: multiprocessing.Event
    :type list_key: str
    """
    logger.info(_("{} pushing tasks at {}").format(current_process().name, current_process().pid))
    while True:
        try:
            task = r.blpop(list_key, 1)
        except Exception as e:
            logger.error(e)
            # redis probably crashed. Let the sentinel handle it.
            sleep(10)
            break
        if task:
            # unpack the task
            try:
                task = signing.SignedPackage.loads(task[1])
            except (TypeError, signing.BadSignature) as e:
                logger.error(e)
                continue
            task_queue.put(task)
            logger.debug(_("queueing from {}").format(list_key))
        if event.is_set():
            break
    logger.info(_("{} stopped pushing tasks").format(current_process().name))
Example #6
def scheduler(list_key=Conf.Q_LIST):
    """
    Creates a task from a schedule at the scheduled time and schedules next run
    """
    for s in Schedule.objects.exclude(repeats=0).filter(next_run__lt=timezone.now()):
        args = ()
        kwargs = {}
        # get args, kwargs and hook
        if s.kwargs:
            try:
                # eval should be safe here because dict()
                kwargs = eval('dict({})'.format(s.kwargs))
            except SyntaxError:
                kwargs = {}
        if s.args:
            args = ast.literal_eval(s.args)
            # single value won't eval to tuple, so:
            if type(args) != tuple:
                args = (args,)
        q_options = kwargs.get('q_options', {})
        if s.hook:
            q_options['hook'] = s.hook
        # set up the next run time
        if not s.schedule_type == s.ONCE:
            next_run = arrow.get(s.next_run)
            if s.schedule_type == s.HOURLY:
                next_run = next_run.replace(hours=+1)
            elif s.schedule_type == s.DAILY:
                next_run = next_run.replace(days=+1)
            elif s.schedule_type == s.WEEKLY:
                next_run = next_run.replace(weeks=+1)
            elif s.schedule_type == s.MONTHLY:
                next_run = next_run.replace(months=+1)
            elif s.schedule_type == s.QUARTERLY:
                next_run = next_run.replace(months=+3)
            elif s.schedule_type == s.YEARLY:
                next_run = next_run.replace(years=+1)
            s.next_run = next_run.datetime
            s.repeats += -1
        # send it to the cluster
        q_options['list_key'] = list_key
        q_options['group'] = s.name or s.id
        kwargs['q_options'] = q_options
        s.task = tasks.async(s.func, *args, **kwargs)
        # log it
        if not s.task:
            logger.error(
                _('{} failed to create a task from schedule [{}]').format(current_process().name, s.name or s.id))
        else:
            logger.info(
                _('{} created a task from schedule [{}]').format(current_process().name, s.name or s.id))
        # default behavior is to delete a ONCE schedule
        if s.schedule_type == s.ONCE:
            if s.repeats < 0:
                s.delete()
                return
            # but not if it has a positive repeats
            s.repeats = 0
        # save the schedule
        s.save()
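How the scheduler turns the stored text fields back into Python values, sketched outside Django (the stored strings are illustrative):

import ast

# args are stored as a literal; a single value does not eval to a tuple
args = ast.literal_eval("1")
if not isinstance(args, tuple):
    args = (args,)
assert args == (1,)

# kwargs are stored as "key=value" text and rebuilt via eval("dict(...)");
# wrapping in dict() constrains the result's shape, but the values are
# still evaluated as Python expressions
kwargs = eval("dict({})".format("word='hello', repeat=2"))
assert kwargs == {"word": "hello", "repeat": 2}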
Example #7
def pusher(task_queue, e, list_key=Conf.Q_LIST, r=redis_client):
    """
    Pulls tasks off the Redis List and puts them in the task queue
    :type task_queue: multiprocessing.Queue
    :type e: multiprocessing.Event
    :type list_key: str
    """
    logger.info(
        _('{} pushing tasks at {}').format(current_process().name,
                                           current_process().pid))
    while True:
        try:
            task = r.blpop(list_key, 1)
        except Exception as e:
            logger.error(e)
            # redis probably crashed. Let the sentinel handle it.
            sleep(10)
            break
        if task:
            task = task[1]
            task_queue.put(task)
            logger.debug(_('queueing from {}').format(list_key))
        if e.is_set():
            break
    logger.info(_("{} stopped pushing tasks").format(current_process().name))
Example #8
def pusher(task_queue, event, broker=None):
    """
    Pulls tasks off the broker and puts them in the task queue
    :type task_queue: multiprocessing.Queue
    :type event: multiprocessing.Event
    """
    if not broker:
        broker = get_broker()
    logger.info(
        _('{} pushing tasks at {}').format(current_process().name,
                                           current_process().pid))
    while True:
        try:
            task_set = broker.dequeue()
        except Exception as e:
            logger.error(e)
            # broker probably crashed. Let the sentinel handle it.
            sleep(10)
            break
        if task_set:
            for task in task_set:
                ack_id = task[0]
                # unpack the task
                try:
                    task = signing.SignedPackage.loads(task[1])
                except (TypeError, signing.BadSignature) as e:
                    logger.error(e)
                    broker.fail(ack_id)
                    continue
                task['ack_id'] = ack_id
                task_queue.put(task)
            logger.debug(_('queueing from {}').format(broker.list_key))
        if event.is_set():
            break
    logger.info(_("{} stopped pushing tasks").format(current_process().name))
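A sketch of the task_set shape this pusher expects from broker.dequeue(): an iterable of (ack_id, signed payload) pairs. The broker and the signing step are stand-ins here, not the django_q classes.

task_set = [("ack-1", "<signed-bytes-1>"), ("ack-2", "<signed-bytes-2>")]
for ack_id, payload in task_set:
    # the real code unpacks with signing.SignedPackage.loads(payload)
    task = {"name": "demo", "payload": payload}
    task["ack_id"] = ack_id  # carried along so monitor() can acknowledge it
    print(task)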
Example #9
def save_task(task, broker):
    """
    Saves the task package to Django or the cache
    """
    # SAVE LIMIT < 0 : Don't save success
    if not task.get('save', Conf.SAVE_LIMIT > 0) and task['success']:
        return
    # async next in a chain
    if task.get('chain', None):
        tasks.async_chain(task['chain'], group=task['group'], cached=task['cached'], sync=task['sync'], broker=broker)
    # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
    db.close_old_connections()
    try:
        if task['success'] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
            Success.objects.last().delete()
        Task.objects.create(id=task['id'],
                            name=task['name'],
                            func=task['func'],
                            hook=task.get('hook'),
                            args=task['args'],
                            kwargs=task['kwargs'],
                            started=task['started'],
                            stopped=task['stopped'],
                            result=task['result'],
                            group=task.get('group'),
                            success=task['success'])
    except Exception as e:
        logger.error(e)
Example #10
def save_task(task):
    """
    Saves the task package to Django
    """
    # SAVE LIMIT < 0 : Don't save success
    if Conf.SAVE_LIMIT < 0 and task['success']:
        return
    # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
    if task['success'] and 0 < Conf.SAVE_LIMIT < Success.objects.count():
        Success.objects.last().delete()

    try:
        Task.objects.create(id=task['id'],
                            name=task['name'],
                            func=task['func'],
                            hook=task.get('hook'),
                            args=task['args'],
                            kwargs=task['kwargs'],
                            started=task['started'],
                            stopped=task['stopped'],
                            result=task['result'],
                            group=task.get('group'),
                            success=task['success'])
    except Exception as e:
        logger.error(e)
Example #11
def monitor(result_queue, broker=None):
    """
    Gets finished tasks from the result queue and saves them to Django
    :type result_queue: multiprocessing.Queue
    """
    if not broker:
        broker = get_broker()
    name = current_process().name
    logger.info(_("{} monitoring at {}").format(name, current_process().pid))
    for task in iter(result_queue.get, 'STOP'):
        # save the result
        if task.get('cached', False):
            save_cached(task, broker)
        else:
            save_task(task, broker)
        # acknowledge and log the result
        if task['success']:
            # acknowledge
            ack_id = task.pop('ack_id', False)
            if ack_id:
                broker.acknowledge(ack_id)
            # log success
            logger.info(_("Processed [{}]").format(task['name']))
        else:
            # log failure
            logger.error(_("Failed [{}] - {}").format(task['name'], task['result']))
    logger.info(_("{} stopped monitoring results").format(name))
Example #12
def save_task(task, broker):
    """
    Saves the task package to Django or the cache
    """
    # SAVE LIMIT < 0 : Don't save success
    if not task.get('save', Conf.SAVE_LIMIT > 0) and task['success']:
        return
    # async next in a chain
    if task.get('chain', None):
        tasks.async_chain(task['chain'],
                          group=task['group'],
                          cached=task['cached'],
                          sync=task['sync'],
                          broker=broker)
    # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
    db.close_old_connections()
    try:
        if task['success'] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
            Success.objects.last().delete()
        Task.objects.create(id=task['id'],
                            name=task['name'],
                            func=task['func'],
                            hook=task.get('hook'),
                            args=task['args'],
                            kwargs=task['kwargs'],
                            started=task['started'],
                            stopped=task['stopped'],
                            result=task['result'],
                            group=task.get('group'),
                            success=task['success'])
    except Exception as e:
        logger.error(e)
Example #13
def pusher(task_queue, event, broker=None):
    """
    Pulls tasks off the broker and puts them in the task queue
    :type task_queue: multiprocessing.Queue
    :type event: multiprocessing.Event
    """
    if not broker:
        broker = get_broker()
    logger.info(_('{} pushing tasks at {}').format(current_process().name, current_process().pid))
    while True:
        try:
            task_set = broker.dequeue()
        except Exception as e:
            logger.error(e)
            # broker probably crashed. Let the sentinel handle it.
            sleep(10)
            break
        if task_set:
            for task in task_set:
                ack_id = task[0]
                # unpack the task
                try:
                    task = signing.SignedPackage.loads(task[1])
                except (TypeError, signing.BadSignature) as e:
                    logger.error(e)
                    broker.fail(ack_id)
                    continue
                task['ack_id'] = ack_id
                task_queue.put(task)
            logger.debug(_('queueing from {}').format(broker.list_key))
        if event.is_set():
            break
    logger.info(_("{} stopped pushing tasks").format(current_process().name))
Example #14
def pusher(task_queue: Queue, event: Event, broker: Broker = None):
    """
    Pulls tasks off the broker and puts them in the task queue
    :type broker: brokers.Broker
    :type task_queue: multiprocessing.Queue
    :type event: multiprocessing.Event
    """
    if not broker:
        broker = get_broker()
    logger.info(_(f"{current_process().name} pushing tasks at {current_process().pid}"))
    while True:
        try:
            task_set = broker.dequeue()
        except Exception as e:
            logger.error(e, traceback.format_exc())
            # broker probably crashed. Let the sentinel handle it.
            sleep(10)
            break
        if task_set:
            for task in task_set:
                ack_id = task[0]
                # unpack the task
                try:
                    task = SignedPackage.loads(task[1])
                except (TypeError, BadSignature) as e:
                    logger.error(e, traceback.format_exc())
                    broker.fail(ack_id)
                    continue
                task["ack_id"] = ack_id
                task_queue.put(task)
            logger.debug(_(f"queueing from {broker.list_key}"))
        if event.is_set():
            break
    logger.info(_(f"{current_process().name} stopped pushing tasks"))
Example #15
def monitor(result_queue, broker=None):
    """
    Gets finished tasks from the result queue and saves them to Django
    :type result_queue: multiprocessing.Queue
    """
    if not broker:
        broker = get_broker()
    name = current_process().name
    logger.info(_(f"{name} monitoring at {current_process().pid}"))
    for task in iter(result_queue.get, "STOP"):
        # save the result
        if task.get("cached", False):
            save_cached(task, broker)
        else:
            save_task(task, broker)
        # acknowledge result
        ack_id = task.pop("ack_id", False)
        if ack_id and (task["success"] or task.get("ack_failure", False)):
            broker.acknowledge(ack_id)
        # log the result
        if task["success"]:
            # log success
            logger.info(_(f"Processed [{task['name']}]"))
        else:
            # log failure
            logger.error(_(f"Failed [{task['name']}] - {task['result']}"))
    logger.info(_(f"{name} stopped monitoring results"))
def save_task(task, broker):
    """
    Saves the task package to Django or the cache
    """
    # SAVE LIMIT < 0 : Don't save success
    if not task.get("save", Conf.SAVE_LIMIT >= 0) and task["success"]:
        return
    # enqueues next in a chain
    if task.get("chain", None):
        QUtilities.create_async_tasks_chain(
            task["chain"],
            group=task["group"],
            cached=task["cached"],
            sync=task["sync"],
            broker=broker,
        )
    # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
    close_old_django_connections()
    try:
        kwargs = task.get('kwargs', {})
        schema_name = kwargs.get('schema_name', None)
        if schema_name:
            with schema_context(schema_name):
                if task["success"] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
                    Success.objects.last().delete()
                # check if this task has previous results
                if Task.objects.filter(id=task["id"], name=task["name"]).exists():
                    existing_task = Task.objects.get(
                        id=task["id"], name=task["name"])
                    # only update the result if it hasn't succeeded yet
                    if not existing_task.success:
                        existing_task.stopped = task["stopped"]
                        existing_task.result = task["result"]
                        existing_task.success = task["success"]
                        existing_task.save()
                else:
                    Task.objects.create(
                        id=task["id"],
                        name=task["name"],
                        func=task["func"],
                        hook=task.get("hook"),
                        args=task["args"],
                        kwargs=task["kwargs"],
                        started=task["started"],
                        stopped=task["stopped"],
                        result=task["result"],
                        group=task.get("group"),
                        success=task["success"],
                    )
        else:
            logger.error('No schema name provided for saving the task')
    except Exception as e:
        logger.error(e)
Example #17
    def reincarnate_pusher(self):
        """
        Reincarnate pusher process
        """
        close_old_django_connections()
        self.pusher = self.spawn_pusher()
        logger.error(
            _(f"reincarnated pusher {self.pusher.name} after sudden death"))
        self.reincarnations += 1
Example #18
    def reincarnate_monitor(self):
        """
        Reincarnate monitor process
        """
        close_old_django_connections()
        self.monitor = self.spawn_monitor()
        logger.error(
            _(f"reincarnated monitor {self.monitor.name} after sudden death"))
        self.reincarnations += 1
Example #19
def save_task(task, broker: Broker):
    """
    Saves the task package to Django or the cache
    :param task: the task package
    :type broker: brokers.Broker
    """
    # SAVE LIMIT < 0 : Don't save success
    if not task.get("save", Conf.SAVE_LIMIT >= 0) and task["success"]:
        return
    # enqueues next in a chain
    if task.get("chain", None):
        django_q.tasks.async_chain(
            task["chain"],
            group=task["group"],
            cached=task["cached"],
            sync=task["sync"],
            broker=broker,
        )
    # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
    close_old_django_connections()
    try:
        with db.transaction.atomic():
            last = Success.objects.select_for_update().last()
            if task["success"] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
                last.delete()
        # check if this task has previous results
        if Task.objects.filter(id=task["id"], name=task["name"]).exists():
            existing_task = Task.objects.get(id=task["id"], name=task["name"])
            # only update the result if it hasn't succeeded yet
            if not existing_task.success:
                existing_task.stopped = task["stopped"]
                existing_task.result = task["result"]
                existing_task.success = task["success"]
                existing_task.attempt_count = existing_task.attempt_count + 1
                existing_task.save()

            if Conf.MAX_ATTEMPTS > 0 and existing_task.attempt_count >= Conf.MAX_ATTEMPTS:
                broker.acknowledge(task['ack_id'])

        else:
            Task.objects.create(
                id=task["id"],
                name=task["name"],
                func=task["func"],
                hook=task.get("hook"),
                args=task["args"],
                kwargs=task["kwargs"],
                started=task["started"],
                stopped=task["stopped"],
                result=task["result"],
                group=task.get("group"),
                success=task["success"],
                attempt_count=1
            )
    except Exception as e:
        logger.error(e)
Example #20
    def create_schedule(func, *args, **kwargs):
        # Wrapper method to create schedule with awareness of schema
        schema_name = kwargs.get("schema_name", connection.schema_name)
        if schema_name:
            with schema_context(schema_name):
                new_schedule = schedule(func, *args, **kwargs)
                new_schedule.save()
                return new_schedule
        else:
            logger.error("No schema name was provided")
            return None
Example #21
def save_task(task, broker):
    """
    Saves the task package to Django or the cache
    """
    # SAVE LIMIT < 0 : Don't save success
    if not task.get('save', Conf.SAVE_LIMIT >= 0) and task['success']:
        return
    # async next in a chain
    if task.get('chain', None):
        tasks.async_chain(task['chain'],
                          group=task['group'],
                          cached=task['cached'],
                          sync=task['sync'],
                          broker=broker)
    # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
    db.close_old_connections()
    try:
        if task['success'] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
            Success.objects.last().delete()
        # check if this task has previous results
        if Task.objects.filter(id=task['id'], name=task['name']).exists():
            existing_task = Task.objects.get(id=task['id'], name=task['name'])
            # only update the result if it hasn't succeeded yet
            if not existing_task.success:
                existing_task.stopped = task['stopped']
                existing_task.result = task['result']
                existing_task.success = task['success']
                existing_task.task_status = task['task_status']
                existing_task.progress_fraction = task.get(
                    'progress_fraction', 0)
                existing_task.progress_data = task.get('progress_data')
                existing_task.save()
        else:
            Task.objects.create(
                id=task['id'],
                name=task['name'],
                func=task['func'],
                hook=task.get('hook'),
                args=task['args'],
                kwargs=task['kwargs'],
                started=task['started'],
                stopped=task['stopped'],
                result=task['result'],
                group=task.get('group'),
                success=task['success'],
                worker_process_pid=task.get('worker_process_pid'),
                progress_fraction=task.get('progress_fraction', 0),
                progress_data=task.get('progress_data'),
                task_status=task['task_status'],
            )
    except Exception as e:
        import traceback
        traceback.print_exc()
        logger.error("Got exception while saving task: {}".format(e))
Example #22
def worker(task_queue, result_queue, timer, timeout=Conf.TIMEOUT):
    """
    Takes a task from the task queue, tries to execute it and puts the result back in the result queue
    :type task_queue: multiprocessing.Queue
    :type result_queue: multiprocessing.Queue
    :type timer: multiprocessing.Value
    """
    name = current_process().name
    logger.info(
        _('{} ready for work at {}').format(name,
                                            current_process().pid))
    task_count = 0
    # Start reading the task queue
    for pack in iter(task_queue.get, 'STOP'):
        result = None
        timer.value = -1  # Idle
        task_count += 1
        # unpickle the task
        try:
            task = signing.SignedPackage.loads(pack)
        except (TypeError, signing.BadSignature) as e:
            logger.error(e)
            continue
        # Get the function from the task
        logger.info(_('{} processing [{}]').format(name, task['name']))
        f = task['func']
        # if it's not an instance try to get it from the string
        if not callable(task['func']):
            try:
                module, func = f.rsplit('.', 1)
                m = importlib.import_module(module)
                f = getattr(m, func)
            except (ValueError, ImportError, AttributeError) as e:
                result = (e, False)
        # We're still going
        if not result:
            # execute the payload
            timer.value = task['kwargs'].pop('timeout', timeout or 0)  # Busy
            try:
                res = f(*task['args'], **task['kwargs'])
                result = (res, True)
            except Exception as e:
                result = (e, False)
        # Process result
        task['result'] = result[0]
        task['success'] = result[1]
        task['stopped'] = timezone.now()
        result_queue.put(task)
        timer.value = -1  # Idle
        # Recycle
        if task_count == Conf.RECYCLE:
            timer.value = -2  # Recycled
            break
    logger.info(_('{} stopped doing work').format(name))
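The string-to-callable step of the worker, isolated into a runnable sketch; 'math.sqrt' is a stdlib stand-in for a project function:

import importlib

path = "math.sqrt"
module_name, func_name = path.rsplit(".", 1)  # "math", "sqrt"
module = importlib.import_module(module_name)
f = getattr(module, func_name)
assert f(9) == 3.0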
Example #23
def worker(task_queue, result_queue, timer, timeout=Conf.TIMEOUT):
    """
    Takes a task from the task queue, tries to execute it and puts the result back in the result queue
    :type task_queue: multiprocessing.Queue
    :type result_queue: multiprocessing.Queue
    :type timer: multiprocessing.Value
    """
    name = current_process().name
    logger.info(_('{} ready for work at {}').format(name, current_process().pid))
    task_count = 0
    # Start reading the task queue
    for pack in iter(task_queue.get, 'STOP'):
        result = None
        timer.value = -1  # Idle
        task_count += 1
        # unpickle the task
        try:
            task = signing.SignedPackage.loads(pack)
        except (TypeError, signing.BadSignature) as e:
            logger.error(e)
            continue
        # Get the function from the task
        logger.info(_('{} processing [{}]').format(name, task['name']))
        f = task['func']
        # if it's not an instance try to get it from the string
        if not callable(task['func']):
            try:
                module, func = f.rsplit('.', 1)
                m = importlib.import_module(module)
                f = getattr(m, func)
            except (ValueError, ImportError, AttributeError) as e:
                result = (e, False)
        # We're still going
        if not result:
            # execute the payload
            timer.value = task['kwargs'].pop('timeout', timeout or 0)  # Busy
            try:
                res = f(*task['args'], **task['kwargs'])
                result = (res, True)
            except Exception as e:
                result = (e, False)
        # Process result
        task['result'] = result[0]
        task['success'] = result[1]
        task['stopped'] = timezone.now()
        result_queue.put(task)
        timer.value = -1  # Idle
        # Recycle
        if task_count == Conf.RECYCLE:
            timer.value = -2  # Recycled
            break
    logger.info(_('{} stopped doing work').format(name))
Example #24
def save_cached(task, broker):
    task_key = f'{broker.list_key}:{task["id"]}'
    timeout = task["cached"]
    if timeout is True:
        timeout = None
    try:
        group = task.get("group", None)
        iter_count = task.get("iter_count", 0)
        # if it's a group append to the group list
        if group:
            group_key = f"{broker.list_key}:{group}:keys"
            group_list = broker.cache.get(group_key) or []
            # if it's an iter group, check if we are ready
            if iter_count and len(group_list) == iter_count - 1:
                group_args = f"{broker.list_key}:{group}:args"
                # collate the results into a Task result
                results = [
                    SignedPackage.loads(broker.cache.get(k))["result"]
                    for k in group_list
                ]
                results.append(task["result"])
                task["result"] = results
                task["id"] = group
                task["args"] = SignedPackage.loads(
                    broker.cache.get(group_args))
                task.pop("iter_count", None)
                task.pop("group", None)
                if task.get("iter_cached", None):
                    task["cached"] = task.pop("iter_cached", None)
                    save_cached(task, broker=broker)
                else:
                    save_task(task, broker)
                broker.cache.delete_many(group_list)
                broker.cache.delete_many([group_key, group_args])
                return
            # save the group list
            group_list.append(task_key)
            broker.cache.set(group_key, group_list, timeout)
            # async_task next in a chain
            if task.get("chain", None):
                django_q.tasks.async_chain(
                    task["chain"],
                    group=group,
                    cached=task["cached"],
                    sync=task["sync"],
                    broker=broker,
                )
        # save the task
        broker.cache.set(task_key, SignedPackage.dumps(task), timeout)
    except Exception as e:
        logger.error(e)
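The cache key layout save_cached() builds, shown with illustrative values (the group-prefixed member key appears in the variants in Examples #34 and #41):

list_key, group, task_id = "django_q:q", "reports", "abc123"
task_key = f"{list_key}:{task_id}"            # a plain cached result
member_key = f"{list_key}:{group}:{task_id}"  # a group member's result
group_key = f"{list_key}:{group}:keys"        # the list of member task keys
group_args = f"{list_key}:{group}:args"       # the original call args of the group
print(task_key, member_key, group_key, group_args)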
Example #25
def monitor(result_queue):
    """
    Gets finished tasks from the result queue and saves them to Django
    :type result_queue: multiprocessing.Queue
    """
    name = current_process().name
    logger.info(_("{} monitoring at {}").format(name, current_process().pid))
    for task in iter(result_queue.get, 'STOP'):
        save_task(task)
        if task['success']:
            logger.info(_("Processed [{}]").format(task['name']))
        else:
            logger.error(_("Failed [{}] - {}").format(task['name'], task['result']))
    logger.info(_("{} stopped monitoring results").format(name))
Example #26
def save_cached(task, broker):
    task_key = '{}:{}'.format(broker.list_key, task['id'])
    timeout = task['cached']
    if timeout is True:
        timeout = None
    try:
        group = task.get('group', None)
        iter_count = task.get('iter_count', 0)
        # if it's a group append to the group list
        if group:
            group_key = '{}:{}:keys'.format(broker.list_key, group)
            group_list = broker.cache.get(group_key) or []
            # if it's an iter group, check if we are ready
            if iter_count and len(group_list) == iter_count - 1:
                group_args = '{}:{}:args'.format(broker.list_key, group)
                # collate the results into a Task result
                results = [
                    signing.SignedPackage.loads(broker.cache.get(k))['result']
                    for k in group_list
                ]
                results.append(task['result'])
                task['result'] = results
                task['id'] = group
                task['args'] = signing.SignedPackage.loads(
                    broker.cache.get(group_args))
                task.pop('iter_count', None)
                task.pop('group', None)
                if task.get('iter_cached', None):
                    task['cached'] = task.pop('iter_cached', None)
                    save_cached(task, broker=broker)
                else:
                    save_task(task, broker)
                broker.cache.delete_many(group_list)
                broker.cache.delete_many([group_key, group_args])
                return
            # save the group list
            group_list.append(task_key)
            broker.cache.set(group_key, group_list, timeout)
            # async next in a chain
            if task.get('chain', None):
                tasks.async_chain(task['chain'],
                                  group=group,
                                  cached=task['cached'],
                                  sync=task['sync'],
                                  broker=broker)
        # save the task
        broker.cache.set(task_key, signing.SignedPackage.dumps(task), timeout)
    except Exception as e:
        logger.error(e)
Example #27
def call_hook(sender, instance, **kwargs):
    if instance.hook:
        f = instance.hook
        if not callable(f):
            try:
                module, func = f.rsplit('.', 1)
                m = importlib.import_module(module)
                f = getattr(m, func)
            except (ValueError, ImportError, AttributeError):
                logger.error(_('malformed return hook \'{}\' for [{}]').format(instance.hook, instance.name))
                return
        try:
            f(instance)
        except Exception as e:
            logger.error(_('return hook {} failed on [{}] because {}').format(instance.hook, instance.name, e))
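A return hook is any importable callable that accepts the finished task instance. A hypothetical example; the attribute names mirror the Task fields used throughout these examples:

def notify_result(task):
    # called by call_hook() once the task result is saved
    if task.success:
        print(f"[{task.name}] finished: {task.result}")
    else:
        print(f"[{task.name}] failed: {task.result}")

# referenced by dotted path when enqueueing, e.g. hook="myapp.hooks.notify_result"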
Example #28
def monitor(result_queue):
    """
    Gets finished tasks from the result queue and saves them to Django
    :type result_queue: multiprocessing.Queue
    """
    name = current_process().name
    logger.info(_("{} monitoring at {}").format(name, current_process().pid))
    db.close_old_connections()
    for task in iter(result_queue.get, "STOP"):
        save_task(task)
        if task["success"]:
            logger.info(_("Processed [{}]").format(task["name"]))
        else:
            logger.error(_("Failed [{}] - {}").format(task["name"], task["result"]))
    logger.info(_("{} stopped monitoring results").format(name))
Example #29
def monitor(result_queue):
    """
    Gets finished tasks from the result queue and saves them to Django
    :type result_queue: multiprocessing.Queue
    """
    name = current_process().name
    logger.info(_("{} monitoring at {}").format(name, current_process().pid))
    for task in iter(result_queue.get, 'STOP'):
        save_task(task)
        if task['success']:
            logger.info(_("Processed [{}]").format(task['name']))
        else:
            logger.error(
                _("Failed [{}] - {}").format(task['name'], task['result']))
    logger.info(_("{} stopped monitoring results").format(name))
Example #30
def call_hook(sender, instance, **kwargs):
    if instance.hook:
        f = instance.hook
        if not callable(f):
            try:
                module, func = f.rsplit('.', 1)
                m = importlib.import_module(module)
                f = getattr(m, func)
            except (ValueError, ImportError, AttributeError):
                logger.error(_('malformed return hook \'{}\' for [{}]').format(instance.hook, instance.name))
                return
        try:
            f(instance)
        except Exception as e:
            logger.error(_('return hook {} failed on [{}] because {}').format(instance.hook, instance.name, e))
Example #31
    def start(self):
        # Start Sentinel
        self.stop_event = Event()
        self.start_event = Event()
        try:
            self.broker.ping()
        except Exception as e:
            logger.error(e)
            return False
        self.sentinel = Process(target=Sentinel,
                                args=(self.stop_event, self.start_event, self.broker, self.timeout))
        self.sentinel.start()
        logger.info(_('Q Cluster-{} starting.').format(self.pid))
        while not self.start_event.is_set():
            sleep(0.1)
        return self.pid
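The start_event handshake in isolation: the parent polls until the child process signals readiness, the same pattern start() uses with the Sentinel. Names below are illustrative.

from multiprocessing import Event, Process
from time import sleep

def child(stop_event, start_event):
    start_event.set()  # tell the parent we are up
    while not stop_event.is_set():
        sleep(0.1)

if __name__ == "__main__":
    stop_event, start_event = Event(), Event()
    p = Process(target=child, args=(stop_event, start_event))
    p.start()
    while not start_event.is_set():
        sleep(0.1)  # the same polling loop as Cluster.start()
    stop_event.set()
    p.join()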
Example #32
def save_task(task, broker):
    """
    Saves the task package to Django or the cache
    """
    # SAVE LIMIT < 0 : Don't save success
    if not task.get('save', Conf.SAVE_LIMIT >= 0) and task['success']:
        return
    # async next in a chain
    if task.get('chain', None):
        tasks.async_chain(task['chain'], group=task['group'], cached=task['cached'], sync=task['sync'], broker=broker)
    # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
    db.close_old_connections()
    try:
        if task['success'] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
            Success.objects.last().delete()
        # check if this task has previous results
        if Task.objects.filter(id=task['id'], name=task['name']).exists():
            existing_task = Task.objects.get(id=task['id'], name=task['name'])
            # only update the result if it hasn't succeeded yet
            if not existing_task.success:
                existing_task.stopped = task['stopped']
                existing_task.result = task['result']
                existing_task.success = task['success']
                existing_task.task_status = task['task_status']
                existing_task.progress_fraction = task.get('progress_fraction', 0)
                existing_task.progress_data = task.get('progress_data')
                existing_task.save()
        else:
            Task.objects.create(id=task['id'],
                                name=task['name'],
                                func=task['func'],
                                hook=task.get('hook'),
                                args=task['args'],
                                kwargs=task['kwargs'],
                                started=task['started'],
                                stopped=task['stopped'],
                                result=task['result'],
                                group=task.get('group'),
                                success=task['success'],
                                worker_process_pid=task.get('worker_process_pid'),
                                progress_fraction=task.get('progress_fraction', 0),
                                progress_data=task.get('progress_data'),
                                task_status=task['task_status'],
                                )
    except Exception as e:
        import traceback
        traceback.print_exc()
        logger.error("Got exception while saving task: {}".format(e))
Example #33
def call_hook(sender, instance, **kwargs):
    if instance.hook:
        f = instance.hook
        if not callable(f):
            try:
                f = import_function(f)
            except (ValueError, ImportError, AttributeError):
                logger.error(
                    _(f"malformed return hook '{instance.hook}' for [{instance.name}]"))
                return
        try:
            f(instance)
        except Exception as e:
            logger.error(
                _(f"return hook {instance.hook} failed on [{instance.name}] because {str(e)}"))
Example #34
def save_cached(task, broker):
    task_key = '{}:{}'.format(broker.list_key, task['id'])
    timeout = task['cached']
    if timeout is True:
        timeout = None
    try:
        group = task.get('group', None)
        iter_count = task.get('iter_count', 0)
        # if it's a group append to the group list
        if group:
            task_key = '{}:{}:{}'.format(broker.list_key, group, task['id'])
            group_key = '{}:{}:keys'.format(broker.list_key, group)
            group_list = broker.cache.get(group_key) or []
            # if it's an iter group, check if we are ready
            if iter_count and len(group_list) == iter_count - 1:
                group_args = '{}:{}:args'.format(broker.list_key, group)
                # collate the results into a Task result
                results = [signing.SignedPackage.loads(broker.cache.get(k))['result'] for k in group_list]
                results.append(task['result'])
                task['result'] = results
                task['id'] = group
                task['args'] = signing.SignedPackage.loads(broker.cache.get(group_args))
                task.pop('iter_count', None)
                task.pop('group', None)
                if task.get('iter_cached', None):
                    task['cached'] = task.pop('iter_cached', None)
                    save_cached(task, broker=broker)
                else:
                    save_task(task, broker)
                broker.cache.delete_many(group_list)
                broker.cache.delete_many([group_key, group_args])
                return
            # save the group list
            group_list.append(task_key)
            broker.cache.set(group_key, group_list, timeout)
            # async next in a chain
            if task.get('chain', None):
                tasks.async_chain(task['chain'], group=group, cached=task['cached'], sync=task['sync'], broker=broker)
        # save the task
        broker.cache.set(task_key,
                         signing.SignedPackage.dumps(task),
                         timeout)
    except Exception as e:
        logger.error(e)
Example #35
def save_task(task, broker):
    """
    Saves the task package to Django or the cache
    """
    # SAVE LIMIT < 0 : Don't save success
    if not task.get('save', Conf.SAVE_LIMIT >= 0) and task['success']:
        return
    # enqueues next in a chain
    if task.get('chain', None):
        django_q.tasks.async_chain(task['chain'],
                                   group=task['group'],
                                   cached=task['cached'],
                                   sync=task['sync'],
                                   broker=broker)
    # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
    if db.transaction.get_autocommit():
        db.close_old_connections()
    try:
        if task['success'] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
            Success.objects.last().delete()
        # check if this task has previous results
        if Task.objects.filter(id=task['id'], name=task['name']).exists():
            existing_task = Task.objects.get(id=task['id'], name=task['name'])
            # only update the result if it hasn't succeeded yet
            if not existing_task.success:
                existing_task.stopped = task['stopped']
                existing_task.result = task['result']
                existing_task.success = task['success']
                existing_task.save()
        else:
            Task.objects.create(id=task['id'],
                                name=task['name'],
                                func=task['func'],
                                hook=task.get('hook'),
                                args=task['args'],
                                kwargs=task['kwargs'],
                                started=task['started'],
                                stopped=task['stopped'],
                                result=task['result'],
                                group=task.get('group'),
                                success=task['success'])
    except Exception as e:
        logger.error(e)
Example #36
def call_hook(sender, instance, **kwargs):
    if instance.hook:
        f = instance.hook
        if not callable(f):
            try:
                module, func = f.rsplit(".", 1)
                m = importlib.import_module(module)
                f = getattr(m, func)
            except (ValueError, ImportError, AttributeError):
                logger.error(
                    _(f"malformed return hook '{instance.hook}' for [{instance.name}]"))
                return
        try:
            f(instance)
        except Exception as e:
            logger.error(
                _(f"return hook {instance.hook} failed on [{instance.name}] because {str(e)}"))
Example #37
    def reincarnate_worker(self, process):
        """
        :param process: the process to reincarnate
        :type process: Process or None
        """
        close_old_django_connections()
        self.pool.remove(process)
        # Delete Worker model entry
        WorkerModel.objects.filter(id=process.id).delete()
        self.spawn_worker()
        if process.timer.value == 0:
            # only need to terminate on timeout, otherwise we risk destabilizing the queues
            process.terminate()
            logger.warning(
                _(f"reincarnated worker {process.name} after timeout"))
        elif int(process.timer.value) == -2:
            logger.info(_(f"recycled worker {process.name}"))
        else:
            logger.error(_(f"reincarnated worker {process.name} after death"))

        self.reincarnations += 1
Example #38
    def reincarnate(self, process):
        """
        :param process: the process to reincarnate
        :type process: Process or None
        """
        db.connections.close_all()  # Close any old connections
        if process == self.monitor:
            self.monitor = self.spawn_monitor()
            logger.error(_("reincarnated monitor {} after sudden death").format(process.name))
        elif process == self.pusher:
            self.pusher = self.spawn_pusher()
            logger.error(_("reincarnated pusher {} after sudden death").format(process.name))
        else:
            self.pool.remove(process)
            self.spawn_worker()
            if self.timeout and int(process.timer.value) == 0:
                # only need to terminate on timeout, otherwise we risk destabilizing the queues
                process.terminate()
                logger.warn(_("reincarnated worker {} after timeout").format(process.name))
            elif int(process.timer.value) == -2:
                logger.info(_("recycled worker {}").format(process.name))
            else:
                logger.error(_("reincarnated worker {} after death").format(process.name))

        self.reincarnations += 1
Example #39
    def reincarnate(self, process):
        """
        :param process: the process to reincarnate
        :type process: Process or None
        """
        if not Conf.SYNC:
            db.connections.close_all()  # Close any old connections
        if process == self.monitor:
            self.monitor = self.spawn_monitor()
            logger.error(
                _(f"reincarnated monitor {process.name} after sudden death"))
        elif process == self.pusher:
            self.pusher = self.spawn_pusher()
            logger.error(
                _(f"reincarnated pusher {process.name} after sudden death"))
        else:
            self.pool.remove(process)
            self.spawn_worker()
            if process.timer.value == 0:
                # only need to terminate on timeout, otherwise we risk destabilizing the queues
                process.terminate()
                logger.warning(
                    _(f"reincarnated worker {process.name} after timeout"))
            elif int(process.timer.value) == -2:
                logger.info(_(f"recycled worker {process.name}"))
            else:
                logger.error(
                    _(f"reincarnated worker {process.name} after death"))

        self.reincarnations += 1
Example #40
    def reincarnate(self, process):
        """
        :param process: the process to reincarnate
        :type process: Process or None
        """
        if process == self.monitor:
            self.monitor = self.spawn_monitor()
            logger.error(
                _("reincarnated monitor {} after sudden death").format(
                    process.name))
        elif process == self.pusher:
            self.pusher = self.spawn_pusher()
            logger.error(
                _("reincarnated pusher {} after sudden death").format(
                    process.name))
        else:
            self.pool.remove(process)
            self.spawn_worker()
            if self.timeout and int(process.timer.value) == 0:
                # only need to terminate on timeout, otherwise we risk destabilizing the queues
                process.terminate()
                logger.warn(
                    _("reincarnated worker {} after timeout").format(
                        process.name))
            elif int(process.timer.value) == -2:
                logger.info(_("recycled worker {}").format(process.name))
            else:
                logger.error(
                    _("reincarnated worker {} after death").format(
                        process.name))

        self.reincarnations += 1
Example #41
def save_cached(task, broker):
    task_key = "{}:{}".format(broker.list_key, task["id"])
    timeout = task["cached"]
    if timeout is True:
        timeout = None
    try:
        group = task.get("group", False)
        iter_count = task.get("iter_count", 0)
        # if it's a group append to the group list
        if group:
            task_key = "{}:{}:{}".format(broker.list_key, group, task["id"])
            group_key = "{}:{}:keys".format(broker.list_key, group)
            group_list = broker.cache.get(group_key) or []
            # if it's an iter group, check if we are ready
            if iter_count and len(group_list) == iter_count - 1:
                group_args = "{}:{}:args".format(broker.list_key, group)
                # collate the results into a Task result
                results = [signing.SignedPackage.loads(broker.cache.get(k))["result"] for k in group_list]
                results.append(task["result"])
                task["result"] = results
                task["id"] = group
                task["args"] = signing.SignedPackage.loads(broker.cache.get(group_args))
                task.pop("iter_count", None)
                task.pop("group", None)
                if task.get("iter_cached", None):
                    task["cached"] = task.pop("iter_cached", None)
                    save_cached(task, broker=broker)
                else:
                    save_task(task)
                broker.cache.delete_many(group_list)
                broker.cache.delete_many([group_key, group_args])
                return
            # save the group list
            group_list.append(task_key)
            broker.cache.set(group_key, group_list)
        # save the task
        broker.cache.set(task_key, signing.SignedPackage.dumps(task), timeout)
    except Exception as e:
        logger.error(e)
Example #42
def save_task(task, broker):
    """
    Saves the task package to Django or the cache
    """
    # SAVE LIMIT < 0 : Don't save success
    if not task.get('save', Conf.SAVE_LIMIT >= 0) and task['success']:
        return
    # enqueues next in a chain
    if task.get('chain', None):
        django_q.tasks.async_chain(task['chain'], group=task['group'], cached=task['cached'], sync=task['sync'], broker=broker)
    # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
    db.close_old_connections()
    try:
        if task['success'] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
            Success.objects.last().delete()
        # check if this task has previous results
        if Task.objects.filter(id=task['id'], name=task['name']).exists():
            existing_task = Task.objects.get(id=task['id'], name=task['name'])
            # only update the result if it hasn't succeeded yet
            if not existing_task.success:
                existing_task.stopped = task['stopped']
                existing_task.result = task['result']
                existing_task.success = task['success']
                existing_task.save()
        else:
            Task.objects.create(id=task['id'],
                                name=task['name'],
                                func=task['func'],
                                hook=task.get('hook'),
                                args=task['args'],
                                kwargs=task['kwargs'],
                                started=task['started'],
                                stopped=task['stopped'],
                                result=task['result'],
                                group=task.get('group'),
                                success=task['success']
                                )
    except Exception as e:
        logger.error(e)
Example #43
def pusher(task_queue, e, list_key=Conf.Q_LIST, r=redis_client):
    """
    Pulls tasks off the Redis List and puts them in the task queue
    :type task_queue: multiprocessing.Queue
    :type e: multiprocessing.Event
    :type list_key: str
    """
    logger.info(_('{} pushing tasks at {}').format(current_process().name, current_process().pid))
    while True:
        try:
            task = r.blpop(list_key, 1)
        except Exception as e:
            logger.error(e)
            # redis probably crashed. Let the sentinel handle it.
            sleep(10)
            break
        if task:
            task_queue.put(task[1])
            logger.debug(_('queueing from {}').format(list_key))
        if e.is_set():
            break
    logger.info(_("{} stopped pushing tasks").format(current_process().name))
Example #44
    def save(self):
        try:
            self.broker.set_stat(self.key,
                                 signing.SignedPackage.dumps(self, True), 3)
        except Exception as e:
            logger.error(e)
Example #45
    def ping(self):
        try:
            return self.connection.ping()
        except redis.ConnectionError as e:
            logger.error('Can not connect to Redis server.')
            raise e
Example #46
def scheduler(broker=None):
    """
    Creates a task from a schedule at the scheduled time and schedules next run
    """
    if not broker:
        broker = get_broker()
    db.close_old_connections()
    try:
        for s in Schedule.objects.exclude(repeats=0).filter(
                next_run__lt=timezone.now()):
            args = ()
            kwargs = {}
            # get args, kwargs and hook
            if s.kwargs:
                try:
                    # eval should be safe here because dict()
                    kwargs = eval('dict({})'.format(s.kwargs))
                except SyntaxError:
                    kwargs = {}
            if s.args:
                args = ast.literal_eval(s.args)
                # single value won't eval to tuple, so:
                if type(args) != tuple:
                    args = (args, )
            q_options = kwargs.get('q_options', {})
            if s.hook:
                q_options['hook'] = s.hook
            # set up the next run time
            if not s.schedule_type == s.ONCE:
                next_run = arrow.get(s.next_run)
                while True:
                    if s.schedule_type == s.MINUTES:
                        next_run = next_run.shift(minutes=+(s.minutes or 1))
                    elif s.schedule_type == s.HOURLY:
                        next_run = next_run.shift(hours=+1)
                    elif s.schedule_type == s.DAILY:
                        next_run = next_run.shift(days=+1)
                    elif s.schedule_type == s.WEEKLY:
                        next_run = next_run.shift(weeks=+1)
                    elif s.schedule_type == s.MONTHLY:
                        next_run = next_run.shift(months=+1)
                    elif s.schedule_type == s.QUARTERLY:
                        next_run = next_run.shift(months=+3)
                    elif s.schedule_type == s.YEARLY:
                        next_run = next_run.shift(years=+1)
                    if Conf.CATCH_UP or next_run > arrow.utcnow():
                        break
                s.next_run = next_run.datetime
                s.repeats -= 1
            # send it to the cluster
            q_options['broker'] = broker
            q_options['group'] = s.name or s.id
            kwargs['q_options'] = q_options
            s.task = tasks.async_task(s.func, *args, **kwargs)
            # log it
            if not s.task:
                logger.error(
                    _('{} failed to create a task from schedule [{}]').format(
                        current_process().name, s.name or s.id))
            else:
                logger.info(
                    _('{} created a task from schedule [{}]').format(
                        current_process().name, s.name or s.id))
            # default behavior is to delete a ONCE schedule
            if s.schedule_type == s.ONCE:
                if s.repeats < 0:
                    # delete and continue; a bare return here would skip
                    # every remaining due schedule in this run
                    s.delete()
                    continue
                # but not if it has a positive repeats
                s.repeats = 0
            # save the schedule
            s.save()
    except Exception as e:
        logger.error(e)
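The while-loop above is what keeps an overdue schedule from firing once per missed interval: unless Conf.CATCH_UP is set, next_run is shifted forward until it lands in the future. A standalone illustration of that behavior, assuming a daily schedule that is three days behind:

import arrow

next_run = arrow.utcnow().shift(days=-3)  # three missed daily runs
while True:
    next_run = next_run.shift(days=+1)
    if next_run > arrow.utcnow():  # Conf.CATCH_UP assumed False here
        break
# next_run is now the first daily slot in the future, so the schedule
# fires once instead of three times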
Example #48
0
def ping_redis(r):
    try:
        r.ping()
    except Exception as e:
        logger.error('Cannot connect to Redis server.')
        raise e
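Because ping_redis() re-raises after logging, callers decide whether an unreachable Redis is fatal. A hedged sketch of startup use, assuming the configured redis_client seen in the pusher example:

try:
    ping_redis(redis_client)
except Exception:
    # the connection error was already logged above; refuse to start
    raise SystemExit('Redis unavailable, not starting the cluster')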
Example #49
0
def save(self):
    try:
        self.r.set(self.key, SignedPackage.dumps(self, True), 3)
    except Exception as e:
        logger.error(e)
Example #51
0
def scheduler(broker=None):
    """
    Creates a task from a schedule at the scheduled time and schedules next run
    """
    if not broker:
        broker = get_broker()
    close_old_django_connections()
    try:
        with db.transaction.atomic(using=Schedule.objects.db):
            for s in (Schedule.objects.select_for_update().exclude(
                    repeats=0).filter(next_run__lt=timezone.now())):
                args = ()
                kwargs = {}
                # get args, kwargs and hook
                if s.kwargs:
                    try:
                        # the eval is constrained to dict() syntax, but the
                        # values are still executed, so kwargs must be trusted
                        kwargs = eval(f"dict({s.kwargs})")
                    except SyntaxError:
                        kwargs = {}
                if s.args:
                    args = ast.literal_eval(s.args)
                    # single value won't eval to tuple, so:
                    if not isinstance(args, tuple):
                        args = (args,)
                q_options = kwargs.get("q_options", {})
                if s.hook:
                    q_options["hook"] = s.hook
                # set up the next run time
                if s.schedule_type != s.ONCE:
                    next_run = arrow.get(s.next_run)
                    while True:
                        if s.schedule_type == s.MINUTES:
                            next_run = next_run.shift(
                                minutes=+(s.minutes or 1))
                        elif s.schedule_type == s.HOURLY:
                            next_run = next_run.shift(hours=+1)
                        elif s.schedule_type == s.DAILY:
                            next_run = next_run.shift(days=+1)
                        elif s.schedule_type == s.WEEKLY:
                            next_run = next_run.shift(weeks=+1)
                        elif s.schedule_type == s.MONTHLY:
                            next_run = next_run.shift(months=+1)
                        elif s.schedule_type == s.QUARTERLY:
                            next_run = next_run.shift(months=+3)
                        elif s.schedule_type == s.YEARLY:
                            next_run = next_run.shift(years=+1)
                        if Conf.CATCH_UP or next_run > arrow.utcnow():
                            break
                    # arrow always returns a tz aware datetime, and we don't want
                    # this when we explicitly configured django with USE_TZ=False
                    s.next_run = next_run.datetime if settings.USE_TZ else next_run.datetime.replace(
                        tzinfo=None)
                    s.repeats -= 1
                # send it to the cluster
                q_options["broker"] = broker
                q_options["group"] = q_options.get("group", s.name or s.id)
                kwargs["q_options"] = q_options
                s.task = django_q.tasks.async_task(s.func, *args, **kwargs)
                # log it
                if not s.task:
                    logger.error(
                        _(f"{current_process().name} failed to create a task "
                          f"from schedule [{s.name or s.id}]"))
                else:
                    logger.info(
                        _(f"{current_process().name} created a task "
                          f"from schedule [{s.name or s.id}]"))
                # default behavior is to delete a ONCE schedule
                if s.schedule_type == s.ONCE:
                    if s.repeats < 0:
                        s.delete()
                        continue
                    # but not if it has a positive repeats
                    s.repeats = 0
                # save the schedule
                s.save()
    except Exception as e:
        logger.error(e)
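The select_for_update() inside transaction.atomic() is what makes this variant safe to run from several clusters at once: matched rows stay locked until the transaction commits, so a concurrent scheduler blocks instead of firing the same schedule twice. A minimal sketch of the locking pattern in isolation, using Django's documented API rather than django-q-specific code:

from django.db import transaction
from django.utils import timezone

with transaction.atomic():
    # rows matched here are locked until commit; a second scheduler
    # running the same query waits on the lock instead of double-firing
    due = Schedule.objects.select_for_update().filter(next_run__lt=timezone.now())
    for s in due:
        pass  # create the task and advance next_run, as above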
Example #52
0
def save(self):
    try:
        self.broker.set_stat(self.key, signing.SignedPackage.dumps(self, True), 3)
    except Exception as e:
        logger.error(e)
Example #53
0
def scheduler(broker=None):
    """
    Creates a task from a schedule at the scheduled time and schedules next run
    """
    if not broker:
        broker = get_broker()
    db.close_old_connections()
    try:
        for s in Schedule.objects.exclude(repeats=0).filter(next_run__lt=timezone.now()):
            args = ()
            kwargs = {}
            # get args, kwargs and hook
            if s.kwargs:
                try:
                    # the eval is constrained to dict() syntax, but the values
                    # are still executed, so schedule kwargs must be trusted
                    kwargs = eval('dict({})'.format(s.kwargs))
                except SyntaxError:
                    kwargs = {}
            if s.args:
                args = ast.literal_eval(s.args)
                # single value won't eval to tuple, so:
                if not isinstance(args, tuple):
                    args = (args,)
            q_options = kwargs.get('q_options', {})
            if s.hook:
                q_options['hook'] = s.hook
            # set up the next run time
            if s.schedule_type != s.ONCE:
                next_run = arrow.get(s.next_run)
                while True:
                    if s.schedule_type == s.MINUTES:
                        next_run = next_run.shift(minutes=+(s.minutes or 1))
                    elif s.schedule_type == s.HOURLY:
                        next_run = next_run.shift(hours=+1)
                    elif s.schedule_type == s.DAILY:
                        next_run = next_run.shift(days=+1)
                    elif s.schedule_type == s.WEEKLY:
                        next_run = next_run.shift(weeks=+1)
                    elif s.schedule_type == s.MONTHLY:
                        next_run = next_run.shift(months=+1)
                    elif s.schedule_type == s.QUARTERLY:
                        next_run = next_run.shift(months=+3)
                    elif s.schedule_type == s.YEARLY:
                        next_run = next_run.shift(years=+1)
                    if Conf.CATCH_UP or next_run > arrow.utcnow():
                        break
                s.next_run = next_run.datetime
                s.repeats -= 1
            # send it to the cluster
            q_options['broker'] = broker
            q_options['group'] = q_options.get('group', s.name or s.id)
            kwargs['q_options'] = q_options
            s.task = tasks.async_task(s.func, *args, **kwargs)
            # log it
            if not s.task:
                logger.error(
                    _('{} failed to create a task from schedule [{}]').format(
                        current_process().name, s.name or s.id))
            else:
                logger.info(
                    _('{} created a task from schedule [{}]').format(
                        current_process().name, s.name or s.id))
            # default behavior is to delete a ONCE schedule
            if s.schedule_type == s.ONCE:
                if s.repeats < 0:
                    s.delete()
                    continue
                # but not if it has a positive repeats
                s.repeats = 0
            # save the schedule
            s.save()
    except Exception as e:
        logger.error(e)
Example #54
0
def save_task(task, broker):
    """
    Saves the task package to Django or the cache
    """
    # SAVE LIMIT < 0 : Don't save success
    if not task.get('save', Conf.SAVE_LIMIT >= 0) and task['success']:
        return
    # async next in a chain
    if task.get('chain', None):
        tasks.async_chain(task['chain'], group=task['group'], cached=task['cached'], sync=task['sync'], broker=broker)
    # SAVE LIMIT > 0: Prune database, SAVE_LIMIT 0: No pruning
    db.close_old_connections()
    try:
        # race condition in original code, fixed below
        #if task['success'] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
        #    Success.objects.last().delete()
        # check if this task has previous results (first() saves a query
        # compared to exists() followed by get())
        existing_task = Task.objects.filter(id=task['id'], name=task['name']).first()
        if existing_task:
            # only update the result if it hasn't succeeded yet
            if not existing_task.success:
                existing_task.stopped = task['stopped']
                existing_task.result = task['result']
                existing_task.success = task['success']
                existing_task.save()
        else:
            Task.objects.create(id=task['id'],
                                name=task['name'],
                                func=task['func'],
                                hook=task.get('hook'),
                                args=task['args'],
                                kwargs=task['kwargs'],
                                started=task['started'],
                                stopped=task['stopped'],
                                result=task['result'],
                                group=task.get('group'),
                                success=task['success']
                                )
        # fix for multiple clusters: clean old successful tasks after saving a new one,
        # with a separate solution for MySQL (which rejects LIMIT in a subquery).
        # SAVE_LIMIT 0 means "no pruning", so guard against deleting every success.
        if Conf.SAVE_LIMIT > 0:
            if db.connection.vendor == 'mysql':
                with db.connection.cursor() as cursor:
                    cursor.execute(
                        '''
                            DELETE  d
                            FROM    django_q_task AS d
                            LEFT JOIN
                                    (
                                    SELECT  id
                                    FROM    django_q_task
                                    WHERE success IS TRUE
                                    ORDER BY
                                            stopped DESC
                                    LIMIT %s
                                    ) AS q
                            ON      d.id = q.id
                            WHERE   d.success IS TRUE AND q.id IS NULL
                        ''',
                        [Conf.SAVE_LIMIT]
                    )
            else:
                Success.objects.exclude(id__in=Success.objects.all()[:Conf.SAVE_LIMIT]).delete()

    except Exception as e:
        logger.error(e)
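For completeness: MySQL only rejects the LIMIT that id__in=queryset[:n] compiles into a subquery, so the ORM branch can be made portable by materializing the slice into a Python list first. A hedged sketch that trades one extra round-trip for portability (the same SAVE_LIMIT > 0 guard applies):

# evaluate the ids to keep up front; the subquery becomes a literal
# id list, which MySQL accepts
keep = list(Success.objects.values_list('id', flat=True)[:Conf.SAVE_LIMIT])
Success.objects.exclude(id__in=keep).delete()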