def save_task(task, broker):
    """
    Saves the task package to Django or the cache
    """
    # SAVE_LIMIT < 0: don't save successful tasks
    if not task.get("save", Conf.SAVE_LIMIT >= 0) and task["success"]:
        return
    # enqueues next in a chain
    if task.get("chain"):
        QUtilities.create_async_tasks_chain(
            task["chain"],
            group=task["group"],
            cached=task["cached"],
            sync=task["sync"],
            broker=broker,
        )
    # SAVE_LIMIT > 0: prune database, SAVE_LIMIT = 0: no pruning
    close_old_django_connections()
    try:
        kwargs = task.get("kwargs", {})
        schema_name = kwargs.get("schema_name")
        if schema_name:
            with schema_context(schema_name):
                if task["success"] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
                    Success.objects.last().delete()
                # check if this task has previous results
                existing_task = Task.objects.filter(
                    id=task["id"], name=task["name"]
                ).first()
                if existing_task:
                    # only update the result if it hasn't succeeded yet
                    if not existing_task.success:
                        existing_task.stopped = task["stopped"]
                        existing_task.result = task["result"]
                        existing_task.success = task["success"]
                        existing_task.save()
                else:
                    Task.objects.create(
                        id=task["id"],
                        name=task["name"],
                        func=task["func"],
                        hook=task.get("hook"),
                        args=task["args"],
                        kwargs=task["kwargs"],
                        started=task["started"],
                        stopped=task["stopped"],
                        result=task["result"],
                        group=task.get("group"),
                        success=task["success"],
                    )
        else:
            logger.error("No schema name provided for saving the task")
    except Exception as e:
        logger.error(e)
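
# Illustrative only: the task package shape save_task expects, mirroring the
# dict the stock django_q cluster emits plus the tenant-aware "schema_name"
# kwarg this fork relies on. All values below are made-up examples.
#
# task = {
#     "id": "f0d53ba9",
#     "name": "example-task",
#     "func": "demo.tasks.cleanup",   # dotted path or callable
#     "hook": None,
#     "args": (),
#     "kwargs": {"schema_name": "tenant1"},
#     "started": timezone.now(),
#     "stopped": timezone.now(),
#     "result": None,
#     "success": True,
#     "save": True,                   # optional per-task override of SAVE_LIMIT
# }
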
    def reincarnate(self, process):
        """
        Replaces a dead or recycled process with a freshly spawned one
        :param process: the process to reincarnate
        :type process: Process or None
        """
        close_old_django_connections()
        if process == self.monitor:
            self.monitor = self.spawn_monitor()
            logger.error(
                _(f"reincarnated monitor {process.name} after sudden death"))
        elif process == self.pusher:
            self.pusher = self.spawn_pusher()
            logger.error(
                _(f"reincarnated pusher {process.name} after sudden death"))
        else:
            self.pool.remove(process)
            self.spawn_worker()
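            # timer.value protocol (assumed from stock django_q): positive =
            # busy countdown, 0 = counted down to a timeout by the guard,
            # -1 = idle, -2 = recycled after Conf.RECYCLE tasks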
            if process.timer.value == 0:
                # only need to terminate on timeout, otherwise we risk destabilizing the queues
                process.terminate()
                logger.warning(
                    _(f"reincarnated worker {process.name} after timeout"))
            elif int(process.timer.value) == -2:
                logger.info(_(f"recycled worker {process.name}"))
            else:
                logger.error(
                    _(f"reincarnated worker {process.name} after death"))

        self.reincarnations += 1
    def spawn_cluster(self):
        self.pool = []
        Stat(self).save()
        close_old_django_connections()
        # spawn worker pool
        for __ in range(self.pool_size):
            self.spawn_worker()
        # spawn auxiliary
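        # (the pusher feeds tasks from the broker into the task queue; the
        # monitor drains the result queue and saves finished tasks)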
        self.monitor = self.spawn_monitor()
        self.pusher = self.spawn_pusher()
        # set worker cpu affinity if needed
        if psutil and Conf.CPU_AFFINITY:
            set_cpu_affinity(Conf.CPU_AFFINITY, [w.pid for w in self.pool])
def scheduler(broker=None):
    """
    Creates a task from a schedule at the scheduled time and schedules next run
    """
    if not broker:
        broker = get_broker()
    close_old_django_connections()
    tenant_model = get_tenant_model()
    tenant_schemas_to_exclude = getattr(
        settings, 'SCHEMAS_TO_BE_EXCLUDED_BY_SCHEDULER', ['public'])

    try:
        for tenant in tenant_model.objects.exclude(schema_name__in=tenant_schemas_to_exclude):
            with schema_context(tenant.schema_name):
                with db.transaction.atomic():
                    for s in (
                        Schedule.objects.select_for_update()
                        .exclude(repeats=0)
                        .filter(next_run__lt=timezone.now())
                    ):
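                        # repeats == 0 marks an exhausted schedule (hence the
                        # exclude above); negative repeats run forever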
                        args = ()
                        kwargs = {}
                        # get args, kwargs and hook
                        if s.kwargs:
                            try:
                                # the string is evaluated inside a dict() call;
                                # schedule kwargs must come from trusted input
                                kwargs = eval(f"dict({s.kwargs})")
                            except SyntaxError:
                                kwargs = {}
                        if s.args:
                            args = ast.literal_eval(s.args)
                            # single value won't eval to tuple, so:
                            if not isinstance(args, tuple):
                                args = (args,)
                        q_options = kwargs.get("q_options", {})
                        if s.hook:
                            q_options["hook"] = s.hook
                        # set up the next run time
                        if s.schedule_type != s.ONCE:
                            next_run = arrow.get(s.next_run)
                            while True:
                                if s.schedule_type == s.MINUTES:
                                    next_run = next_run.shift(
                                        minutes=+(s.minutes or 1))
                                elif s.schedule_type == s.HOURLY:
                                    next_run = next_run.shift(hours=+1)
                                elif s.schedule_type == s.DAILY:
                                    next_run = next_run.shift(days=+1)
                                elif s.schedule_type == s.WEEKLY:
                                    next_run = next_run.shift(weeks=+1)
                                elif s.schedule_type == s.MONTHLY:
                                    next_run = next_run.shift(months=+1)
                                elif s.schedule_type == s.QUARTERLY:
                                    next_run = next_run.shift(months=+3)
                                elif s.schedule_type == s.YEARLY:
                                    next_run = next_run.shift(years=+1)
                                elif s.schedule_type == s.CRON:
                                    # use an aware base time so the catch-up
                                    # check below never compares naive and
                                    # aware datetimes
                                    next_run = arrow.get(
                                        croniter(s.cron, timezone.now()).get_next(
                                            timezone.datetime
                                        )
                                    )
                                if Conf.CATCH_UP or next_run > arrow.utcnow():
                                    break
                            s.next_run = next_run.datetime
                            s.repeats -= 1
                        # send it to the cluster
                        q_options["broker"] = broker
                        q_options["group"] = q_options.get(
                            "group", s.name or s.id)
                        kwargs["q_options"] = q_options
                        s.task = QUtilities.add_async_task(
                            s.func, *args, **kwargs)
                        # log it
                        if not s.task:
                            logger.error(
                                _(
                                    f"{current_process().name} failed to create a task from schedule [{s.name or s.id}] under tenant {kwargs.get('schema_name', 'UNSPECIFIED')}"
                                )
                            )
                        else:
                            logger.info(
                                _(
                                    f"{current_process().name} created a task from schedule [{s.name or s.id}] under tenant {kwargs.get('schema_name', 'UNSPECIFIED')}"
                                )
                            )
                        # default behavior is to delete a ONCE schedule
                        if s.schedule_type == s.ONCE:
                            if s.repeats < 0:
                                s.delete()
                                continue
                            # but not if it has a positive repeats
                            s.repeats = 0
                        # save the schedule
                        s.save()
    except Exception as e:
        logger.error(e)
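
# Illustrative only: a schedule row this scheduler would pick up on its next
# pass. Field values are assumptions for demonstration, not part of this
# module.
#
# Schedule.objects.create(
#     func="demo.tasks.cleanup",        # resolved to a callable by worker()
#     schedule_type=Schedule.DAILY,
#     kwargs="schema_name='tenant1'",   # parsed via eval(f"dict({s.kwargs})")
#     repeats=-1,                       # negative repeats run forever
#     next_run=timezone.now(),
# )
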
def worker(task_queue, result_queue, timer, timeout=Conf.TIMEOUT):
    """
    Takes a task from the task queue, tries to execute it and puts the result back in the result queue
    :type task_queue: multiprocessing.Queue
    :type result_queue: multiprocessing.Queue
    :type timer: multiprocessing.Value
    """

    name = current_process().name
    logger.info(_(f"{name} ready for work at {current_process().pid}"))
    task_count = 0
    if timeout is None:
        timeout = -1
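    # -1 doubles as "no timeout": the guard only counts down positive timer
    # values (assumption carried over from stock django_q's sentinel)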

    try:
        # Start reading the task queue
        for task in iter(task_queue.get, "STOP"):
            result = None
            timer.value = -1  # Idle
            task_count += 1
            # Get the function from the task
            logger.info(_(f'{name} processing [{task["name"]}]'))
            f = task["func"]
            # if it's not an instance try to get it from the string
            if not callable(task["func"]):
                try:
                    module, func = f.rsplit(".", 1)
                    m = importlib.import_module(module)
                    f = getattr(m, func)
                except (ValueError, ImportError, AttributeError) as e:
                    result = (e, False)
                    if error_reporter:
                        error_reporter.report()
            # We're still going
            if not result:
                close_old_django_connections()
                timer_value = task.pop("timeout", timeout)
                # signal execution
                pre_execute.send(sender="django_q", func=f, task=task)
                # execute the payload
                timer.value = timer_value  # Busy
                try:
                    # inspect the signature to see whether the target accepts
                    # keyword arguments
                    args_state = getfullargspec(f)
                    kwargs = task.get("kwargs", {})
                    schema_name = kwargs.get("schema_name")
                    if schema_name:
                        with schema_context(schema_name):
                            # only pass the kwargs through when the target
                            # accepts a **kwargs catch-all
                            if args_state.varkw:
                                res = f(*task["args"], **task["kwargs"])
                            else:
                                res = f(*task["args"])
                            result = (res, True)
                    else:
                        # no tenant schema to switch into; fail the task
                        logger.error(
                            _(f'{name} failed [{task["name"]}]: no schema_name provided'))
                        result = (None, False)
                except Exception as e:
                    result = (f"{e} : {traceback.format_exc()}", False)
                    if error_reporter:
                        error_reporter.report()
            with timer.get_lock():
                # Process result
                task["result"] = result[0]
                task["success"] = result[1]
                task["stopped"] = timezone.now()
                result_queue.put(task)
                timer.value = -1  # Idle
                # Recycle
                if task_count == Conf.RECYCLE:
                    timer.value = -2  # Recycled
                    break
        logger.info(_(f"{name} stopped doing work"))
    except Exception as e:
        logger.error(e)
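
# Illustrative only: how a cluster drains these workers, assuming the stock
# django_q convention of one "STOP" sentinel per worker process:
#
# for _ in range(pool_size):
#     task_queue.put("STOP")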