def save_task(task, broker):
    """
    Saves the task package to Django or the cache
    """
    # SAVE_LIMIT < 0: don't save successful tasks at all
    if not task.get("save", Conf.SAVE_LIMIT >= 0) and task["success"]:
        return
    # enqueues next in a chain
    if task.get("chain", None):
        QUtilities.create_async_tasks_chain(
            task["chain"],
            group=task["group"],
            cached=task["cached"],
            sync=task["sync"],
            broker=broker,
        )
    # SAVE_LIMIT > 0: prune the database; SAVE_LIMIT == 0: no pruning
    close_old_django_connections()
    try:
        # multi-tenant: the worker passes the tenant's schema name in the
        # task kwargs, so all ORM access below runs inside that schema
        kwargs = task.get('kwargs', {})
        schema_name = kwargs.get('schema_name', None)

        if schema_name:
            with schema_context(schema_name):
                if task["success"] and 0 < Conf.SAVE_LIMIT <= Success.objects.count():
                    Success.objects.last().delete()
                # check if this task has previous results
                existing_task = Task.objects.filter(
                    id=task["id"], name=task["name"]).first()
                if existing_task:
                    # only update the result if it hasn't succeeded yet
                    if not existing_task.success:
                        existing_task.stopped = task["stopped"]
                        existing_task.result = task["result"]
                        existing_task.success = task["success"]
                        existing_task.save()
                else:
                    Task.objects.create(
                        id=task["id"],
                        name=task["name"],
                        func=task["func"],
                        hook=task.get("hook"),
                        args=task["args"],
                        kwargs=task["kwargs"],
                        started=task["started"],
                        stopped=task["stopped"],
                        result=task["result"],
                        group=task.get("group"),
                        success=task["success"],
                    )
        else:
            logger.error('No schema name provided for saving the task')

    except Exception as e:
        logger.error(e)
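# a minimal sketch of the task package save_task consumes; the field names
# follow the dict django_q's worker builds, with 'schema_name' carried in
# through kwargs for tenant routing (illustrative values only):
#
#   task = {
#       'id': uuid.uuid4().hex,
#       'name': 'example-task',
#       'func': 'core.tasks.print_users_in_tenant',
#       'args': (),
#       'kwargs': {'schema_name': 'testone'},
#       'started': timezone.now(),
#       'stopped': timezone.now(),
#       'result': None,
#       'group': None,
#       'success': True,
#   }
#   save_task(task, get_broker())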
    def test_async_task(self):
        with schema_context('testone'):
            task_id = QUtilities.add_async_task(
                'core.tasks.print_users_in_tenant')
            print(QUtilities.fetch_task(task_id))

        with schema_context('testtwo'):
            task_id = QUtilities.add_async_task(
                'core.tasks.print_users_in_tenant')
            print(QUtilities.fetch_task(task_id))
    def append(self, func, *args, **kwargs):
        """
        add a task to the chain
        takes the same parameters as async_task()
        """
        self.chain.append((func, args, kwargs))
        # remove existing results
        if self.started:
            QUtilities.delete_task_group(self.group)
            self.started = False
        return self.length()
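    # usage sketch, assuming this fork mirrors django_q's Chain API:
    #   chain = Chain(sync=True)
    #   chain.append('math.copysign', 1, -1)
    #   chain.append('math.floor', 1.5)
    #   chain.run()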
def save_cached(task, broker):
    task_key = f'{broker.list_key}:{task["id"]}'
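    # cached can be True (keep until cache eviction, i.e. no timeout) or an
    # int number of seconds, mirroring django_q's 'cached' option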
    timeout = task["cached"]
    if timeout is True:
        timeout = None
    try:
        group = task.get("group", None)
        iter_count = task.get("iter_count", 0)
        # if it's a group append to the group list
        if group:
            group_key = f"{broker.list_key}:{group}:keys"
            group_list = broker.cache.get(group_key) or []
            # if it's an iter group, check if all other members have reported
            # in; this task's own result is appended below, hence iter_count - 1
            if iter_count and len(group_list) == iter_count - 1:
                group_args = f"{broker.list_key}:{group}:args"
                # collate the results into a Task result
                results = [
                    SignedPackage.loads(broker.cache.get(k))["result"]
                    for k in group_list
                ]
                results.append(task["result"])
                task["result"] = results
                task["id"] = group
                task["args"] = SignedPackage.loads(broker.cache.get(group_args))
                task.pop("iter_count", None)
                task.pop("group", None)
                if task.get("iter_cached", None):
                    task["cached"] = task.pop("iter_cached", None)
                    save_cached(task, broker=broker)
                else:
                    save_task(task, broker)
                broker.cache.delete_many(group_list)
                broker.cache.delete_many([group_key, group_args])
                return
            # save the group list
            group_list.append(task_key)
            broker.cache.set(group_key, group_list, timeout)
            # async_task next in a chain
            if task.get("chain", None):
                QUtilities.create_async_tasks_chain(
                    task["chain"],
                    group=group,
                    cached=task["cached"],
                    sync=task["sync"],
                    broker=broker,
                )
        # save the task
        broker.cache.set(task_key, SignedPackage.dumps(task), timeout)
    except Exception as e:
        logger.error(e)
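# cache key layout used above, with broker.list_key as the cluster's queue name:
#   '<list_key>:<task id>'      -> SignedPackage of a single task result
#   '<list_key>:<group>:keys'   -> list of member task keys in the group
#   '<list_key>:<group>:args'   -> SignedPackage of the original iter arguments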
    def test_iter(self):

        broker = get_broker()
        broker.purge_queue()
        broker.cache.clear()

        numbers = [random.randrange(1, 30) * 0.75 for _ in range(4)]
        with schema_context('testone'):
            task_1 = QUtilities.add_async_tasks_from_iter('math.floor',
                                                          numbers,
                                                          sync=True)
            result = QUtilities.get_result(task_1)
            print(numbers, result)
            assert result is not None
            broker.cache.clear()
    def current(self):
        """
        get the index of the currently executing chain element
        :return int: current chain index
        """
        if not self.started:
            return None
        return QUtilities.get_group_count(self.group, cached=self.cached)
    def fetch(self, wait=0):
        """
        get the task result objects.
        :param int wait: how many milliseconds to wait for a result
        :return: an unsorted list of task objects
        """
        if self.started:
            return QUtilities.fetch_task(self.id,
                                         wait=wait,
                                         cached=self.cached)
    def result(self, wait=0):
        """
        return the full list of results.
        :param int wait: how many milliseconds to wait for a result
        :return: an unsorted list of results
        """
        if self.started:
            return QUtilities.get_result(self.id,
                                         wait=wait,
                                         cached=self.cached)
    def fetch_group(self, failures=True, wait=0, count=None):
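        """
        get the task result objects from the group.
        :param failures: include failed tasks
        :param int wait: how many milliseconds to wait for a result
        :param count: block until this many results are in
        :return: an unsorted list of task objects
        """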

        if self.started and self.group:
            return QUtilities.fetch_task_group(
                self.group,
                failures=failures,
                wait=wait,
                count=count,
                cached=self.cached,
            )
    def result_group(self, failures=False, wait=0, count=None):
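        """
        return the full list of results from the group.
        :param failures: include failed results
        :param int wait: how many milliseconds to wait for a result
        :param count: block until this many results are in
        :return: an unsorted list of results
        """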

        if self.started and self.group:
            return QUtilities.get_result_group(
                self.group,
                failures=failures,
                wait=wait,
                count=count,
                cached=self.cached,
            )
    def result(self, wait=0):
        """
        return the full list of results from the chain when it finishes. blocks until timeout.
        :param int wait: how many milliseconds to wait for a result
        :return: an unsorted list of results
        """
        if self.started:
            return QUtilities.get_result_group(self.group,
                                               wait=wait,
                                               count=self.length(),
                                               cached=self.cached)
    def run(self):
        """
        Start queueing the tasks to the worker cluster
        :return: the task id
        """
        self.kwargs["cached"] = self.cached
        self.kwargs["sync"] = self.sync
        self.kwargs["broker"] = self.broker
        self.id = QUtilities.add_async_tasks_from_iter(self.func, self.args,
                                                       **self.kwargs)
        self.started = True
        return self.id
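    # usage sketch, assuming this fork mirrors django_q's Iter API:
    #   it = Iter('math.floor', [1.5, 2.7, 3.1], cached=True)
    #   it.run()
    #   it.result(wait=1000)   # -> [1, 2, 3]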
    def run(self):
        """
        Start queueing the chain to the worker cluster
        :return: the chain's group id
        """
        self.group = QUtilities.create_async_tasks_chain(
            chain=self.chain[:],
            group=self.group,
            cached=self.cached,
            sync=self.sync,
            broker=self.broker,
        )
        self.started = True
        return self.group
    def fetch(self, failures=True, wait=0):
        """
        get the task result objects from the chain when it finishes. blocks until timeout.
        :param failures: include failed tasks
        :param int wait: how many milliseconds to wait for a result
        :return: an unsorted list of task objects
        """
        if self.started:
            return QUtilities.fetch_task_group(
                self.group,
                failures=failures,
                wait=wait,
                count=self.length(),
                cached=self.cached,
            )
    def run(self):
        """
        queue the task on the worker cluster
        :return: the task id
        """
        self.id = QUtilities.add_async_task(self.func, *self.args,
                                            **self.kwargs)
        self.started = True
        return self.id
    def fetch(self, wait=0):
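        """
        get the task result object.
        :param int wait: how many milliseconds to wait for a result
        :return: the task object, if available
        """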

        if self.started:
            return QUtilities.fetch_task(self.id,
                                         wait=wait,
                                         cached=self.cached)
    def result(self, wait=0):
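        """
        return the task result.
        :param int wait: how many milliseconds to wait for a result
        :return: the task result, if available
        """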

        if self.started:
            return QUtilities.get_result(self.id,
                                         wait=wait,
                                         cached=self.cached)
def scheduler(broker=None):
    """
    Creates a task from a schedule at the scheduled time and schedules next run
    """
    if not broker:
        broker = get_broker()
    close_old_django_connections()
    tenant_model = get_tenant_model()
    tenant_schemas_to_exclude = getattr(
        settings, 'SCHEMAS_TO_BE_EXCLUDED_BY_SCHEDULER', ['public'])

    try:
        for tenant in tenant_model.objects.exclude(schema_name__in=tenant_schemas_to_exclude):
            with schema_context(tenant.schema_name):
                with db.transaction.atomic():
                    for s in (
                        Schedule.objects.select_for_update()
                        .exclude(repeats=0)
                        .filter(next_run__lt=timezone.now())
                    ):
                        args = ()
                        kwargs = {}
                        # get args, kwargs and hook
                        if s.kwargs:
                            try:
                                # eval is constrained here: the stored kwargs
                                # string is wrapped in dict(), as in django_q
                                kwargs = eval(f"dict({s.kwargs})")
                            except SyntaxError:
                                kwargs = {}
                        if s.args:
                            args = ast.literal_eval(s.args)
                            # single value won't eval to tuple, so:
                            if not isinstance(args, tuple):
                                args = (args,)
                        q_options = kwargs.get("q_options", {})
                        if s.hook:
                            q_options["hook"] = s.hook
                        # set up the next run time
                        if s.schedule_type != s.ONCE:
                            next_run = arrow.get(s.next_run)
                            while True:
                                if s.schedule_type == s.MINUTES:
                                    next_run = next_run.shift(
                                        minutes=+(s.minutes or 1))
                                elif s.schedule_type == s.HOURLY:
                                    next_run = next_run.shift(hours=+1)
                                elif s.schedule_type == s.DAILY:
                                    next_run = next_run.shift(days=+1)
                                elif s.schedule_type == s.WEEKLY:
                                    next_run = next_run.shift(weeks=+1)
                                elif s.schedule_type == s.MONTHLY:
                                    next_run = next_run.shift(months=+1)
                                elif s.schedule_type == s.QUARTERLY:
                                    next_run = next_run.shift(months=+3)
                                elif s.schedule_type == s.YEARLY:
                                    next_run = next_run.shift(years=+1)
                                elif s.schedule_type == s.CRON:
                                    # use an aware base time so the catch-up
                                    # comparison with arrow.utcnow() works
                                    next_run = croniter(
                                        s.cron, timezone.now()
                                    ).get_next(timezone.datetime)
                                if Conf.CATCH_UP or next_run > arrow.utcnow():
                                    break
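                            # croniter yields a plain datetime; the arrow
                            # objects from the shifts above need .datetime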
                            if s.schedule_type == s.CRON:
                                s.next_run = next_run
                            else:
                                s.next_run = next_run.datetime
                            s.repeats -= 1
                        # send it to the cluster
                        q_options["broker"] = broker
                        q_options["group"] = q_options.get(
                            "group", s.name or s.id)
                        kwargs["q_options"] = q_options
                        s.task = QUtilities.add_async_task(
                            s.func, *args, **kwargs)
                        # log it
                        if not s.task:
                            logger.error(
                                _(
                                    f"{current_process().name} failed to create a task from schedule [{s.name or s.id}] under tenant {kwargs.get('schema_name', 'UNSPECIFIED')}"
                                )
                            )
                        else:
                            logger.info(
                                _(
                                    f"{current_process().name} created a task from schedule [{s.name or s.id}] under tenant {kwargs.get('schema_name', 'UNSPECIFIED')}"
                                )
                            )
                        # default behavior is to delete a ONCE schedule
                        if s.schedule_type == s.ONCE:
                            if s.repeats < 0:
                                s.delete()
                                continue
                            # but not if it has a positive repeats
                            s.repeats = 0
                        # save the schedule
                        s.save()
    except Exception as e:
        logger.error(e)
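# a minimal sketch of a schedule this loop would pick up; field names follow
# django_q's Schedule model, and the tenant/task names are assumptions:
#
#   with schema_context('testone'):
#       Schedule.objects.create(
#           func='core.tasks.print_users_in_tenant',
#           kwargs="schema_name='testone'",  # parsed via eval(f"dict({s.kwargs})")
#           schedule_type=Schedule.MINUTES,
#           minutes=5,
#           repeats=-1,        # negative repeats never reach 0, so it runs forever
#           next_run=timezone.now(),
#       )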