Example #1
    def wrapped(*args):
        """
        Initialize logs, make sure the job is still running, and run the
        task code inside a try-finally block, so that the cache is flushed
        and the 'storing task id' row is removed even if the task fails.
        """
        # the last argument is assumed to be a monitor
        monitor = args[-1]
        job = models.OqJob.objects.get(id=monitor.job_id)
        if job.is_running is False:
            # the job was killed, it is useless to run the task
            raise JobNotRunning(monitor.job_id)

        # it is important to save the task id soon, so that
        # the revoke functionality can work
        with monitor("storing task id", task=tsk, autoflush=True):
            pass

        with logs.handle(job):
            check_mem_usage()  # warn if too much memory is used
            # run the task
            try:
                total = "total " + task_func.__name__
                with monitor(total, task=tsk):
                    with GroundShakingIntensityModel.forbid_instantiation():
                        return task_func(*args)
            finally:
                # save on the db
                CacheInserter.flushall()
                # the task finished, we can remove from the performance
                # table the associated row 'storing task id'
                models.Performance.objects.filter(
                    oq_job=job, operation="storing task id", task_id=tsk.request.id
                ).delete()
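
For context: ``wrapped`` is a closure, so ``task_func`` and ``tsk`` must come from an enclosing decorator that registers the function with celery. A minimal sketch of such a decorator, assuming celery's ``app.task`` API; the name ``oqtask``, the app name and the ``task_func`` attribute are illustrative assumptions, not taken from the examples:

    from celery import Celery

    app = Celery('engine')  # hypothetical app name, for illustration only

    def oqtask(task_func):
        """Turn ``task_func`` into a celery task whose body is ``wrapped``."""
        def wrapped(*args):
            # the real body is the one shown in Example #1; here we just
            # delegate so that the sketch stays runnable
            return task_func(*args)
        tsk = app.task(wrapped, ignore_result=True)  # the ``tsk`` used above
        tsk.task_func = task_func  # lets a manager run the task in-process
        return tsk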
Example #2
    def aggregate_result_set(self, agg, acc):
        """
        Loop on a set of celery AsyncResults and update the accumulator
        by using the aggregation function.

        :param agg: the aggregation function, (acc, val) -> new acc
        :param acc: the initial value of the accumulator
        :returns: the final value of the accumulator
        """
        if not self.results:
            return acc
        backend = current_app().backend
        amqp_backend = backend.__class__.__name__.startswith("AMQP")
        rset = ResultSet(self.results)
        for task_id, result_dict in rset.iter_native():
            check_mem_usage()  # warn if too much memory is used
            result = result_dict["result"]
            if isinstance(result, BaseException):
                raise result
            self.received.append(len(result))
            acc = agg(acc, result.unpickle())
            if amqp_backend:
                # work around a celery bug
                del backend._cache[task_id]
        return acc
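
The ``isinstance(result, BaseException)`` check relies on task errors being returned rather than raised, so the master process can re-raise them at aggregation time. A sketch of that pattern, under the assumption that the engine's ``safely_call`` (used in Example #7 below) behaves this way:

    def safely_call(func, args):
        """
        Call func(*args) and return either its result or the raised
        exception. A sketch of the error-as-result pattern only; the
        real helper may also capture the traceback.
        """
        try:
            return func(*args)
        except Exception as exc:
            return exc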
Example #3
    def wrapped(*args):
        """
        Initialize logs, make sure the job is still running, and run the
        task code inside a try-finally block, so that the cache is flushed
        and the 'storing task id' row is removed even if the task fails.
        """
        # job_id is always assumed to be the first argument
        job_id = args[0]
        job = models.OqJob.objects.get(id=job_id)
        if job.is_running is False:
            # the job was killed, it is useless to run the task
            raise JobNotRunning(job_id)

        # it is important to save the task id soon, so that
        # the revoke functionality can work
        EnginePerformanceMonitor.store_task_id(job_id, tsk)

        with EnginePerformanceMonitor(
                'total ' + task_func.__name__, job_id, tsk, flush=True):
            # tasks write on the celery log file
            logs.set_level(job.log_level)
            check_mem_usage()  # log a warning if too much memory is used
            try:
                # run the task
                return task_func(*args)
            finally:
                # save on the db
                CacheInserter.flushall()
                # the task finished, we can remove from the performance
                # table the associated row 'storing task id'
                models.Performance.objects.filter(
                    oq_job=job,
                    operation='storing task id',
                    task_id=tsk.request.id).delete()
Example #4
        def aggregate_result_set(self, agg, acc):
            """
            Loop on a set of celery AsyncResults and update the accumulator
            by using the aggregation function.

            :param agg: the aggregation function, (acc, val) -> new acc
            :param acc: the initial value of the accumulator
            :returns: the final value of the accumulator
            """
            if isinstance(self.oqtask, types.FunctionType):
                # don't use celery
                return super(OqTaskManager, self).aggregate_result_set(
                    agg, acc)
            if not self.results:
                return acc
            backend = current_app().backend
            amqp_backend = backend.__class__.__name__.startswith('AMQP')
            rset = ResultSet(self.results)
            for task_id, result_dict in rset.iter_native():
                idx = self.task_ids.index(task_id)
                self.task_ids.pop(idx)
                parallel.check_mem_usage()  # warn if too much memory is used
                result = result_dict['result']
                if isinstance(result, BaseException):
                    raise result
                self.received.append(len(result))
                acc = agg(acc, result.unpickle())
                if amqp_backend:
                    # work around a celery bug
                    del backend._cache[task_id]
            return acc
Example #5
        def aggregate_result_set(self, agg, acc):
            """
            Loop on a set of celery AsyncResults and update the accumulator
            by using the aggregation function.

            :param agg: the aggregation function, (acc, val) -> new acc
            :param acc: the initial value of the accumulator
            :returns: the final value of the accumulator
            """
            if isinstance(self.oqtask, types.FunctionType):
                # don't use celery
                return super(OqTaskManager,
                             self).aggregate_result_set(agg, acc)
            if not self.results:
                return acc
            backend = current_app().backend
            amqp_backend = backend.__class__.__name__.startswith('AMQP')
            rset = ResultSet(self.results)
            for task_id, result_dict in rset.iter_native():
                idx = self.task_ids.index(task_id)
                self.task_ids.pop(idx)
                parallel.check_mem_usage()  # warn if too much memory is used
                result = result_dict['result']
                if isinstance(result, BaseException):
                    raise result
                self.received.append(len(result))
                acc = agg(acc, result.unpickle())
                if amqp_backend:
                    # work around a celery bug
                    del backend._cache[task_id]
            return acc
Example #6
    def wrapped(*args):
        """
        Initialize logs, make sure the job is still running, and run the
        task code inside a try-finally block, so that the cache is flushed
        and the 'storing task id' row is removed even if the task fails.
        """
        # the last argument is assumed to be a monitor
        monitor = args[-1]
        job = models.OqJob.objects.get(id=monitor.job_id)
        if job.is_running is False:
            # the job was killed, it is useless to run the task
            raise JobNotRunning(monitor.job_id)

        # it is important to save the task id soon, so that
        # the revoke functionality can work
        with monitor('storing task id', task=tsk, autoflush=True):
            pass

        with logs.handle(job):
            check_mem_usage()  # warn if too much memory is used
            # run the task
            try:
                total = 'total ' + task_func.__name__
                with monitor(total, task=tsk, autoflush=True):
                    return task_func(*args)
            finally:
                # save on the db
                CacheInserter.flushall()
                # the task finished, we can remove from the performance
                # table the associated row 'storing task id'
                models.Performance.objects.filter(
                    oq_job=job,
                    operation='storing task id',
                    task_id=tsk.request.id).delete()
Example #7
    def submit(self, *args):
        """
        Submit an oqtask with the given arguments to celery and append
        the resulting AsyncResult to ``self.results``. If the variable
        OQ_NO_DISTRIBUTE is set, the task function is run in process
        instead and its (wrapped) result is appended.
        """
        check_mem_usage()  # log a warning if too much memory is used
        if no_distribute():
            res = safely_call(self.oqtask.task_func, args)
        else:
            piks = pickle_sequence(args)
            self.sent += sum(len(p) for p in piks)
            res = self.oqtask.delay(*piks)
        self.results.append(res)
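
A plausible reading of the OQ_NO_DISTRIBUTE check mentioned in the docstring, assuming it is a plain environment-variable flag; the real ``no_distribute`` helper may differ:

    import os

    def no_distribute():
        # assumption: distribution is disabled when OQ_NO_DISTRIBUTE is
        # set to a true-ish value in the environment
        return os.environ.get('OQ_NO_DISTRIBUTE', '').lower() in (
            '1', 'true', 'yes')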
Example #8
    def aggregate_result_set(self, agg, acc):
        """
        Loop on a set of celery AsyncResults and update the accumulator
        by using the aggregation function.

        :param agg: the aggregation function, (acc, val) -> new acc
        :param acc: the initial value of the accumulator
        :returns: the final value of the accumulator
        """
        if not self.results:
            return acc
        backend = current_app().backend
        rset = ResultSet(self.results)
        for task_id, result_dict in rset.iter_native():
            check_mem_usage()  # log a warning if too much memory is used
            result = result_dict['result']
            if isinstance(result, BaseException):
                raise result
            acc = agg(acc, result.unpickle())
            del backend._cache[task_id]  # work around a celery bug
        return acc
Example #9
    def aggregate_result_set(self, agg, acc):
        """
        Loop on a set of celery AsyncResults and update the accumulator
        by using the aggregation function.

        :param agg: the aggregation function, (acc, val) -> new acc
        :param acc: the initial value of the accumulator
        :returns: the final value of the accumulator
        """
        if not self.results:
            return acc
        backend = current_app().backend
        rset = ResultSet(self.results)
        for task_id, result_dict in rset.iter_native():
            check_mem_usage()  # warn if too much memory is used
            result = result_dict['result']
            if isinstance(result, BaseException):
                raise result
            self.received += len(result)
            acc = agg(acc, result.unpickle())
            del backend._cache[task_id]  # work around a celery bug
        return acc
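
Finally, the calls ``len(result)`` and ``result.unpickle()`` in the aggregators, together with ``pickle_sequence`` and the byte counting in Example #7, suggest that arguments and results travel as pickled wrappers whose length is the payload size. A minimal sketch of such a wrapper, as a hypothetical reconstruction:

    import pickle

    class Pickled(object):
        """
        Wrap an object as pickled bytes, so the payload size can be
        measured with len() before unpickling. A sketch, not the
        engine's real class.
        """
        def __init__(self, obj):
            self.pik = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)

        def __len__(self):
            return len(self.pik)  # payload size in bytes

        def unpickle(self):
            return pickle.loads(self.pik)

    def pickle_sequence(args):
        # pickle each argument once, so the submitter can count the bytes
        return [a if isinstance(a, Pickled) else Pickled(a) for a in args]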