Example #1
    def post_process(self):
        """
        If requested, perform additional processing of GMFs to produce hazard
        curves.
        """
        logs.LOG.debug('> starting post processing')

        if self.hc.hazard_curves_from_gmfs:
            post_processing.do_post_process(self.job)

            # If `mean_hazard_curves` is True and/or `quantile_hazard_curves`
            # has some value (not an empty list), do this additional
            # post-processing.
            if self.hc.mean_hazard_curves or self.hc.quantile_hazard_curves:
                tasks = cls_post_processing.setup_tasks(
                    self.job, self.job.hazard_calculation,
                    curve_finder=models.HazardCurveData.objects,
                    writers=dict(mean_curves=MeanCurveWriter,
                                 quantile_curves=QuantileCurveWriter))

                utils_tasks.distribute(
                        cls_post_processing.do_post_process,
                        ("post_processing_task", tasks),
                        tf_args=dict(job_id=self.job.id))

        logs.LOG.debug('< done with post processing')
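Note: the `distribute` call above follows the convention exercised by the tests below. The first element of the tuple names the keyword under which each data item is delivered to the subtask, while `tf_args` supplies keyword arguments shared by all subtasks. Conceptually (a synchronous sketch, not the actual celery dispatch), the call expands to:

    # Hypothetical synchronous expansion of the distribute() call above:
    # one invocation per item in `tasks`, each also receiving the shared
    # tf_args entry job_id.
    for task in tasks:
        cls_post_processing.do_post_process(
            post_processing_task=task, job_id=self.job.id)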
Example #2
 def test_distribute_with_failing_subtask(self):
     """At least one subtask failed, a `TaskFailed` exception is raised."""
     try:
         tasks.distribute(failing_task, ("data", range(5)))
     except Exception, exc:
         # The exception is raised by the first task.
         self.assertEqual(0, exc.args[0])
     else:
         self.fail("distribute() did not raise")
Example #3
 def test_distribute_with_keyword_argument_not_expected_by_task(self):
     """
     An unexpected keyword argument is passed to the subtask, triggering a
     `TypeError` exception.
     """
     try:
         tasks.distribute(single_arg_called_a, ("data", range(5)))
     except Exception, exc:
         self.assertEqual(
             "single_arg_called_a() got an unexpected keyword argument "
             "'data'", exc.args[0])
Example #4
 def test_distribute_with_type_error_and_no_exception_msg(self):
     """
     Exceptions without error messages should not result in another
     exception when being reraised.
     """
     from celery.result import TaskSetResult
     try:
         with patch('celery.task.sets.TaskSet.apply_async') as m2:
             m2.return_value = mock.Mock(spec=TaskSetResult)
             m2.return_value.join_native.side_effect = TypeError
             tasks.distribute(single_arg_called_a, ("a", range(5)))
     except Exception, exc:
         self.assertEqual((), exc.args)
     else:
         self.fail("distribute() did not raise")
Example #5
    def test_distribute_with_ignore_result_set(self):
        """
        The specified number of subtasks is actually spawned even for tasks
        with ignore_result=True and these run and complete.

        Since the results of the tasks are ignored, the only way to know that
        they ran and completed is to verify that the data they were supposed
        to write to the key value store is actually there.
        """

        def value(key):
            """Construct a test value for the given key."""
            return key[-3:] * 2

        keys = ["irtc:%s" % str(uuid.uuid4())[:8] for _ in xrange(5)]
        values = [value(uid) for uid in keys]
        data = zip(keys, values)

        result = tasks.distribute(ignore_result, ("data", [[d] for d in data]))
        # An empty list is returned for tasks with ignore_result=True
        # and no asynchronous task handler function.
        self.assertFalse(result)

        # Give the tasks a bit of time to complete.
        time.sleep(0.25)

        for key, value in data:
            self.assertEqual(value, TestStore.get(key))
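The fixed `time.sleep(0.25)` above can flake on a slow or loaded machine. A deadline-based polling loop is a more robust way to wait for `ignore_result` tasks. A sketch (the `timeout` and `poll` values are illustrative), relying only on `TestStore.get` as used above:

    import time

    def wait_for_keys(keys, timeout=5.0, poll=0.05):
        """Poll TestStore until every key exists or the deadline passes."""
        deadline = time.time() + timeout
        pending = set(keys)
        while pending and time.time() < deadline:
            pending = set(k for k in pending if TestStore.get(k) is None)
            if pending:
                time.sleep(poll)
        # True if all keys were written before the deadline.
        return not pending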
Example #6
 def test_distribute_with_no_other_args(self):
     """The subtask is only invoked with the data to be processed."""
     # Each subtask should see no positional arguments; the data to be
     # processed is passed as a keyword argument. With flatten_results=True,
     # the (args, kwargs) pairs returned by reflect_args are flattened
     # into a single list.
     expected = [
         (), {"data_to_process": 11}, (), {"data_to_process": 12}]
     result = tasks.distribute(reflect_args, ("data_to_process", [11, 12]),
                               flatten_results=True)
     self.assertEqual(expected, result)
Example #7
    def test_distribute_with_other_args(self):
        """
        The subtask is invoked with the data to be processed as well as with
        other parameters.
        """
        # The keyword arguments below will be passed to the celery subtasks in
        # addition to the data that is to be processed.
        tf_args = {"1+1": 2, "2/1": 1}

        # We expect the subtasks to see the following positional and keyword
        # arguments respectively.
        expected = [
            ((), {"data_to_process": [13], "1+1": 2, "2/1": 1}),
            ((), {"data_to_process": [14], "1+1": 2, "2/1": 1})]

        # Two subtasks will be spawned and just return the arguments they
        # received.
        result = tasks.distribute(reflect_args,
                                  ("data_to_process", [[13], [14]]),
                                  tf_args=tf_args)
        self.assertEqual(expected, result)
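The expected values above imply that `distribute` merges `tf_args` into every subtask's keyword arguments alongside the data chunk itself. A hypothetical reconstruction of that merging step (the helper name is illustrative, not the actual implementation):

    def build_subtask_kwargs(arg_name, chunk, tf_args=None):
        """Merge the shared tf_args with one chunk of data."""
        kwargs = dict(tf_args or {})
        kwargs[arg_name] = chunk
        return kwargs

    # Reproduces the first expected kwargs dict from the test above.
    assert build_subtask_kwargs(
        "data_to_process", [13], {"1+1": 2, "2/1": 1}) == {
        "data_to_process": [13], "1+1": 2, "2/1": 1}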
Example #8
    def test_distribute_with_ignore_result_set_and_ath(self):
        """
        The specified number of subtasks is actually spawned (even for tasks
        with ignore_result=True) and the asynchronous task handler function is
        run.
        """

        def value(key):
            """Construct a test value for the given key."""
            return key[-3:] * 2

        def ath(data):
            """
            An asynchronous task handler function that converts all task
            results to upper case and returns the list of keys found.
            """
            items_expected = len(data)
            items_found = []
            while len(items_found) < items_expected:
                for key, _ in data:
                    if key in items_found:
                        continue
                    value = TestStore.get(key)
                    if value is not None:
                        TestStore.set(key, value.upper())
                        items_found.append(key)
                time.sleep(0.05)
            return items_found

        keys = ["irtc:%s" % str(uuid.uuid4())[:8] for _ in xrange(5)]
        values = [value(uid) for uid in keys]
        data = zip(keys, values)

        args = ("data", [[d] for d in data])
        result = tasks.distribute(ignore_result, args, ath=ath,
                                  ath_args=dict(data=data))
        self.assertEqual(sorted(keys), sorted(result))

        for key, value in data:
            self.assertEqual(value.upper(), TestStore.get(key))
Example #9
 def test_distribute_with_task_returning_single_item(self):
     """distribute() copes with tasks that return a single item."""
     expected = [1] * 5
     result = tasks.distribute(just_say_1, ("data", range(5)))
     self.assertEqual(expected, result)
Example #10
 def test_distribute_uses_the_specified_number_of_subtasks(self):
     """One subtasks per data item is actually spawned."""
     expected = ["hello"] * 5
     result = tasks.distribute(just_say_hello, ("data", range(5)))
     self.assertEqual(expected, result)
Example #11
 def test_distribute_returns_results_wo_flattening(self):
     """Results are returned in the right order."""
     expected = [[i] for i in range(7)]
     result = tasks.distribute(reflect_data_to_be_processed,
                               ("data", [[i] for i in range(7)]))
     self.assertEqual(expected, result)
Example #12
 def test_distribute_returns_results_in_right_order_when_flattened(self):
     """Results are returned in the right order when flattened."""
     expected = range(7)
     result = tasks.distribute(reflect_data_to_be_processed,
                               ("data", range(7)), flatten_results=True)
     self.assertEqual(expected, result)
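Taken together, these tests pin down the observable contract of `distribute`: one subtask per data item, data delivered under a caller-chosen keyword, shared keyword arguments via `tf_args`, optional flattening of list results, an empty result for `ignore_result` tasks, and an optional asynchronous task handler (`ath`) whose return value replaces the task results. A minimal synchronous sketch that would satisfy these expectations (an illustration only, not the celery-backed OpenQuake implementation):

    def distribute_sketch(task_func, name_data, tf_args=None,
                          flatten_results=False, ath=None, ath_args=None):
        """Run task_func once per data item, mimicking distribute()."""
        arg_name, data = name_data
        results = []
        for item in data:
            kwargs = dict(tf_args or {})
            kwargs[arg_name] = item
            results.append(task_func(**kwargs))
        if ath is not None:
            # The handler's return value replaces the task results.
            return ath(**(ath_args or {}))
        if getattr(task_func, "ignore_result", False):
            # Results of ignore_result tasks are discarded.
            return []
        if flatten_results:
            flat = []
            for result in results:
                if isinstance(result, (list, tuple)):
                    flat.extend(result)
                else:
                    flat.append(result)
            return flat
        return results

For instance, `distribute_sketch(just_say_1, ("data", range(5)))` would return `[1, 1, 1, 1, 1]`, matching Example #9.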