Example #1
class test_failed_AsyncResult(test_GroupResult):

    def setup(self):
        self.size = 11
        subtasks = make_mock_group(10)
        failed = mock_task('ts11', states.FAILURE, KeyError('Baz'))
        save_result(failed)
        failed_res = AsyncResult(failed['id'])
        self.ts = GroupResult(uuid(), subtasks + [failed_res])

    def test_completed_count(self):
        self.assertEqual(self.ts.completed_count(), len(self.ts) - 1)

    def test___iter__(self):
        it = iter(self.ts)

        def consume():
            return list(it)

        with self.assertRaises(KeyError):
            consume()

    def test_join(self):
        with self.assertRaises(KeyError):
            self.ts.join()

    def test_successful(self):
        self.assertFalse(self.ts.successful())

    def test_failed(self):
        self.assertTrue(self.ts.failed())
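The helpers these tests rely on (make_mock_group, mock_task, save_result, and the MockAsyncResultSuccess class used further down) are not shown on this page; they appear to come from Celery's own test suite. A minimal sketch of what they might look like, assuming a configured Celery app whose result backend is reachable as app.backend (names and signatures here are illustrative, not the actual implementations):

from uuid import uuid4

from celery import current_app, states
from celery.result import AsyncResult


def uuid():
    # Stand-in for the uuid() helper used throughout these snippets.
    return str(uuid4())


def mock_task(name, state, result):
    # Represent a "task" as a plain dict carrying its id, state and result.
    return {'id': uuid(), 'name': name, 'state': state, 'result': result}


def save_result(task, app=None):
    # Persist the fake task's outcome in the result backend so that
    # AsyncResult(task['id']) can later read it back.
    app = app or current_app
    if task['state'] == states.SUCCESS:
        app.backend.mark_as_done(task['id'], task['result'])
    else:
        app.backend.mark_as_failure(task['id'], task['result'])


def make_mock_group(size=10, app=None):
    # Build `size` successful fake tasks and return their AsyncResults.
    tasks = [mock_task('ts%d' % i, states.SUCCESS, i) for i in range(size)]
    for task in tasks:
        save_result(task, app=app)
    return [AsyncResult(task['id']) for task in tasks]


class MockAsyncResultSuccess(AsyncResult):
    # A result that always reports SUCCESS with a fixed value of 42,
    # and records whether forget() was called.
    forgotten = False

    def forget(self):
        self.forgotten = True

    @property
    def result(self):
        return 42

    @property
    def state(self):
        return states.SUCCESS

    def get(self, **kwargs):
        return self.result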
Example #3
 def test_join_timeout(self):
     ar = MockAsyncResultSuccess(uuid())
     ar2 = MockAsyncResultSuccess(uuid())
     ar3 = AsyncResult(uuid())
     ts = GroupResult(uuid(), [ar, ar2, ar3])
     with self.assertRaises(TimeoutError):
         ts.join(timeout=0.0000001)
Example #4
 def test_forget(self):
     subs = [MockAsyncResultSuccess(uuid()),
             MockAsyncResultSuccess(uuid())]
     ts = GroupResult(uuid(), subs)
     ts.forget()
     for sub in subs:
         self.assertTrue(sub.forgotten)
Example #5
File: test.py Project: hysds/sciflo
def join_map_jobs(task_ids):
    """Test reduce function that manually joins all mapped jobs."""

    print("task_ids: {}".format(json.dumps(task_ids, indent=2)))
    res = GroupResult(id=uuid.uuid4().bytes, results=[
                      AsyncResult(id[0]) for id in task_ids])
    while True:
        ready = res.ready()
        if ready:
            break
        time.sleep(5)
    results = []
    for r in res.join(timeout=10.):
        # deduped job?
        if isinstance(r, (list, tuple)):
            # build resolvable result
            task_id = r[0]
            results.append({'uuid': task_id,
                            'job_id': task_id,
                            'payload_id': task_id,
                            'status': 'job-deduped'})
        else:
            results.append(r)
    args = [result['payload_id'] for result in results]
    return args
Example #6
 def test_iter_native(self):
     backend = SimpleBackend()
     subtasks = [AsyncResult(uuid(), backend=backend)
                     for i in range(10)]
     ts = GroupResult(uuid(), subtasks)
     backend.ids = [subtask.id for subtask in subtasks]
     self.assertEqual(len(list(ts.iter_native())), 10)
Example #7
 def test_restore_current_app_fallback(self):
     subs = [MockAsyncResultSuccess(uuid(), app=self.app)]
     ts = self.app.GroupResult(uuid(), subs)
     ts.save()
     with pytest.raises(RuntimeError,
                        message="Test depends on current_app"):
         GroupResult.restore(ts.id)
Example #14
 def test_iterate_eager(self):
     ar1 = EagerResult(uuid(), 42, states.SUCCESS)
     ar2 = EagerResult(uuid(), 42, states.SUCCESS)
     ts = GroupResult(uuid(), [ar1, ar2])
     it = ts.iterate()
     self.assertEqual(next(it), 42)
     self.assertEqual(next(it), 42)
Example #15
 def test_iterate_yields(self):
     ar = MockAsyncResultSuccess(uuid())
     ar2 = MockAsyncResultSuccess(uuid())
     ts = GroupResult(uuid(), [ar, ar2])
     it = ts.iterate()
     self.assertEqual(next(it), 42)
     self.assertEqual(next(it), 42)
Example #16
 def test_join_native(self):
     backend = SimpleBackend()
     subtasks = [AsyncResult(uuid(), backend=backend) for i in range(10)]
     ts = GroupResult(uuid(), subtasks)
     backend.ids = [subtask.id for subtask in subtasks]
     res = ts.join_native()
     self.assertEqual(res, list(range(10)))
Example #17
 def test_save_restore(self):
     subs = [MockAsyncResultSuccess(uuid()), MockAsyncResultSuccess(uuid())]
     ts = GroupResult(uuid(), subs)
     ts.save()
     with self.assertRaises(AttributeError):
         ts.save(backend=object())
     self.assertEqual(GroupResult.restore(ts.id).subtasks, ts.subtasks)
     ts.delete()
     self.assertIsNone(GroupResult.restore(ts.id))
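As a usage note on what the save/restore tests above exercise: GroupResult.save() writes the set of child task ids to the result backend under the group's id, GroupResult.restore() looks that set up again, and delete() removes the stored entry so a subsequent restore() returns None. A minimal end-to-end sketch, assuming a Redis (or other group-capable) result backend and a hypothetical add task:

from celery import Celery, group
from celery.result import GroupResult

# Illustrative app; any backend that can store groups (Redis, memcached,
# a database backend, ...) works here.
app = Celery('demo', broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/0')


@app.task
def add(x, y):
    return x + y


def save_restore_round_trip():
    result = group(add.s(i, i) for i in range(4)).apply_async()
    result.save()                                    # persist the child task ids
    assert GroupResult.restore(result.id, app=app).id == result.id
    result.delete()                                  # drop the stored group
    assert GroupResult.restore(result.id, app=app) is None
    return result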
Example #18
    def test_empty_group_result(self, manager):
        if not manager.app.conf.result_backend.startswith('redis'):
            raise pytest.skip('Requires redis result backend.')

        task = group([])
        result = task.apply_async()

        GroupResult.save(result)
        task = GroupResult.restore(result.id)
        assert task.results == []
Example #19
    def test_groupresult_save_restore(self):
        """Test if we can save and restore a GroupResult"""
        group_id = uuid()
        results = [AsyncResult(id=uuid())]
        group = GroupResult(id=group_id, results=results)

        group.save(backend=self.b)

        restored_group = self.b.restore_group(group_id=group_id)

        assert restored_group == group
Example #21
def cancel(request, instance, import_type, import_event_id):
    ie = _get_import_event(instance, import_type, import_event_id)

    ie.status = GenericImportEvent.CANCELED
    ie.save()

    # If verifications tasks are still scheduled, we need to revoke them
    if ie.task_id:
        GroupResult.restore(ie.task_id).revoke()

    return list_imports(request, instance)
Example #23
 def create_group_result(self):
     """Return a GroupResult model instance
     with a single, successful result"""
     id = uuid()
     task_result = self.create_task_result()
     task_result.status = states.SUCCESS
     task_result.save()
     results = [AsyncResult(id=task_result.task_id)]
     group = CeleryGroupResult(id=id, results=results)
     group.save()
     meta = GroupResult.objects.get(group_id=id)
     return meta
    def test_groupresult_save_restore_nested(self):
        """Test if we can save and restore a nested GroupResult"""
        group_id = uuid()
        async_result = AsyncResult(id=uuid())
        nested_results = [AsyncResult(id=uuid()), AsyncResult(id=uuid())]
        nested_group = GroupResult(id=uuid(), results=nested_results)
        group = GroupResult(id=group_id, results=[nested_group, async_result])

        group.save(backend=self.b)

        restored_group = self.b.restore_group(group_id=group_id)

        assert restored_group == group
Example #26
def poll_state(request):
    """A view to report the progress to the user."""
    data = 'Fail'
    if request.is_ajax():
        if 'task_id' in request.POST and request.POST['task_id']:
            task_id = request.POST['task_id']
            task_total = request.POST['task_total']
            task = GroupResult.restore(task_id, app=app)
            progress = task.completed_count() / float(task_total)
            if progress <= 0:
                progress = 0.01
            data = {}
            if progress >= 1.0 and request.session.get('saved') is None:
                request.session['saved'] = 'saved'
                progress = None
                result = task.get()
                task.forget()
                uuid = request.POST['product_uuid']
                p = Product.objects.get(uuid=uuid)
                historic_id = save_product_indexing(result, p)
                data['historic_id'] = urlsafe_base64_encode(force_bytes(historic_id))
                data['uuid'] = str(p.uuid)
                del request.session['saved']
            data['process_percent'] = progress
        else:
            data = 'No task_id in the request'
    else:
        data = 'This is not an ajax request'

    if isinstance(data, dict):
        data = json.dumps(data)
    return JsonResponse(data, safe=False)
Example #27
 def on_chord_part_return(self, task, propagate=True):
     if not self.implements_incr:
         return
     from celery import subtask
     from celery.result import GroupResult
     gid = task.request.group
     if not gid:
         return
     key = self.get_key_for_chord(gid)
     deps = GroupResult.restore(gid, backend=task.backend)
     val = self.incr(key)
     if val >= len(deps):
         j = deps.join_native if deps.supports_native_join else deps.join
         callback = subtask(task.request.chord)
         try:
             ret = j(propagate=propagate)
         except Exception as exc:
             culprit = next(deps._failed_join_report())
             self.app._tasks[callback.task].backend.fail_from_current_stack(
                 callback.id, exc=ChordError('Dependency %s raised %r' % (
                     culprit.id, exc))
             )
         else:
             callback.delay(ret)
         finally:
             deps.delete()
             self.client.delete(key)
     else:
         self.expire(key, 86400)
Example #28
    def handle(self, *args, **kwargs):  # pylint: disable=unused-argument
        edx_course_key = kwargs.get('edx_course_key')
        try:
            run = CourseRun.objects.get(edx_course_key=edx_course_key)
        except CourseRun.DoesNotExist:
            raise CommandError('Course Run for course_id "{0}" does not exist'.format(edx_course_key))

        con = get_redis_connection("redis")
        failed_users_count = con.llen(CACHE_KEY_FAILED_USERS_BASE_STR.format(edx_course_key))

        if CourseRunGradingStatus.is_complete(run):
            self.stdout.write(
                self.style.SUCCESS(
                    'Final grades for course "{0}" are complete'.format(edx_course_key)
                )
            )
        elif CourseRunGradingStatus.is_pending(run):
            cache_id = CACHE_ID_BASE_STR.format(edx_course_key)
            group_results_id = cache_redis.get(cache_id)
            if group_results_id is not None:
                results = GroupResult.restore(group_results_id, app=app)
                if not results.ready():
                    self.stdout.write(
                        self.style.WARNING(
                            'Final grades for course "{0}" are being processed'.format(edx_course_key)
                        )
                    )
                else:
                    self.stdout.write(
                        self.style.WARNING(
                            'Async tasks to freeze grades for course "{0}" '
                            'are done, but the course is not marked as complete.'.format(edx_course_key)
                        )
                    )
            else:
                self.stdout.write(
                    self.style.ERROR(
                        'Final grades for course "{0}" are marked as being processed'
                        ', but no task was found.'.format(edx_course_key)
                    )
                )
        else:
            self.stdout.write(
                self.style.WARNING(
                    'Final grades for course "{0}" are not being processed yet'.format(edx_course_key)
                )
            )
        message_detail = ', where {0} failed authentication'.format(failed_users_count) if failed_users_count else ''
        users_in_cache = set(CachedEnrollment.get_cached_users(run)).intersection(
            set(CachedCurrentGrade.get_cached_users(run))
        )
        self.stdout.write(
            self.style.SUCCESS(
                'The students with a final grade are {0}/{1}{2}'.format(
                    FinalGrade.objects.filter(course_run=run).count(),
                    len(users_in_cache),
                    message_detail
                )
            )
        )
Example #30
    def test_on_chord_part_return(self):
        """Test if the ChordCounter is properly decremented and the callback is
        triggered after all chord parts have returned"""
        gid = uuid()
        tid1 = uuid()
        tid2 = uuid()
        subtasks = [AsyncResult(tid1), AsyncResult(tid2)]
        group = GroupResult(id=gid, results=subtasks)
        self.b.apply_chord(group, self.add.s())

        chord_counter = ChordCounter.objects.get(group_id=gid)
        assert chord_counter.count == 2

        request = mock.MagicMock()
        request.id = subtasks[0].id
        request.group = gid
        request.task = "my_task"
        request.args = ["a", 1, "password"]
        request.kwargs = {"c": 3, "d": "e", "password": "******"}
        request.argsrepr = "argsrepr"
        request.kwargsrepr = "kwargsrepr"
        request.hostname = "celery@ip-0-0-0-0"
        result = {"foo": "baz"}

        self.b.mark_as_done(tid1, result, request=request)

        chord_counter.refresh_from_db()
        assert chord_counter.count == 1

        self.b.mark_as_done(tid2, result, request=request)

        with pytest.raises(ChordCounter.DoesNotExist):
            ChordCounter.objects.get(group_id=gid)

        request.chord.delay.assert_called_once()
Example #32
    def test_join_native_with_group_chain_group(self):
        """Test the group(chain(group)) case, where join_native must run correctly.

        In the group(chain(group)) case, GroupResult has no _cache property, and
        AsyncBackendMixin.iter_native returns a node instead of node._cache.
        This test makes sure ResultSet.join_native can correctly process both
        kinds of values that AsyncBackendMixin.iter_native may return.
        """
        def _get_meta(tid, result=None, children=None):
            return {
                'status': states.SUCCESS,
                'result': result,
                'children': children,
                'task_id': tid,
            }

        results = [self.app.AsyncResult(t) for t in [1, 2, 3]]
        values = [(_.id, _get_meta(_.id, _)) for _ in results]
        g_res = GroupResult(6, [self.app.AsyncResult(t) for t in [4, 5]])
        results += [g_res]
        values += [(6, g_res.children)]
        x = self.app.ResultSet(results)
        x.results[0].backend = Mock()
        x.results[0].backend.join = Mock()
        x.results[3][0].get = Mock()
        x.results[3][0].get.return_value = g_res.results[0]
        x.results[3][1].get = Mock()
        x.results[3][1].get.return_value = g_res.results[1]
        x.iter_native = Mock()
        x.iter_native.return_value = values.__iter__()
        x.join_native()
        x.iter_native.assert_called()
Example #33
 def test_save_restore_delete_group(self):
     tid = uuid()
     tsr = GroupResult(tid, [AsyncResult(uuid()) for _ in range(10)])
     self.b.save_group(tid, tsr)
     self.b.restore_group(tid)
     self.assertEqual(self.b.restore_group(tid), tsr)
     self.b.delete_group(tid)
     self.assertIsNone(self.b.restore_group(tid))
Example #34
    def on_chord_part_return(self, task, state, result, propagate=None):
        if not self.implements_incr:
            return
        app = self.app
        if propagate is None:
            propagate = app.conf.CELERY_CHORD_PROPAGATES
        gid = task.request.group
        if not gid:
            return
        key = self.get_key_for_chord(gid)
        try:
            deps = GroupResult.restore(gid, backend=task.backend)
        except Exception as exc:
            callback = maybe_signature(task.request.chord, app=app)
            return self.chord_error_from_stack(
                callback,
                ChordError('Cannot restore group: {0!r}'.format(exc)),
            )
        if deps is None:
            try:
                raise ValueError(gid)
            except ValueError as exc:
                callback = maybe_signature(task.request.chord, app=app)
                return self.chord_error_from_stack(
                    callback,
                    ChordError('GroupResult {0} no longer exists'.format(gid)),
                )
        val = self.incr(key)
        if val >= len(deps):
            callback = maybe_signature(task.request.chord, app=app)
            j = deps.join_native if deps.supports_native_join else deps.join
            try:
                with allow_join_result():
                    ret = j(timeout=3.0, propagate=propagate)
            except Exception as exc:
                try:
                    culprit = next(deps._failed_join_report())
                    reason = 'Dependency {0.id} raised {1!r}'.format(
                        culprit,
                        exc,
                    )
                except StopIteration:
                    reason = repr(exc)

                self.chord_error_from_stack(callback, ChordError(reason))
            else:
                try:
                    callback.delay(ret)
                except Exception as exc:
                    self.chord_error_from_stack(
                        callback,
                        ChordError('Callback error: {0!r}'.format(exc)),
                    )
            finally:
                deps.delete()
                self.client.delete(key)
        else:
            self.expire(key, 86400)
 def post(self):
     task_ids = request.form.getlist('task_ids')
     results = [AsyncResult(tid, app=app.celery) for tid in task_ids]
     group_result = GroupResult(id=str(uuid.uuid4()), results=results)
     group_result.save()
     successful = all_finished(group_result.results)
     return {
         'status':
         'SUCCESS' if successful else 'PENDING',
         'bulk_task_id':
         str(group_result.id),
         'bulk_task_url':
         app.api.url_for(TaskSubscriptionAPI,
                         bulk_task_id=str(group_result.id),
                         _external=True),
         'task_results':
         get_all_task_results(group_result.results)
     }
Example #36
def group_status(request, group_id):
    """Return group id and its async results status & result in JSON format."""
    result = GroupResult.restore(group_id)
    retval = [{
        "result": async_result.result,
        "status": async_result.status
    } for async_result in result.results]
    response_data = {'id': group_id, 'results': retval}
    return JsonResponse({'group': response_data})
Example #37
    def post(self, request):

        task = request.POST['task_id']
        res = GroupResult.restore(task)

        if res and not res.ready():
            return HttpResponse(json.dumps({"status": "loading"}), content_type="application/json")

        # Task completion allows for origin information to be pulled
        try:
            task_origin = TaskTracker.objects.get(group_id=task)
            record_type = task_origin.type
            indicator = task_origin.keyword

        except MultipleObjectsReturned:
            task_origin = TaskTracker.objects.filter(group_id=task).latest('date')
            record_type = task_origin.type
            indicator = task_origin.keyword

        except ObjectDoesNotExist:
            record_type = None
            indicator = None

        # Pull data according to the record type
        if record_type == "Recent":

            self.template_name = "pivoteer/RecentRecords.html"

            # Current hosting records
            host_record = IndicatorRecord.objects.recent_hosts(indicator)
            self.template_vars["current_hosts"] = host_record

            # Current WHOIS record
            whois_record = IndicatorRecord.objects.recent_whois(indicator)
            self.template_vars["current_whois"] = whois_record

        elif record_type == "Historical":

            self.template_name = "pivoteer/HistoricalRecords.html"

            # Historical hosting records
            host_records = IndicatorRecord.objects.historical_hosts(indicator, request)
            self.template_vars["hosting_records"] = host_records

            # Historical WHOIS records
            whois_record = IndicatorRecord.objects.historical_whois(indicator)
            self.template_vars["historical_whois"] = whois_record

        elif record_type == "Malware":

            self.template_name = "pivoteer/MalwareRecords.html"

            malware_records = IndicatorRecord.objects.malware_records(indicator)
            self.template_vars["malware_records"] = malware_records

        self.template_vars["origin"] = indicator
        return render(request, self.template_name, self.template_vars)
Example #40
def collect_stats_transcoding(group_id):
    """
    Collect statistics about the given transcoding group job.
    """
    group = GroupResult.restore(group_id)
    data = {}
    data['state'] = 'TRANSCODING'
    data['current'] = group.completed_count()
    data['total'] = len(group)
    
    return data, group.ready()
 def test_apply_chord_header_result_arg(self):
     """Test if apply_chord can handle Celery <= 5.1 call signature"""
     gid = uuid()
     tid1 = uuid()
     tid2 = uuid()
     subtasks = [AsyncResult(tid1), AsyncResult(tid2)]
     group = GroupResult(id=gid, results=subtasks)
     # Celery < 5.1
     self.b.apply_chord(group, self.add.s())
     # Celery 5.1
     self.b.apply_chord((uuid(), subtasks), self.add.s())
Example #42
    def stop_jobs(jobs: List[Job], job_options: Dict[str, Any]):
        raise NotImplementedError
        # TODO: not sure whether we should .revoke(terminate=True)
        # TODO: one of the options below should work, but I don't have time to test it right now...

        # http://docs.celeryproject.org/en/latest/userguide/workers.html#worker-persistent-revokes
        from celery.result import AsyncResult
        AsyncResult(job_options['command_id']).revoke()

        # https://docs.celeryproject.org/en/stable/reference/celery.result.html
        from celery.result import GroupResult
        g = GroupResult(id=job_options['command_id'])

        # https://stackoverflow.com/questions/13685344/retrieving-groupresult-from-taskset-id-in-celery
        # We may need to call result.save() in the task above for it to work...
        from celery.result import GroupResult
        result = GroupResult.restore(job_options['command_id'])
        result.revoke()

        from celery.task.control import revoke
        revoke(job_options['command_id'])
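The TODO notes above hinge on one detail: GroupResult.restore() can only find the group if the group result was saved first, exactly as the Stack Overflow link suggests. A short sketch of that save-then-restore-then-revoke pattern (my_task and the surrounding wiring are illustrative assumptions, not part of the original code):

from celery import group
from celery.result import GroupResult


def start_jobs(my_task, job_ids):
    # my_task is assumed to be a registered Celery task.
    result = group(my_task.s(job_id) for job_id in job_ids).apply_async()
    result.save()              # required for GroupResult.restore() to find it later
    return result.id           # store this as job_options['command_id']


def stop_jobs(command_id, app=None):
    # Restore the saved group and revoke every still-pending child task.
    result = GroupResult.restore(command_id, app=app)
    if result is not None:
        result.revoke(terminate=True)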
Example #43
    def post(self, request):

        desired_time = time_jump(hours=-24)

        task = request.POST['task_id']
        res = GroupResult.restore(task)

        if res and not res.ready():
            return HttpResponse(json.dumps({"status": "loading"}), content_type="application/json")

        # Task completion allows for origin information to be pulled
        try:
            task_origin = TaskTracker.objects.get(group_id=task)
            record_type = task_origin.type
            indicator = task_origin.keyword

        except MultipleObjectsReturned:
            task_origin = TaskTracker.objects.filter(group_id=task).latest('date')
            record_type = task_origin.type
            indicator = task_origin.keyword

        except ObjectDoesNotExist:
            record_type = None
            indicator = None

        # Pull data according to the record type
        if record_type == "current":
            # Collect whois record for current records
            whois_record = WhoisRecord.objects.recent_record(indicator)
            self.template_vars["whois_record"] = whois_record

            # Collect host records for current records
            host_record = HostRecord.objects.current_hosts(indicator, desired_time)
            self.template_vars["host_record"] = host_record
            self.template_name = "pivoteer/CurrentRecords.html"

        elif record_type == "passive":
            host_records = HostRecord.objects.passive_records(indicator, request)
            self.template_vars["passive_records"] = host_records
            self.template_name = "pivoteer/PassiveRecords.html"

        elif record_type == "malware":
            malware_records = MalwareRecord.objects.malware_records(indicator)
            self.template_vars["malware_records"] = malware_records
            self.template_name = "pivoteer/MalwareRecords.html"

        elif record_type == "other":
            google_search = SearchEngineHits.objects.recent_record(indicator)
            self.template_vars["google_search"] = google_search
            self.template_name = "pivoteer/OtherRecords.html"

        return render(request, self.template_name, self.template_vars)
Example #44
 def _freeze(self, _id=None):
     opts = self.options
     try:
         gid = opts['group']
     except KeyError:
         gid = opts['group'] = uuid()
     new_tasks, results = [], []
     for task in self.tasks:
         task = maybe_subtask(task).clone()
         results.append(task._freeze())
         new_tasks.append(task)
     self.tasks = self.kwargs['tasks'] = new_tasks
     return GroupResult(gid, results)
Example #45
    def on_chord_part_return(self, request, state, result, **kwargs):
        if not self.implements_incr:
            return
        app = self.app
        gid = request.group
        if not gid:
            return
        key = self.get_key_for_chord(gid)
        try:
            deps = GroupResult.restore(gid, backend=self)
        except Exception as exc:
            callback = maybe_signature(request.chord, app=app)
            logger.error("Chord %r raised: %r", gid, exc, exc_info=1)
            return self.chord_error_from_stack(callback, ChordError("Cannot restore group: {0!r}".format(exc)))
        if deps is None:
            try:
                raise ValueError(gid)
            except ValueError as exc:
                callback = maybe_signature(request.chord, app=app)
                logger.error("Chord callback %r raised: %r", gid, exc, exc_info=1)
                return self.chord_error_from_stack(callback, ChordError("GroupResult {0} no longer exists".format(gid)))
        val = self.incr(key)
        size = len(deps)
        if val > size:  # pragma: no cover
            logger.warning("Chord counter incremented too many times for %r", gid)
        elif val == size:
            callback = maybe_signature(request.chord, app=app)
            j = deps.join_native if deps.supports_native_join else deps.join
            try:
                with allow_join_result():
                    ret = j(timeout=3.0, propagate=True)
            except Exception as exc:
                try:
                    culprit = next(deps._failed_join_report())
                    reason = "Dependency {0.id} raised {1!r}".format(culprit, exc)
                except StopIteration:
                    reason = repr(exc)

                logger.error("Chord %r raised: %r", gid, reason, exc_info=1)
                self.chord_error_from_stack(callback, ChordError(reason))
            else:
                try:
                    callback.delay(ret)
                except Exception as exc:
                    logger.error("Chord %r raised: %r", gid, exc, exc_info=1)
                    self.chord_error_from_stack(callback, ChordError("Callback error: {0!r}".format(exc)))
            finally:
                deps.delete()
                self.client.delete(key)
        else:
            self.expire(key, 86400)
    def group_result(self, app=None):
        """Return the GroupResult of self.

        Arguments:
        ---------
            app (Celery): app instance to create the GroupResult with.

        """
        return GroupResult(
            self.group_id,
            [result_from_tuple(r, app=app)
             for r in json.loads(self.sub_tasks)],
            app=app,
        )
Example #47
    def get(self, request, *args, **kwargs):
        task_id = kwargs.get('pk')
        if not task_id:
            return Response(status=status.HTTP_400_BAD_REQUEST)

        group_result = GroupResult.restore(task_id)

        if not group_result:
            return Response(status=status.HTTP_404_NOT_FOUND)
        if not group_result.ready():
            return Response({
                "status": settings.PROGRESS,
                "progress": {
                    "count": group_result.completed_count(),
                    "total": len(group_result)
                }
            })
        else:
            commit_if_not_yet(group_result)
            succeeded = {}
            failed = {}
            failed_count = 0
            succeeded_count = 0

            if group_result.supports_native_join:
                results = group_result.join_native()
            else:
                results = group_result.join()

            for res in results:
                task_stat, man_id, rem_url, errors, warnings = res
                if task_stat == settings.SUCCESS:
                    succeeded_count += 1
                    succeeded[rem_url] = {
                        'url': '/manifests/' + man_id,
                        'warnings': warnings
                    }
                else:
                    failed_count += 1
                    failed[rem_url] = {'errors': errors}

            d = {
                'succeeded': succeeded,
                'succeeded_count': succeeded_count,
                'failed': failed,
                'failed_count': failed_count,
                'total_count': len(group_result),
                'status': settings.SUCCESS
            }
            return Response(d)
def is_group_successful(request, group_id):
    """Return whether the group was successful, as a boolean."""
    results = GroupResult.restore(group_id)

    return JsonResponse({
        'group': {
            'id':
            group_id,
            'results': [{
                'id': task.id,
                'executed': task.successful()
            } for task in results] if results else []
        }
    })
Example #49
    def on_chord_part_return(self, task, propagate=None):
        if not self.implements_incr:
            return
        from celery import subtask
        from celery.result import GroupResult
        app = self.app
        if propagate is None:
            propagate = self.app.conf.CELERY_CHORD_PROPAGATES
        gid = task.request.group
        if not gid:
            return
        key = self.get_key_for_chord(gid)
        deps = GroupResult.restore(gid, backend=task.backend)
        if deps is None:
            callback = subtask(task.request.chord)
            return app._tasks[callback.task].backend.fail_from_current_stack(
                callback.id,
                exc=ChordError('GroupResult {0} no longer exists'.format(gid))
            )
        val = self.incr(key)
        if val >= len(deps):
            callback = subtask(task.request.chord)
            j = deps.join_native if deps.supports_native_join else deps.join
            try:
                ret = j(propagate=propagate)
            except Exception as exc:
                try:
                    culprit = next(deps._failed_join_report())
                    reason = 'Dependency {0.id} raised {1!r}'.format(
                        culprit, exc,
                    )
                except StopIteration:
                    reason = repr(exc)

                app._tasks[callback.task].backend.fail_from_current_stack(
                    callback.id, exc=ChordError(reason),
                )
            else:
                try:
                    callback.delay(ret)
                except Exception as exc:
                    app._tasks[callback.task].backend.fail_from_current_stack(
                        callback.id,
                        exc=ChordError('Callback error: {0!r}'.format(exc)),
                    )
            finally:
                deps.delete()
                self.client.delete(key)
        else:
            self.expire(key, 86400)
Example #50
def cancel(request, instance, import_type, import_event_id):
    ie = _get_import_event(instance, import_type, import_event_id)

    ie.status = GenericImportEvent.CANCELED
    ie.mark_finished_and_save()

    # If verifications tasks are still scheduled, we need to revoke them
    if ie.task_id:
        result = GroupResult.restore(ie.task_id)
        if result:
            result.revoke()

    # If we couldn't get the task, it is already effectively cancelled

    return list_imports(request, instance)
Example #51
    def process(self, taskid, format=None):
        "process a taskset"
        result = GroupResult.restore(taskid, backend=RBACKEND)
        if (result is None or
            'taskids' not in session or
            taskid not in session['taskids']):
            if format == 'json':
                return ajax_code(404,
                        _('The task status requested '
                        'has expired or does not exist'))
            flash(_('The task status requested has expired or does not exist'))
            redirect(url(controller='messages', action='quarantine'))
        percent = "0.0"
        status = 'PROGRESS'
        results = []
        if result.ready():
            finished = True
            results = result.join_native()
        else:
            session['bulkprocess-count'] += 1
            if (session['bulkprocess-count'] >= 10 and
                result.completed_count() == 0):
                result.revoke()
                del session['bulkprocess-count']
                session.save()
                if format == 'json':
                    return ajax_code(503,
                        _('An error occurred in processing, try again later'))
                flash_alert(
                        _('An error occurred in processing, try again later'))
                redirect(url(controller='messages', action='quarantine'))
            finished = False
            percent = "%.1f" % ((1.0 * int(result.completed_count()) /
                                len(result)) * 100)

        if format == 'json':
            response.headers['Content-Type'] = 'application/json'
            data = dict(finished=finished,
                        results=results,
                        status=status,
                        completed=percent)
            return json.dumps(data)

        c.finished = finished
        c.results = results
        c.status = status
        c.completed = percent
        return self.render('/messages/taskstatus.html')
Example #52
def cancel_task(taskid):
    """
    Cancel a task depending on the taskid parameter.
    The taskid can be a list of subtask ids or
    the group task id.
    :param taskid: Task id.
    """
    if isinstance(taskid, list):
        # taskid is a subtasks id's list
        for stask in taskid:
            logger.info("Cancelling subtask: {0}".format(stask))
            AsyncResult(stask).revoke(terminate=True, signal="SIGKILL")
    else:
        # taskid is a GroupResult
        logger.info("Canceling group task: {0}".format(taskid))
        gtask = GroupResult.restore(taskid)
        gtask.revoke(terminate=True, signal="SIGKILL")
Example #53
 def on_chord_part_return(self, task, propagate=False):
     if not self.implements_incr:
         return
     from celery import subtask
     from celery.result import GroupResult
     gid = task.request.group
     if not gid:
         return
     key = self.get_key_for_chord(gid)
     deps = GroupResult.restore(gid, backend=task.backend)
     val = self.incr(key)
     if val >= len(deps):
         subtask(task.request.chord).delay(deps.join(propagate=propagate))
         deps.delete()
         self.client.delete(key)
     else:
         self.expire(key, 86400)
    def handle(self, *args, **kwargs):  # pylint: disable=unused-argument
        edx_course_key = kwargs.get('edx_course_key')
        try:
            run = CourseRun.objects.get(edx_course_key=edx_course_key)
        except CourseRun.DoesNotExist:
            raise CommandError('Course Run for course_id "{0}" does not exist'.format(edx_course_key))

        if not run.can_freeze_grades:
            self.stdout.write(
                self.style.ERROR(
                    'Course Run "{0}" cannot be marked as frozen yet'.format(edx_course_key)
                )
            )
            return

        if CourseRunGradingStatus.is_complete(run):
            self.stdout.write(
                self.style.SUCCESS(
                    'Course Run "{0}" is already marked as complete'.format(edx_course_key)
                )
            )
            return

        # check if there are tasks running
        cache_id = CACHE_ID_BASE_STR.format(edx_course_key)
        group_results_id = cache_redis.get(cache_id)
        if group_results_id is not None:
            results = GroupResult.restore(group_results_id, app=app)
            if results and not results.ready():
                self.stdout.write(
                    self.style.WARNING(
                        'Tasks for Course Run "{0}" are still running. '
                        'Impossible to set the global "complete" status'.format(edx_course_key)
                    )
                )
                return
            # if the tasks are done remove the entry in the cache
            cache_redis.delete(group_results_id)

        CourseRunGradingStatus.set_to_complete(run)
        self.stdout.write(
            self.style.SUCCESS(
                'Course Run "{0}" has been marked as complete'.format(edx_course_key)
            )
        )
Example #55
def status(group_id):
    result = GroupResult.restore(group_id)
    if not result:
        return json.dumps({
            'error': 'Does not exist'
        })

    return json.dumps({
        'tasks': [
            {
                'id': task.id,
                'status': task.status,
                'ready': task.ready(),
                'result': task.result
            } for task in result.children
        ],
        'ready': result.ready(),
        'completed': result.completed_count()
    })
Example #56
    def get(self, request, *args, **kwargs):
        task_id = kwargs.get('pk')
        if not task_id:
            return Response(status=status.HTTP_400_BAD_REQUEST)

        group_result = GroupResult.restore(task_id)

        if not group_result:
            return Response(status=status.HTTP_404_NOT_FOUND)
        if not group_result.ready():
            return Response({"status": settings.PROGRESS,
                             "progress": {"count": group_result.completed_count(),
                                          "total": len(group_result)}})
        else:
            succeeded = {}
            failed = {}
            failed_count = 0
            succeeded_count = 0

            if group_result.supports_native_join:
                results = group_result.join_native()
            else:
                results = group_result.join()

            for res in results:
                task_stat, man_id, rem_url, errors, warnings = res
                if task_stat == settings.SUCCESS:
                    succeeded_count += 1
                    succeeded[rem_url] = {'url': '/manifests/'+man_id,
                                          'warnings': warnings}
                else:
                    failed_count += 1
                    failed[rem_url] = {'errors': errors}

            d = {'succeeded': succeeded, 'succeeded_count': succeeded_count,
                 'failed': failed, 'failed_count': failed_count,
                 'total_count': len(group_result), 'status': settings.SUCCESS}

            return Response(d)
Example #57
class test_pending_Group(AppCase):
    def setup(self):
        self.ts = GroupResult(uuid(), [AsyncResult(uuid()), AsyncResult(uuid())])

    def test_completed_count(self):
        self.assertEqual(self.ts.completed_count(), 0)

    def test_ready(self):
        self.assertFalse(self.ts.ready())

    def test_waiting(self):
        self.assertTrue(self.ts.waiting())

    def x_join(self):
        with self.assertRaises(TimeoutError):
            self.ts.join(timeout=0.001)

    @skip_if_quick
    def x_join_longer(self):
        with self.assertRaises(TimeoutError):
            self.ts.join(timeout=1)
Example #58
 def on_chord_part_return(self, task, propagate=None):
     if not self.implements_incr:
         return
     from celery import subtask
     from celery.result import GroupResult
     app = self.app
     if propagate is None:
         propagate = self.app.conf.CELERY_CHORD_PROPAGATES
     gid = task.request.group
     if not gid:
         return
     key = self.get_key_for_chord(gid)
     deps = GroupResult.restore(gid, backend=task.backend)
     val = self.incr(key)
     if val >= len(deps):
         j = deps.join_native if deps.supports_native_join else deps.join
         callback = subtask(task.request.chord)
         try:
             ret = j(propagate=propagate)
         except Exception, exc:
             try:
                 culprit = deps._failed_join_report().next()
                 reason = 'Dependency %s raised %r' % (culprit.id, exc)
             except StopIteration:
                 reason = repr(exc)
             app._tasks[callback.task].backend.fail_from_current_stack(
                 callback.id, exc=ChordError(reason),
             )
         else:
             try:
                 callback.delay(ret)
             except Exception, exc:
                 app._tasks[callback.task].backend.fail_from_current_stack(
                     callback.id,
                     exc=ChordError('Callback error: %r' % (exc, )),
                 )