Example #1
    def deploy_contract(
            self,
            uuid: UUID,
            contract_name: str,
            args: Optional[tuple] = None, kwargs: Optional[dict] = None,
            signing_address: Optional[str] = None, encrypted_private_key: Optional[str]=None,
            gas_limit: Optional[int] = None,
            prior_tasks: Optional[UUIDList] = None
    ):
        """
        The main deploy contract entrypoint for the processor.

        :param uuid: the celery generated uuid for the task
        :param contract_name: the contract name; the system will attempt to fetch the ABI and bytecode using this
        :param args: arguments for the constructor
        :param kwargs: keyword arguments for the constructor
        :param signing_address: address of the wallet signing the txn
        :param encrypted_private_key: private key of the wallet making the transaction, encrypted using key from settings
        :param gas_limit: limit on the amount of gas txn can use. Overrides system default
        :param prior_tasks: a list of task uuid that must succeed before this task will be attempted
        """

        signing_wallet_obj = self.get_signing_wallet_object(signing_address, encrypted_private_key)

        task = self.persistence_interface.create_deploy_contract_task(uuid,
                                                                      signing_wallet_obj,
                                                                      contract_name,
                                                                      args, kwargs,
                                                                      gas_limit,
                                                                      prior_tasks)

        # Attempt Create Async Transaction
        signature(utils.eth_endpoint('_attempt_transaction'), args=(task.uuid,)).delay()
Example #2
def send_newsletter() -> None:
    users_sig = signature("users.get_all", queue="users")

    newsletters_sig = signature("mailing.send_newsletter_all", queue="mailing")

    task_chain = chain(users_sig | newsletters_sig)
    task_chain.apply_async()
Example #3
    def send_eth(self,
                 uuid: UUID,
                 amount_wei: int,
                 recipient_address: str,
                 signing_address: Optional[str] = None, encrypted_private_key: Optional[str] = None,
                 prior_tasks: Optional[UUIDList] = None,
                 posterior_tasks: Optional[UUIDList] = None):
        """
        The main entrypoint for sending eth.

        :param uuid: the celery generated uuid for the task
        :param amount_wei: the amount in WEI to send
        :param recipient_address: the recipient address
        :param signing_address: address of the wallet signing the txn
        :param encrypted_private_key: private key of the wallet making the transaction, encrypted using key from settings
        :param prior_tasks: a list of task uuids that must succeed before this task will be attempted
        :param posterior_tasks: a uuid list of tasks for which this task must succeed before they will be attempted
        :return: task_id
        """

        signing_wallet_obj = self.get_signing_wallet_object(signing_address, encrypted_private_key)

        task = self.persistence_interface.create_send_eth_task(uuid,
                                                               signing_wallet_obj,
                                                               recipient_address, amount_wei,
                                                               prior_tasks,
                                                               posterior_tasks)

        # Attempt Create Async Transaction
        signature(utils.eth_endpoint('_attempt_transaction'), args=(task.uuid,)).delay()
Example #4
    def send_tasks(self,
                   from_loc: str,
                   to_loc: str,
                   start_date: str,
                   nb_passenger: str,
                   workers: list = None) -> AsyncResult:
        if workers is None:
            workers = self.workers
        kwargs = {
            "from_loc": from_loc,
            "to_loc": to_loc,
            "start_date": start_date,
            "nb_passenger": nb_passenger
        }
        sigs = [
            signature(
                "worker",
                kwargs=kwargs,
                routing_key=f"journey.{k}",
                exchange="bonvoyage",
            ) for k in workers
        ]
        print(sigs)
        broker = signature("broker",
                           kwargs=kwargs,
                           routing_key="journey.broker",
                           exchange="bonvoyage")
        r = chord(sigs)(broker)

        return r
Example #5
def trigger_event(group_name, event_name):

    event_source = Group.objects.get(name=group_name)
    event_definition = EventDefinition.objects.get(name=event_name)
    event_write = Event(event_definition=event_definition,
                        device_group=event_source)
    event_write.save()
    # completion of automation writes an event which could trigger another automation
    automations = Automation.objects.filter(cause=event_definition,
                                            source_group=event_source)
    for automation in automations:
        dummy_result = [None]
        event_target = automation.target_group
        config_id = str(event_target.get_config_id())
        # initialize automation chain with triggered automation
        automation_chain = signature(automation.effect,
                                     args=dummy_result,
                                     kwargs={'config_id': config_id})
        a = automation
        while a.next:
            a = a.next
            automation_chain |= (signature(
                'events.tasks.evaluate_event',
                kwargs={
                    'group_name': group_name,
                    'event_name': event_name
                }) | signature(a.effect, kwargs={'config_id': config_id}))
        automation_chain.apply_async()
Example #6
    def check_transaction_response(self, celery_task, transaction_id):
        def transaction_response_countdown():
            t = lambda retries: ETH_CHECK_TRANSACTION_BASE_TIME * 2**retries

            # If the system has been longer than the max retry period
            # if previous_result:
            #     submitted_at = datetime.strptime(previous_result['submitted_date'], "%Y-%m-%d %H:%M:%S.%f")
            #     if (datetime.utcnow() - submitted_at).total_seconds() > ETH_CHECK_TRANSACTION_RETRIES_TIME_LIMIT:
            #         if self.request.retries != self.max_retries:
            #             self.request.retries = self.max_retries - 1
            #
            #         return 0

            return t(celery_task.request.retries)

        try:
            transaction_object = self.persistence_interface.get_transaction(
                transaction_id)

            task = transaction_object.task

            transaction_hash = transaction_object.hash

            result = self.check_transaction_hash(transaction_hash)

            self.persistence_interface.update_transaction_data(
                transaction_id, result)

            status = result.get('status')

            print(
                f'Status for transaction {transaction_object.id} of task UUID {task.uuid} is:'
                f'\n {status}')

            if status == 'SUCCESS':

                unstarted_posteriors = self.get_unstarted_posteriors(task)

                for dep_task in unstarted_posteriors:
                    print('Starting posterior task: {}'.format(dep_task.uuid))
                    signature(utils.eth_endpoint('_attempt_transaction'),
                              args=(dep_task.uuid, )).delay()

                self.persistence_interface.set_task_status_text(
                    task, 'SUCCESS')

            if status == 'PENDING':
                celery_task.request.retries = 0
                raise Exception("Need Retry")

            if status == 'FAILED':
                self.new_transaction_attempt(task)

        except TaskRetriesExceededError as e:
            pass

        except Exception as e:
            print(e)
            celery_task.retry(countdown=transaction_response_countdown())
Example #7
def create_and_store_estimator_from_all_eligible(
        minimum_verifications=ff_conf.ML_MINIMUM_TAG_VERIFICATIONS_DURING_STAGE_1):
    return (
        celery.signature('django.training_eligible_data',
                         args=(minimum_verifications,)) |
        celery.signature('learn.create_estimator') |
        celery.signature('results.store_estimator')
    ).apply_async()
Example #8
 def send(self, message, extra={}, tags={}, sentry_data={}, crash_obj=None):
     event_id = self.client.capture('raven.events.Message',
                                    message=message,
                                    extra=extra,
                                    tags=tags,
                                    data=sentry_data)
     signature("tasks.get_sentry_link",
               args=(crash_obj.pk, event_id)).apply_async(queue='private',
                                                          countdown=1)
Example #9
    def test_link_error_using_signature(self):
        fail = signature('t.integration.tasks.fail', args=("test", ))
        return_exception = signature('t.integration.tasks.return_exception')

        fail.link_error(return_exception)

        exception = ExpectedException("Task expected to fail", "test")
        assert (fail.delay().get(timeout=TIMEOUT,
                                 propagate=False), True) == (exception, True)
Example #10
def unlock_graph(result, callback,
                 interval=1, propagate=False, max_retries=None):
    if result.ready():
        second_level_res = result.get()
        if second_level_res.ready():
            signature(callback).delay(list(joinall(
                second_level_res, propagate=propagate)))
    else:
        unlock_graph.retry(countdown=interval, max_retries=max_retries)
Example #11
 def send(self, message, extra={}, tags={}, sentry_data={}, crash_obj=None):
     event_id = self.client.capture(
         'raven.events.Message',
         message=message,
         extra=extra,
         tags=tags,
         data=sentry_data
     )
     signature("tasks.get_sentry_link", args=(crash_obj.pk, event_id)).apply_async(queue='private', countdown=1)
Example #12
 def upgrade(self, request, pk=None):
     """
     更新节点
     """
     data = request.data
     data['version'] = data.get('version') or 'null'
     TbCetusNodeInfo.objects.filter(pk=data['id']).update(status=2)
     signature('upgrade_node', kwargs=data).delay()
     return Response('更新请求发送成功', status=status.HTTP_200_OK)
Example #13
def get_submission(self, submission_id):

    json_metrics = []
    try:
        start_time = time.time()
        submission = reddit.submission(id=submission_id)
        fetch_time = time.time() - start_time
    except prawcore.exceptions.PrawcoreException:
        logging.info('Cannot get submission from Reddit API.')
    else:
        submission = RedditSubmission(
            original_URL=submission.url,
            author_name=submission.author.name,
            subreddit_name=submission.subreddit.name,
            subbreddit_display_name=submission.subreddit.display_name,
            post_title=submission.title,
            post_title_embedding=[],
            post_text=submission.selftext,
            post_text_embedding=[],
            upvote_ratio=submission.upvote_ratio,
            up_votes_number=submission.score,
            comments_number=submission.num_comments,
            nsfw=submission.over_18,
            spoiler=submission.spoiler,
            original=submission.is_original_content,
            distinguished=submission.distinguished,
            locked=submission.locked,
            fetch_time=fetch_time)

        title = submission.post_title
        title_to_log = (title[:15] + '..') if len(title) > 15 else title
        logging.info('SUBMISSION TITLE: ' + title_to_log)

        x = signature('put_embeddings', args=[submission]) | signature('send_to_mongo')
        x.apply_async()

        json_metrics = [{
            "measurement": "submissions",
            "time": str(datetime.now()),
            "fields": {
                "fetch_time": fetch_time,
                "title_len": len(submission.post_title),
                "text_len": len(submission.post_text),
                "nsfw": str(submission.nsfw),
                "spoiler": str(submission.spoiler),
                "original_content": str(submission.original),
            }
        }]

    try:
        influxdb_client.write_points(json_metrics)
    except influxdb.exceptions.InfluxDBServerError:
        logging.info('Cannot send metrics to InfluxDB - Server Error.')
    except influxdb.exceptions.InfluxDBClientError:
        logging.info('Cannot send metrics to InfluxDB - Client Error.')
Example #14
def notify_api_status(analysis_pk, task_status):
    logging.info("Notify API: analysis_id={}, status={}".format(
        analysis_pk,
        task_status
    ))
    signature(
        'set_task_status',
        args=(analysis_pk, task_status),
        queue='celery'
    ).delay()
Example #15
def register_worker(sender, **k):
    m_supplier = os.environ.get('OASIS_MODEL_SUPPLIER_ID')
    m_name = os.environ.get('OASIS_MODEL_ID')
    m_id = os.environ.get('OASIS_MODEL_VERSION_ID')
    logging.info(
        'register_worker: SUPPLIER_ID={}, MODEL_ID={}, VERSION_ID={}'.format(
            m_supplier, m_name, m_id))
    signature('run_register_worker',
              args=(m_supplier, m_name, m_id),
              queue='celery').delay()
Example #16
 def upgrade(self, request, pk=None):
     """
     更新服务
     """
     data = request.data
     data['version'] = data.get('version') or 'null'
     for item in TbCetusNodeInfo.objects.filter(group_id=pk):
         data.update(id=item.id)
         TbCetusNodeInfo.objects.filter(pk=item.id).update(status=2)
         signature('upgrade_node', kwargs=data).delay()
     return Response('更新请求发送成功', status=status.HTTP_200_OK)
Example #17
 def install(self, request):
     """
     安装服务
     """
     data = request.data
     data['version'] = data.get('version') or 'null'
     data['path'] = data.get('path') or '/home'
     cetus_info = self.create(request)
     data['id'] = cetus_info.data.id
     signature('install_cetus', kwargs=data).delay()
     return Response('安装请求发送成功', status=status.HTTP_200_OK)
Example #18
    def run_copr_build_handler(self, event_data: dict, number_of_builds: int):
        for _ in range(number_of_builds):
            self.pushgateway.copr_builds_queued.inc()

        signature(
            TaskName.copr_build.value,
            kwargs={
                "package_config": dump_package_config(self.package_config),
                "job_config": dump_job_config(self.job_config),
                "event": event_data,
            },
        ).apply_async()
Example #19
def on_error(request, ex, traceback, record_task_name, analysis_pk,
             initiator_pk):
    """
    Because of how celery works we need to include a celery task registered in the
    current app to pass to the `link_error` function on a chain.

    This function takes the error and passes it on back to the server so that it can store
    the info on the analysis.
    """
    signature(record_task_name,
              args=(analysis_pk, initiator_pk, traceback),
              queue='celery').delay()
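For context, a minimal sketch of how an error callback like this is typically attached to a chain via `link_error`; the chain's task names and the primary-key values below are hypothetical, while the `on_error` signature mirrors the usage shown in Example #35:

from celery import chain, signature

# Hypothetical identifiers used purely for illustration.
analysis_pk, initiator_pk = 42, 7

run_chain = chain(
    signature('run_analysis', args=(analysis_pk,), queue='celery'),
    signature('record_run_analysis_result', args=(analysis_pk, initiator_pk), queue='celery'),
)

# 'on_error' is registered in the worker app, so it can serve as the chain's
# error callback and forward the traceback back to the server.
error_callback = signature(
    'on_error',
    args=('record_run_analysis_failure', analysis_pk, initiator_pk),
    queue='celery',
)
run_chain.apply_async(link_error=error_callback)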
Example #20
def _batch_by_size(collection_name, documents):
    """Groups a list of documents into batches which are below the size limit,
    so that they can be sent to Solr together
    """

    file_names = [path.json_text_path(d["pk"], d["slug"]) for d in documents]
    text_sizes = storage.async_size(file_names)

    docs_with_sizes = list(zip(documents, text_sizes))

    docs_with_sizes.sort(key=lambda x: x[1], reverse=True)

    batch = []
    batch_size = 0
    tasks = []
    for document, size in docs_with_sizes:
        if size > settings.SOLR_INDEX_MAX_SIZE:
            # if the single document is too large to index at once, index it by itself
            # so it can be indexed in pieces
            logger.info("[SOLR INDEX] batching single document %s size %s",
                        document["pk"], size)
            tasks.append(
                signature(
                    "documentcloud.documents.tasks.solr_reindex_single",
                    args=[collection_name, document["pk"]],
                ))
        elif (batch_size + size > settings.SOLR_INDEX_MAX_SIZE
              or len(batch) >= settings.SOLR_INDEX_BATCH_LIMIT):
            # if adding the next document would make the batch larger than the max
            # size, or if the batch is already at the max size,
            # then send the current batch to be indexed and start a new batch
            logger.info("[SOLR INDEX] batch of %s size %s", batch, batch_size)
            tasks.append(
                signature(
                    "documentcloud.documents.tasks.solr_reindex_batch",
                    args=[collection_name, batch],
                ))
            batch = [document["pk"]]
            batch_size = size
        else:
            # otherwise add the current document to the current batch
            batch.append(document["pk"])
            batch_size += size

    logger.info("[SOLR INDEX] batch of %s size %s", batch, batch_size)
    tasks.append(
        signature(
            "documentcloud.documents.tasks.solr_reindex_batch",
            args=[collection_name, batch],
        ))
    return tasks
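A minimal usage sketch, with a hypothetical collection name and documents carrying the "pk" and "slug" keys this helper reads; the returned signatures can be dispatched together as a Celery group:

from celery import group

# Hypothetical documents; each entry needs the "pk" and "slug" keys used above.
documents = [
    {"pk": 1, "slug": "annual-report"},
    {"pk": 2, "slug": "meeting-minutes"},
]

# Build the batched reindex signatures and dispatch them as one group.
tasks = _batch_by_size("documents_2024", documents)
group(tasks).apply_async()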
Example #21
        def _broadcast(**kwargs):
            """Broadcasts an event by calling the registered tasks."""

            app_name = kwargs.pop('app_name', None)
            event_name = kwargs.pop('event_name', None)

            if app_name and event_name:
                event = app.registry.event(app_name,
                                           event_name,
                                           raise_does_not_exist=True)
                app.update_local_event(event)
                for task in event.tasks:
                    signature(task.name, kwargs=kwargs,
                              queue=task.queue).delay()
Example #22
def loadbook(bag,
             mmsid=None,
             outformat="JPEG",
             filter="ANTIALIAS",
             scale=0.4,
             crop=None,
             collection='islandora:bookCollection'):
    """
    Generate derivative of Bag and load into S3.
    
    args:
      bag: name of bag to load
      mmsid: MMS ID is needed to obtain MARC XML
      outformat - string representation of image format - default is "JPEG". 
                  Available Formats: http://pillow.readthedocs.io/en/3.4.x/handbook/image-file-formats.html
      scale - percentage to scale by represented as a decimal
      filter - string representing filter to apply to resized image - default is "ANTIALIAS"
      crop - list of coordinates to crop from - i.e. [10, 10, 200, 200]
      collection: Name of Islandora collection to ingest to. Default is: islandora:bookCollection  
    """

    # Generate derivatives and store in s3 an local
    deriv_gen = signature(
        "imageq.tasks.tasks.derivative_generation",
        kwargs={
            'bags': bag,
            's3_bucket': s3_bucket,
            's3_source': s3_source,
            's3_destination': s3_derivative,
            'outformat': outformat,
            'filter': filter,
            'scale': scale,
            'crop': crop,
            'upload_s3':
            False  # derivatives will be uploaded later during recipewriterq's process_derivative task
        })
    # generate recipe files and process derivatives into bags
    process_derivs = signature("recipewriterq.tasks.tasks.process_derivative",
                               kwargs={'mmsid': mmsid})

    # add entries to data catalog

    # load into islandora
    #ingest_recipe = signature("islandoraq.tasks.tasks.ingest_recipe", kwargs={'collection': collection})

    #chain = (deriv_gen | process_derivs | ingest_recipe)
    chain = (deriv_gen | process_derivs)
    result = chain()
    return "Kicked off tasks to generate derivative for {0}".format(bag)
Example #23
def send_stacktrace_sentry(crash):
    stacktrace = crash.stacktrace_json
    exception = {
        "values": [{
            "type":
            stacktrace.get('crash_info', {}).get('type', 'unknown exception'),
            "value":
            stacktrace.get('crash_info', {}).get('crash_address', '0x0'),
            "stacktrace":
            stacktrace['crashing_thread']
        }]
    }

    data = {'sentry.interfaces.Exception': exception}

    if crash.userid:
        data['sentry.interfaces.User'] = dict(id=crash.userid)

    extra = dict(
        crash_admin_panel_url='http://{}{}'.format(
            settings.HOST_NAME, '/admin/crash/crash/%s/' % crash.pk),
        crashdump_url=crash.upload_file_minidump.url,
    )

    tags = {}
    if crash.meta:
        extra.update(crash.meta)
        ver = crash.meta.get('ver')
        if ver:
            tags['ver'] = ver
    if crash.channel:
        tags['channel'] = crash.channel
    if crash.archive:
        extra['archive_url'] = crash.archive.url

    tags.update(stacktrace.get('system_info', {}))

    if crash.appid:
        tags['appid'] = crash.appid

    event_id = client.capture('raven.events.Message',
                              message=crash.signature,
                              extra=extra,
                              tags=tags,
                              data=data)
    signature("tasks.get_sentry_link",
              args=(crash.pk, event_id)).apply_async(queue='private',
                                                     countdown=1)
Example #24
def process_video(video_name: str):
    video = VideoCapture(str(Config.VIDEO_STORAGE / video_name))
    frames = _frame_from_video(video)

    chord_obj = chord(
        [
            signature(
                "detector.detector.detect_frames",
                args=(chunk, ),
                queue="detector",
            ) for chunk in _chunkit(frames)
        ],
        body=signature("detector.processor.merge_chunks", queue="processor"),
    )
    chord_res = chord_obj()
    return chord_res.id
Example #25
def group_on_results(
    iterable: Iterable[Any],
    task_signature: Dict[str, Any],
    callback: Dict[str, Any],
    task_signature_errback: Optional[Dict[str, Any]] = None,
    final: Optional[Dict[str, Any]] = None,
) -> AsyncResult:
    task_group = group(*(chain(
        signature(task_signature).clone((item, )).set(
            link_error=task_signature_errback),
        signature(callback),
    ) for item in iterable))

    if final is not None:
        return chord(task_group)(signature(final))
    return task_group.apply_async()
Example #26
    def handle(self, *args, **options):
        filter_qs = {}

        # Don't include disabled projects by default
        if not options['disabled_projects']:
            filter_qs['entity__resource__project__disabled'] = False

        # Don't include obsolete by default
        if not options['obsolete_entities']:
            filter_qs['entity__obsolete'] = False

        translations_pks = (
            Translation.objects
            .filter(
                entity__resource__format__in=DB_FORMATS,
                **filter_qs
            )
            .values_list('pk', flat=True)
        )

        # Split translations into even batches and send them to Celery workers
        batch_size = int(options['batch_size'])
        group(
            signature(
                check_translations,
                args=(translations_pks[i:i + batch_size],)
            )
            for i in range(0, len(translations_pks), batch_size)
        ).apply_async()
Example #27
    def transact_with_function_task(self,
                                    signing_address,
                                    contract_address,
                                    contract_type,
                                    func,
                                    args=None,
                                    gas_limit=None,
                                    prior_tasks=None,
                                    reverses_task=None):

        kwargs = {
            'signing_address': signing_address,
            'contract_address': contract_address,
            'abi_type': contract_type,
            'function': func,
            'args': args,
            'prior_tasks': prior_tasks,
            'reverses_task': reverses_task
        }

        if gas_limit:
            kwargs['gas_limit'] = gas_limit

        return signature(eth_endpoint('transact_with_contract_function'),
                         kwargs=kwargs)
Example #28
 def send_eth_task(self, signing_address, amount_wei, recipient_address):
     return signature(eth_endpoint('send_eth'),
                      kwargs={
                          'signing_address': signing_address,
                          'amount_wei': amount_wei,
                          'recipient_address': recipient_address
                      })
Example #29
    def test_on_chord_part_return(self, restore):
        tb = CacheBackend(backend='memory://', app=self.app)

        deps = Mock()
        deps.__len__ = Mock()
        deps.__len__.return_value = 2
        restore.return_value = deps
        task = Mock()
        task.name = 'foobarbaz'
        self.app.tasks['foobarbaz'] = task
        task.request.chord = signature(task)

        result = self.app.GroupResult(
            uuid(),
            [self.app.AsyncResult(uuid()) for _ in range(3)],
        )
        task.request.group = result.id
        tb.apply_chord(result, None)

        deps.join_native.assert_not_called()
        tb.on_chord_part_return(task.request, 'SUCCESS', 10)
        deps.join_native.assert_not_called()

        tb.on_chord_part_return(task.request, 'SUCCESS', 10)
        deps.join_native.assert_called_with(propagate=True, timeout=3.0)
        deps.delete.assert_called_with()
Example #31
def update_unconfigured_nodes():
    DBSession = get_sql_session()
    devices = DBSession.query(OpenWrt).filter(OpenWrt.configured==False)
    for device in devices:
        arguments = (device.uuid,)
        update_device_task = signature('openwifi.jobserver.tasks.get_config', args=arguments)
        update_device_task.delay()
Example #32
def get_provider_responses(provider=None):
    """
    Get the price responses from the various providers
    """
    providers_names = providers.__all__
    provider_list = []

    for provider_name in providers_names:
        if provider is not None and provider_name != provider:
            continue
        # generate a task signature for this provider
        provider_sig = signature(
            getattr(tasks, 'get_provider_response'),
            kwargs={
                'provider_name': provider_name
            },
            immutable=True
        )
        # append to the list of signatures
        provider_list.append(provider_sig)

    # turn the list of signatures into a Celery group
    provider_group = group(provider_list)
    # then run the group
    provider_group.apply_async()
Example #33
 def apply_async(self, task_signature, ignore_result=False,
                 additional_params=None, task_args=None, task_kwargs=None):
     """
     Вызывает таск на стороне RabbitMQ источника
     :param task_signature: string сигнатура вызова таска
     :param ignore_result: bool признак "игнорировать результат от внешнего
     приложения"
     :param additional_params: dict дополнительные параметры (можно задать
     queue, routing_key, ...)
     :param task_args: list неименованные параметры
     :param task_kwargs: dict именованные параметры
     :return: результат выполнения таска
     """
     signature_task = signature(task_signature, app=self.app)
     async_result = signature_task.apply_async(
         args=task_args or (),
         kwargs=task_kwargs or {},
         **self.get_default_celery_settings(additional_params)
     )
     if not ignore_result:
         # wait for the task to finish and return its result
         # for further logging
         external_task_result = async_result.get()
         if async_result.state == 'FAILURE':
             # if an error occurred on the external application side,
             # async_result.state == 'FAILURE' and async_result.traceback
             # contains its traceback
             raise Exception(async_result.traceback)
         return external_task_result
     # do not return the external task's result when we explicitly
     # ignore the result
     return
Example #34
def get_ccxt_responses():
    """
    Get the ticker responses from the CCXT wrapper
    """
    exchange_list = []

    for exchange in ccxt.exchanges:
        try:
            wrapper = getattr(ccxt, exchange)()
        except Exception:
            continue

        if not wrapper.has['fetchTicker']:
            logger.info('No ticker available for {}'.format(exchange.title()))
            continue

        # generate a task signature for this exchange
        exchange_sig = signature(
            getattr(tasks, 'get_ccxt_response'),
            kwargs={
                'exchange': exchange
            },
            immutable=True
        )
        # append to the list of signatures
        exchange_list.append(exchange_sig)

    # turn the list of signatures into a Celery group
    exchange_group = group(exchange_list)
    # then run the group
    exchange_group.apply_async()
Example #35
    def test_state_is_ready___run_is_started(self, status, task_id):
        with TemporaryDirectory() as d:
            with override_settings(MEDIA_ROOT=d):
                res_factory = FakeAsyncResultFactory(target_task_id=task_id)
                analysis = fake_analysis(status=status,
                                         run_task_id=task_id,
                                         input_file=fake_related_file(),
                                         settings_file=fake_related_file())
                initiator = fake_user()

                sig_res = Mock()
                sig_res.delay.return_value = res_factory(task_id)

                with patch(
                        'src.server.oasisapi.analyses.models.Analysis.run_analysis_signature',
                        PropertyMock(return_value=sig_res)):
                    analysis.run(initiator)

                    sig_res.link.assert_called_once_with(
                        record_run_analysis_result.s(analysis.pk,
                                                     initiator.pk))
                    sig_res.link_error.assert_called_once_with(
                        signature('on_error',
                                  args=('record_run_analysis_failure',
                                        analysis.pk, initiator.pk),
                                  queue=analysis.model.queue_name))
                    sig_res.delay.assert_called_once_with()
Example #36
 def create_signature(vertex):
     task_name = 'tasks.{}.{}'.format(
         vertex['domain'],
         vertex['id']
     )
     task_config = vertex['parameters']
     return signature(task_name, kwargs=task_config)
Example #37
def send_stacktrace_sentry(crash):
    stacktrace = crash.stacktrace_json
    exception = {
        "values": [
            {
                "type": stacktrace.get('crash_info', {}).get('type', 'unknown exception'),
                "value": stacktrace.get('crash_info', {}).get('crash_address', '0x0'),
                "stacktrace": stacktrace['crashing_thread']
            }
        ]
    }

    data = {'sentry.interfaces.Exception': exception}

    if crash.userid:
        data['sentry.interfaces.User'] = dict(id=crash.userid)

    extra = dict(
        crash_admin_panel_url='http://{}{}'.format(
            settings.HOST_NAME,
            '/admin/crash/crash/%s/' % crash.pk),
        crashdump_url=crash.upload_file_minidump.url,
    )

    tags = {}
    if crash.meta:
        extra.update(crash.meta)
        ver = crash.meta.get('ver')
        if ver:
            tags['ver'] = ver
    if crash.archive:
        extra['archive_url'] = crash.archive.url

    tags.update(stacktrace.get('system_info', {}))

    if crash.appid:
        tags['appid'] = crash.appid

    event_id = client.capture(
        'raven.events.Message',
        message=crash.signature,
        extra=extra,
        tags=tags,
        data=data
    )
    signature("tasks.get_sentry_link", args=(crash.pk, event_id)).apply_async(queue='private', countdown=1)
Example #38
 def form_valid(self, form):
     email_sender = getattr(settings, 'EMAIL_SENDER', None)
     email_recipients = getattr(settings, 'EMAIL_RECIPIENTS', None)
     obj = form.save()
     if email_sender and email_recipients:
         (signature("tasks.send_email_feedback",
                    args=(obj.pk, email_sender, email_recipients))
          .apply_async(queue='private', countdown=1))
     return HttpResponse(obj.pk, status=200)
Example #39
 def create_task(self):
     tid = uuid()
     task = Mock(name='task-{0}'.format(tid))
     task.name = 'foobarbaz'
     self.app.tasks['foobarbaz'] = task
     task.request.chord = signature(task)
     task.request.id = tid
     task.request.chord['chord_size'] = 10
     task.request.group = 'group_id'
     return task
Example #40
 def create_task():
     tid = uuid()
     task = Mock(name="task-{0}".format(tid))
     task.name = "foobarbaz"
     self.app.tasks["foobarbaz"] = task
     task.request.chord = signature(task)
     task.request.id = tid
     task.request.chord["chord_size"] = 10
     task.request.group = "group_id"
     return task
Example #41
    def test_unregistered_task_can_be_used_as_error_callback(self, mock_group):
        b = BaseBackend(app=self.app)
        b._store_result = Mock()

        request = Mock(name='request')
        request.errbacks = [signature('doesnotexist',
                                      immutable=True)]
        exc = KeyError()
        b.mark_as_failure('id', exc, request=request)
        mock_group.assert_called_once_with(request.errbacks, app=self.app)
Example #42
def automatically_tag_with_stored_estimator(all_analysis_ids, stored_estimator_id):
    for analysis_ids in chunkify(all_analysis_ids, 100):
        analyses = [(
            analysis.id,
            analysis.hu_moments,
            analysis.centroid,
            analysis.orientation_from_moments,
        ) for analysis in dm.ImageAnalysis.objects.filter(id__in=analysis_ids)]

        estimator_object = dm.KMeansEstimator.objects.get(pk=stored_estimator_id)

        estimator = estimator_object.rebuilt_estimator
        scaler = estimator_object.rebuilt_scaler
        label_deltas = estimator_object.label_deltas_defaultdict

        (
            celery.signature('drone.compute_automatic_tags_with_estimator',
                             args=(analyses, estimator, scaler, label_deltas)) |
            celery.signature('results.store_automatic_analysis_tags')
        ).apply_async()
Example #43
def automatically_tag_by_ellipse_search(all_image_ids, per_chunk=16):
    results = list()
    for image_ids in chunkify(all_image_ids, per_chunk):
        taggables = list()
        cals = dict()
        for image in list(dm.Image.objects.filter(id__in=image_ids)):
            data = image.jpeg

            cal_name = image.cjr.cal_image.image_file.file.name
            if cal_name not in cals:
                cals[cal_name] = image.cal_jpeg

            taggables.append((image.id, data, cal_name, image.search_envelope))

        results.append((
            celery.signature('drone.compute_automatic_tags_with_ellipse_search',
                             args=(taggables, cals)) |
            celery.signature('results.store_ellipse_search_tags')
        ).apply_async())

    return results
Example #44
def trigger_org_task(task_name, queue="celery"):
    """
    Triggers the given org task to be run for all active orgs
    :param task_name: the full task name, e.g. 'myproj.myapp.tasks.do_stuff'
    :param queue: the name of the queue to send org sub-tasks to
    """
    active_orgs = apps.get_model("orgs", "Org").objects.filter(is_active=True)
    for org in active_orgs:
        sig = signature(task_name, args=[org.pk])
        sig.apply_async(queue=queue)

    logger.info("Requested task '%s' for %d active orgs" % (task_name, len(active_orgs)))
Example #45
def handle_callback(callbacks=[]):
    '''
    A generic function that handles a list of callbacks, executing each callback in order.
    :param  list(dict) callbacks: a list of celery signature objects
    '''
    if any(callbacks):
        callbacks = [signature(x) for x in callbacks]
        callbacks[0].kwargs['callbacks'] = callbacks[1:]  # Pass the remaining callbacks to callbacks[0] via the 'callbacks' keyword argument
        logger.debug("Task: {0!r}".format(callbacks[0].get('task','Task name not found in handle_callback')))
        should_delay = callbacks[0].get('options',{}).get('should_delay', True)
        logger.debug("Should delay: {0!r}".format(should_delay))
        callbacks[0].delay() if should_delay else callbacks[0]()
    else:
        logger.debug("No callbacks")
Example #46
def create_task_status(task_name, args=None, kwargs=None):
    sig = str(signature(task_name, args=args, kwargs=kwargs))  # args and kwargs required?

    # Set status to PENDING since it's not running yet
    init_status = PENDING

    # Check for timelimit
    annotations_for_task = resolve_annotations(task_name)
    timelimit = annotations_for_task.get('time_limit')

    # Determine when this task should expire
    utc_before = datetime.now(tzutc())
    expires_at = utc_before + timedelta(seconds=timelimit)

    status = TaskStatus.objects.create(
        status=init_status,
        signature=sig,
        expires_at=expires_at,
    )

    return status
Example #47
    def test_on_chord_part_return(self, restore):
        b = self.MockBackend(app=self.app)
        deps = Mock()
        deps.__len__ = Mock()
        deps.__len__.return_value = 10
        restore.return_value = deps
        b.client.incr.return_value = 1
        task = Mock()
        task.name = 'foobarbaz'
        self.app.tasks['foobarbaz'] = task
        task.request.chord = signature(task)
        task.request.group = 'group_id'

        b.on_chord_part_return(task)
        self.assertTrue(b.client.incr.call_count)

        b.client.incr.return_value = len(deps)
        b.on_chord_part_return(task)
        deps.join_native.assert_called_with(propagate=True, timeout=3.0)
        deps.delete.assert_called_with()

        self.assertTrue(b.client.expire.call_count)
Example #48
def lock_task(task_name, *args, **kwargs):
    """
    Try to lock a task: check whether or not a status entry exists for given
    parameters or create one (preventing others from being created).

    Returns whether or not the task was locked and a status instance.
    """
    locked, status = False, None

    # MUST use `str()`, otherwise it will invoke the task instead in the
    # query up ahead!
    sig = str(signature(task_name, args=args, kwargs=kwargs))

    # Create a status if allowed
    if not TaskStatus.objects\
            .filter(Q(expires_at__gte=datetime.now(tzutc())) | Q(expires_at__isnull=True),
                    signature=sig)\
            .filter(status__in=[PENDING, RECEIVED, STARTED, REVOKED, RETRY, IGNORED, REJECTED])\
            .exists():

        # Set status to PENDING since it's not running yet
        init_status = PENDING

        # Check for timelimit
        annotations_for_task = resolve_annotations(task_name)
        timelimit = annotations_for_task.get('time_limit')

        # Determine when this task should expire
        utc_before = datetime.now(tzutc())
        expires_at = utc_before + timedelta(seconds=timelimit)

        status = TaskStatus.objects.create(
            status=init_status,
            signature=sig,
            expires_at=expires_at,
        )
        locked = True

    return locked, status
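A usage sketch, assuming a hypothetical task name whose annotations provide a `time_limit`; the task is only enqueued when the lock was acquired:

import logging

from celery import signature

logger = logging.getLogger(__name__)

locked, status = lock_task("myapp.tasks.sync_accounts", 42, force=False)
if locked:
    # No unexpired, unfinished status row existed for this signature, so it is
    # safe to enqueue the task now.
    signature("myapp.tasks.sync_accounts", args=(42,), kwargs={"force": False}).apply_async()
else:
    logger.info("Task already pending or running; skipping enqueue.")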
Example #49
    def test_on_chord_part_return(self, restore):
        tb = CacheBackend(backend='memory://', app=self.app)

        deps = Mock()
        deps.__len__ = Mock()
        deps.__len__.return_value = 2
        restore.return_value = deps
        task = Mock()
        task.name = 'foobarbaz'
        self.app.tasks['foobarbaz'] = task
        task.request.chord = signature(task)

        gid, res = uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)]
        task.request.group = gid
        tb.on_chord_apply(gid, {}, result=res)

        self.assertFalse(deps.join_native.called)
        tb.on_chord_part_return(task)
        self.assertFalse(deps.join_native.called)

        tb.on_chord_part_return(task)
        deps.join_native.assert_called_with(propagate=True)
        deps.delete.assert_called_with()
Example #50
def beat_init_handler(sender=None, **kwargs):
    
#     def get_run_times(trigger, first_run_time, end_time):
#         if end_time == None:
#             end_time = datetime.datetime.now(pytz.utc)
#         run_times = []
#         while first_run_time and first_run_time <= end_time:
#             run_times.append(first_run_time)
#             first_run_time = trigger.get_next_fire_time(first_run_time, first_run_time)
#         return run_times
    
    for v in CELERYBEAT_SCHEDULE.itervalues():
        trigger = CronTrigger(hour=','.join(str(h) for h in v['schedule'].hour),
                              start_date=v['options']['eta'],
                              end_date=v['options'].get('expires', None),
                              timezone=pytz.utc)
        next_fire_time = trigger.start_date
        while next_fire_time and next_fire_time <= (trigger.end_date if trigger.end_date else datetime.datetime.now(pytz.utc)):
            task = signature(v['task'], v.get('args', ()) + (next_fire_time,), v.get('kwargs', {}), v.get('options', {}), app)
            try:
                task()
            except Exception as e:
                logger.exception(e)
            next_fire_time = trigger.get_next_fire_time(next_fire_time, next_fire_time)
Example #51
def monitor():
    return celery.group([celery.signature('psu.cached_report'),
                         celery.signature('cjc.queues_ping'),
                         celery.signature('cjc.thread_states')])()
Example #52
def web_home_view(request):
    LOG.error("web_home_view")
    subtask = celery.signature('business.tasks.foo', args=(3,))
    callback = celery.signature('business.tasks.foo_callback')
    subtask.apply_async(link=callback)
    return {'ping': 'pong'}
Example #53
def add_cb(x, y, callback=None):
    result = x + y
    if callback:
        return signature(callback).apply_async((result,))
    return result
Example #54
def crash_post_save(sender, instance, created, *args, **kwargs):
    if created and instance.upload_file_minidump:
        signature("tasks.processing_crash_dump", args=(instance.pk,)).apply_async(queue='private', countdown=1)
Example #55
def monitor_process(task, cluster, job, pid, nohup_out_path,
                    log_write_url=None, on_complete=None,
                    output_message='Job download/upload error: %s',
                    girder_token=None):
    job_url = '%s/jobs/%s/log' % (cumulus.config.girder.baseUrl, job['_id'])
    log = get_post_logger(job['_id'], girder_token, job_url)
    headers = {'Girder-Token':  girder_token}
    job_id = job['_id']
    status_url = '%s/jobs/%s' % (cumulus.config.girder.baseUrl, job_id)

    try:
        # if terminating break out
        if _is_terminating(job, girder_token):
            return

        with get_connection(girder_token, cluster) as conn:
            # See if the process is still running
            output = conn.execute('ps %s | grep %s' % (pid, pid),
                                  ignore_exit_status=True,
                                  source_profile=False)

            if len(output) > 0:
                # Process is still running so schedule self again in about 5
                # secs
                # N.B. throw=False to prevent Retry exception being raised
                task.retry(throw=False, countdown=5)
            else:
                try:
                    nohup_out_file_name = os.path.basename(nohup_out_path)

                    # Log the output
                    with conn.get(nohup_out_path) as fp:
                        output = fp.read()
                        if output.strip():
                            log.error(output_message % output)
                            # If we have output then set the error state on the
                            # job and return
                            r = requests.patch(status_url, headers=headers,
                                               json={'status': JobState.ERROR})
                            check_status(r)
                            return
                finally:
                    if nohup_out_file_name and \
                       os.path.exists(nohup_out_file_name):
                        os.remove(nohup_out_file_name)

                # Fire off the on_complete task if we have one
                if on_complete:
                    signature(on_complete).delay()

                # If we were uploading, move the job to the complete state
                if job['status'] == JobState.UPLOADING:
                    job_status = from_string(job['status'], task=task,
                                             cluster=cluster, job=job,
                                             log_write_url=log_write_url,
                                             girder_token=girder_token,
                                             conn=conn)
                    job_status = Complete(job_status)
                    job_status = job_status.next(JobQueueState.COMPLETE)
                    job_status.run()
                    r = requests.patch(status_url, headers=headers,
                                       json={'status': str(job_status)})
                    check_status(r)

    except EOFError:
        # Try again
        task.retry(throw=False, countdown=5)
    except Exception as ex:
        r = requests.patch(status_url, headers=headers,
                           json={'status': JobState.UNEXPECTEDERROR})
        check_status(r)
        get_job_logger(job, girder_token).exception(ex.message)
        raise
Example #56
def create_and_store_estimator(ted):
    return (
        celery.signature('learn.create_estimator', args=(ted,)) |
        celery.signature('results.store_estimator')
    ).apply_async()
Example #57
 def regenerate_stacktrace(self, request, queryset):
     for i in queryset:
         signature("tasks.processing_crash_dump", args=(i.pk,)).apply_async(queue='default')
Example #58
 def cleanup(self):
     model = self.initial['type'].split('__')
     task_kwargs = self.get_task_kwargs()
     signature("tasks.deferred_manual_cleanup", args=(model,), kwargs=task_kwargs).apply_async(queue='limitation')
Example #59
def upload_job_output_to_item(cluster, job, log_write_url=None, job_dir=None,
                              girder_token=None):
    headers = {'Girder-Token':  girder_token}
    job_id = job['_id']
    status_url = '%s/jobs/%s' % (cumulus.config.girder.baseUrl, job_id)

    try:
        # if terminating break out
        if _is_terminating(job, girder_token):
            return

        with get_connection(girder_token, cluster) as conn:
            # First put girder client on master
            path = inspect.getsourcefile(cumulus.girderclient)
            with open(path, 'r') as fp:
                conn.put(fp,
                         os.path.normpath(os.path.join(job_dir, '..',
                                                       os.path.basename(path))))

            cmds = ['cd %s' % job_dir]
            upload_cmd = 'python ../girderclient.py --token %s --url "%s" ' \
                         'upload --job %s' \
                         % (girder_token,
                            cumulus.config.girder.baseUrl, job['_id'])

            upload_output = '%s.upload.out' % job_id
            upload_output_path = os.path.normpath(os.path.join(job_dir, '..',
                                                               upload_output))
            cmds.append('nohup %s  &> ../%s  &\n' % (upload_cmd, upload_output))

            upload_cmd = _put_script(conn, '\n'.join(cmds))
            output = conn.execute(upload_cmd)

            # Remove upload script
            conn.remove(upload_cmd)

        if len(output) != 1:
            raise Exception('PID not returned by execute command')

        try:
            pid = int(output[0])
        except ValueError:
            raise Exception('Unable to extract PID from: %s' % output)

        on_complete = None

        if _get_on_complete(job) == 'terminate':
            cluster_log_url = '%s/clusters/%s/log' % \
                (cumulus.config.girder.baseUrl, cluster['_id'])
            on_complete = signature(
                'cumulus.tasks.cluster.terminate_cluster',
                args=(cluster,), kwargs={'log_write_url': cluster_log_url,
                                         'girder_token': girder_token})

        monitor_process.delay(cluster, job, pid, upload_output_path,
                              log_write_url=log_write_url,
                              on_complete=on_complete,
                              girder_token=girder_token)

    except Exception as ex:
        r = requests.patch(status_url, headers=headers,
                           json={'status': JobState.UNEXPECTEDERROR})
        check_status(r)
        get_job_logger(job, girder_token).exception(ex.message)