Example #1
def update_endpoints(self, endpoint_list):
    # only endpoints that have not yet been processed are selected here
    endpoint_to_process = endpoint_list.endpoint_set.filter(processed=False)
    total = endpoint_to_process.count()
    count = 0
    if not settings.SKIP_CELERY_TASK:
        # the task workflow must be a sequence of serial requests to each endpoint.
        # A celery chain links signatures together so that one is called after the other.
        tasks = []
        for endpoint in endpoint_to_process:
            # use immutable signatures; we don't want the result of the previous
            # task in the celery chain passed to the next task.
            tasks.append(update_endpoint.si(endpoint))
        chain(tasks)()
        # update state
        if not self.request.called_directly:
            self.update_state(
                state='PROGRESS',
                meta={'current': count, 'total': total}
            )
    else:
        for endpoint in endpoint_to_process:
            update_endpoint(endpoint)

    return True
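The comment in Example #1 about immutable signatures is worth spelling out. Below is a minimal sketch (hypothetical task names and broker URL, not taken from the project above) contrasting .si() with .s() in a chain: .si() keeps only the arguments baked into each signature, while .s() lets each link receive the previous link's return value.

from celery import Celery, chain

app = Celery('sketch', broker='redis://localhost:6379/0')

@app.task
def update_endpoint(endpoint_id):
    # stand-in for the real per-endpoint work
    return endpoint_id

@app.task
def log_result(previous_result):
    return previous_result

# .si() builds immutable signatures: the second call is NOT handed the result of
# the first; each link keeps only the arguments it was created with.
chain(update_endpoint.si(1), update_endpoint.si(2), update_endpoint.si(3)).apply_async()

# with .s() the previous link's return value is passed along automatically
chain(update_endpoint.s(1), log_result.s()).apply_async()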
Example #2
def check_service(self, service):
    # total is determined (and updated) right after service.update_layers
    total = 100

    def status_update(count):
        if not self.request.called_directly:
            self.update_state(
                state='PROGRESS',
                meta={'current': count, 'total': total}
            )

    status_update(0)
    service.update_layers()
    # we count 1 for update_layers and 1 for service check for simplicity
    layer_to_process = service.layer_set.all()
    total = layer_to_process.count() + 2
    status_update(1)
    service.check_available()
    status_update(2)
    count = 3

    if not settings.SKIP_CELERY_TASK:
        tasks = []
        for layer in layer_to_process:
            # update state
            status_update(count)
            tasks.append(check_layer.si(layer))
            count += 1
        chain(tasks)()
    else:
        for layer in layer_to_process:
            status_update(count)
            check_layer(layer)
            count += 1
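Both examples above report progress through self.update_state, which requires a bound task. A minimal sketch of that pattern, assuming a reachable broker; the task name is illustrative:

from celery import Celery

app = Celery('sketch', broker='redis://localhost:6379/0')

@app.task(bind=True)
def process_items(self, items):
    total = len(items)
    for current, item in enumerate(items, start=1):
        # ... do the real work on `item` here ...
        if not self.request.called_directly:
            self.update_state(state='PROGRESS',
                              meta={'current': current, 'total': total})
    return total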
Example #3
def import_task():
    """
    Enter all of the daily import tasks into the queue, where they can grind away from there.

    The import is broken up into tasks for a few reasons: it can be paused by stopping the sims queue if necessary,
    and it works around the celery task time limit.
    """
    if not settings.DO_IMPORTING_HERE:
        return

    tasks = [
        daily_cleanup.si(),
        fix_unknown_emplids.si(),
        get_role_people.si(),
        import_grads.si(),
        get_update_grads_task(),
        import_offerings.si(continue_import=True),
        import_semester_info.si(),
        import_active_grad_gpas.si(),
        #get_import_offerings_task(),
        #import_combined_sections.si(),
        #send_report.si()
    ]

    chain(*tasks).apply_async()
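Example #3 unpacks a prepared list into chain(*tasks), while Example #1 passes the list directly as chain(tasks); both spellings are accepted. A minimal sketch, assuming a reachable broker and illustrative task names:

from celery import Celery, chain

app = Celery('sketch', broker='redis://localhost:6379/0')

@app.task
def step(name):
    return name

tasks = [step.si('cleanup'), step.si('import_grads'), step.si('import_offerings')]

chain(*tasks).apply_async()   # unpack the list into positional signatures
chain(tasks).apply_async()    # a single list argument works as well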
Example #4
 def give_to_pool(cls, pool, custom_limit=None):
     """Give appliances from shepherd to the pool where the maximum count is specified by pool
     or you can specify a custom limit
     """
     from appliances.tasks import (
         appliance_power_on, mark_appliance_ready, wait_appliance_ready, appliance_yum_update,
         appliance_reboot)
     limit = custom_limit if custom_limit is not None else pool.total_count
     appliances = []
     with transaction.atomic():
         for template in pool.possible_templates:
             for appliance in cls.unassigned().filter(
                     template=template).all()[:limit - len(appliances)]:
                 with appliance.kill_lock:
                     appliance.appliance_pool = pool
                     appliance.save()
                     appliance.set_status("Given to pool {}".format(pool.id))
                     tasks = [appliance_power_on.si(appliance.id)]
                     if pool.yum_update:
                         tasks.append(appliance_yum_update.si(appliance.id))
                         tasks.append(
                             appliance_reboot.si(appliance.id, if_needs_restarting=True))
                     if appliance.preconfigured:
                         tasks.append(wait_appliance_ready.si(appliance.id))
                     else:
                         tasks.append(mark_appliance_ready.si(appliance.id))
                     chain(*tasks)()
                     appliances.append(appliance)
             if len(appliances) == limit:
                 break
     return len(appliances)
Example #5
def main():
    logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(module)s.%(funcName)s - %(message)s")
    R = praw.Reddit(USER_AGENT)
    R.login(os.environ["PT_REDDIT_USERNAME"], os.environ["PT_REDDIT_PASSWORD"])
    logging.info("Starting check loop")
    while True:
        for message in R.get_unread():
            message.mark_as_read()
            if "/u/playlist-this!" not in message.body or not message.was_comment:
                continue

            if message.is_root:
                parent_permalink = message.submission.permalink
            else:
                parent_permalink = message.submission.permalink + message.parent_id[3:]

            chain(
                tasks.get_songs_from.s(parent_permalink),
                tasks.create_playlist.s(),
                tasks.generate_playlist_comment.s(),
                tasks.post_comment.s(message.permalink),
            ).apply_async()

        logging.info("Sleeping for %s seconds", CHECK_INTERVAL)
        time.sleep(CHECK_INTERVAL)
Example #6
def add_bot_v(request, form = 0):
    if form == 0:
        form = BotForm()
    if request.method == 'POST':
        form = BotForm(request.POST, request.FILES)
        if form.is_valid():
            # get challenge
            challenge = Challenge.objects.get( id = form.data["target_challenge"] )
            # create directory for a bot
            directory = challenge.directory + "bots/" + form.data["name"].replace(" ", "_") + "/"
            os.makedirs(directory)
            # upload source file
            upload_file(directory, request.FILES['source_file'])
            # gather data
            program = Program(compiler = Compiler.objects.get(id = form.data["compiler"]),
                              source_file = directory + request.FILES['source_file'].name )
            program.save()
            bot = Bot( name = form.data["name"], 
                       playing_program = program,
                       directory = directory,
                       owner = request.user,
                       target_challenge = challenge)
            # delay compilation
            recent_action = RecentAction(owner = request.user,
                                         message = "Bot validation: " + bot.name
                                                    + " (for challenge: " + challenge.title + ")",
                                         state = ActionState.objects.get(name = 'IN_QUEUE'))
            recent_action.save()
            chain(compile_bot.si(bot, recent_action), enqueue_bots_battles.si(bot)).apply_async() 
            return HttpResponseRedirect('/successful/')
    return render_to_response('ChallengeManagement/add_bot.xhtml',
                              { "form": form, "title" : "Add Bot" },
                              context_instance = RequestContext(request))
Example #7
def cache_and_train(*args, **kwargs):
    """
    Cache plexon file (if using plexon system) and train BMI.
    """

    # import config
    if config.recording_sys['make'] == 'plexon':
        print "cache and train"
        entry = kwargs['entry']
        print entry
        plxfile = models.DataFile.objects.get(system__name='plexon', entry=entry)
        print plxfile

        if not plxfile.has_cache():
            cache = cache_plx.si(plxfile.get_path())
            train = make_bmi.si(*args, **kwargs)
            chain(cache, train)()
        else:
            print "calling"
            make_bmi.delay(*args, **kwargs)
    
    elif config.recording_sys['make'] == 'blackrock':
        make_bmi.delay(*args, **kwargs)
    
    else:
        raise Exception('Unknown recording_system!')
Example #8
def serve(app_name, uuid):
    client = xmlrpclib.Server(settings.EMPRESS_SERVICE_URL)
    app_info = client.private.app_info(app_name)

    if app_info.get('celery', {}).get('enabled'):
        chain(
            provide_virtualenv.s((app_info, uuid)),
            pull_source_code.s(),
            install_requirements.s(),
            syncdb_and_migrate.s(),
            render_uwsgi_config.s(),
            render_nginx_config.s(),
            reload_nginx.s(),
            render_supervisor_config.s(),
            reload_supervisor.s(),
            end.s(),
        ).apply_async()
    else:
        chain(
            provide_virtualenv.s((app_info, uuid)),
            pull_source_code.s(),
            install_requirements.s(),
            syncdb_and_migrate.s(),
            render_uwsgi_config.s(),
            render_nginx_config.s(),
            reload_nginx.s(),
            end.s(),
        ).apply_async()
Example #9
def fetch_with_chain(uid):
    user = User.objects.select_related().get(uid=uid)
    user_path = os.path.join(tempfile.tempdir, user.uid)
    if not os.path.exists(user_path):
        os.mkdir(user_path)
    working_tasks = get_subdirectories(user_path)
    if len(working_tasks):
        working_task = working_tasks[-1]
        task_result = AsyncResult(working_task)
        return task_result.status
    else:
        task_id = str(uuid.uuid4())
        task_path = os.path.join(user_path, task_id)
        os.mkdir(task_path)
        status = chain(
            fetch_status.s(user.password),
            parse_status.s(user.get_full_name(), task_path)
        )
        photos = chain(
            fetch_photos.s(user.password),
            download_photos.s(task_path)
        )
        friends = chain(
            fetch_friends.s(user.password),
            download_friends.s(task_path)
        )
        return chord(group(status, photos, friends), compress_and_upload.s(uid, task_path)).apply_async(task_id=task_id)
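The return value above is a chord: compress_and_upload runs only after every chain in the group has finished, and it receives the list of their results (chords require a result backend). A stripped-down sketch with hypothetical task names and broker/backend URLs:

from celery import Celery, chain, chord, group

app = Celery('sketch',
             broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/1')

@app.task
def fetch(kind):
    return kind

@app.task
def compress_and_upload(results):
    # `results` is the ordered list of outputs from the chains in the group
    return results

header = group(
    chain(fetch.s('status')),
    chain(fetch.s('photos')),
    chain(fetch.s('friends')),
)
chord(header, compress_and_upload.s()).apply_async(task_id='my-task-id')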
Example #10
def liberate_collect_emails(results, mail_path, options):
    """ Send off data mining tasks """
    msg_tasks = []
    results = results or []
    for result in results:
        inbox = [(mail_path, result['folder'], email_id) for email_id in result['ids']]
        msg_tasks.extend(inbox)

    task_len = len(msg_tasks)

    if task_len > 0:
        msg_tasks = liberate_message.chunks(msg_tasks, 100).group()
        msg_tasks.skew(step=10)
        msg_tasks = chain(
                        msg_tasks,
                        liberate_convert_box.s(mail_path, options),
                        liberate_fetch_info.s(options),
                        liberate_tarball.s(options),
                        liberation_finish.s(options)
                        )
    else:
        options["noEmails"] = True
        data = {"results": []}
        msg_tasks = chain(
                        liberate_convert_box.s(data, mail_path, options),
                        liberate_fetch_info.s(options),
                        liberate_tarball.s(options),
                        liberation_finish.s(options)
                        )

    async_result = msg_tasks.apply_async()

    lib_status = get_user_model().objects.get(id=options["user"]).liberation
    lib_status.async_result = async_result.id
    lib_status.save()
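Example #10 relies on chunks() to batch a long argument list into a handful of tasks, .group() to turn the batches into a group, and .skew() to stagger their countdowns before chaining the group with the follow-up tasks. A minimal sketch, assuming a reachable broker; the task name, paths and step size are illustrative:

from celery import Celery

app = Celery('sketch', broker='redis://localhost:6379/0')

@app.task
def liberate_message(mail_path, folder, email_id):
    return email_id

args = [('/tmp/mail', 'INBOX', email_id) for email_id in range(1000)]

batches = liberate_message.chunks(args, 100).group()  # 10 tasks of 100 calls each
batches.skew(step=10)                                 # stagger each batch's countdown
batches.apply_async()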
Example #11
def _start_back_end(record, priority=False):
    """
    kick off the asynchronous licence lookup process.  There is no need for this to return
    anything, although a handle on the asynchronous request object is provided for convenience of
    testing
    
    arguments:
    record -- an OAG record object, see the module documentation for details
    
    returns:
    AsyncRequest object from the Celery framework
    
    """
    log.debug("injecting record into asynchronous processing chain: " + str(record))
    
    # ask the record to prep itself for injection into the processing chain
    # this will return just the record dictionary (not the message object), and will
    # have removed the existing journal information and added any processing flags 
    # that are relevant
    chainable = record.prep_for_backend() 
    log.debug("record prepped for chain: " + str(chainable))
    
    if priority:
        ch = chain(priority_detect_provider.s(chainable), priority_provider_licence.s(), priority_store_results.s())
        r = ch.apply_async()
        return r
    else:
        ch = chain(detect_provider.s(chainable), provider_licence.s(), store_results.s())
        r = ch.apply_async()
        return r
Example #12
def taskPeriodicTasks():
    haveLock = False
    Lock = redis.Redis().lock("project")
    try:
        haveLock = Lock.acquire(blocking=False)
        logger.info(haveLock)
        if haveLock:
            logger.info("Locked")
            chain(
                scraper.getProjectsList(),
                scraper.getRepos(),
                scraper.getComponents(),
                scraper.getBugStatus(),
                scraper.getBugSeverity(),
                scraper.getBugPriority(),
                scraper.getTestCoverage(),
                scraper.getSuccessDensity(),
                scraper.getCommitCountTotal(),
                scraper.getContributorsCount(),
                scraper.getCommitCountLastWeek()
            )
    finally:
        if haveLock:
            logger.info("Released")
            Lock.release()
Example #13
    def _execute_callback(self, lab_name, **kwargs):
        action = kwargs["action"]
        response = hresponse.HandlerResponse()
        response.status_code = 202
        response.reason = "Request accepted and being processed"

        if action in ["boot", "test"]:
            tasks = [
                taskqueue.tasks.callback.lava_test.s(
                    self.json_obj, self.job_meta, lab_name),
            ]
            if action == "test":
                tasks.append(taskqueue.tasks.test.find_regression.s())
            chain(tasks).apply_async(
                link_error=taskqueue.tasks.error_handler.s())
        else:
            response.status_code = 404
            response.reason = "Unsupported LAVA action: {}".format(action)

        # Also run the legacy boot callback to generate boot entries
        if action == "boot":
            tasks = [
                taskqueue.tasks.callback.lava_boot.s(
                    self.json_obj, self.job_meta, lab_name),
                taskqueue.tasks.boot.find_regression.s(),
            ]
            chain(tasks).apply_async(
                link_error=taskqueue.tasks.error_handler.s())

        return response
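Example #13 attaches an error callback to the whole chain via link_error. A minimal sketch of that wiring, assuming a reachable broker; the task names are illustrative and the three-argument errback form is the one shown in the Celery canvas guide:

from celery import Celery, chain

app = Celery('sketch', broker='redis://localhost:6379/0')

@app.task
def run_test(job):
    return job

@app.task
def find_regression(result):
    return result

@app.task
def error_handler(request, exc, traceback):
    # called if any task in the chain fails
    print('task {0} raised {1!r}'.format(request.id, exc))

tasks = [run_test.s('job-1'), find_regression.s()]
chain(tasks).apply_async(link_error=error_handler.s())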
Example #14
def test_mutable_chain_with_multiple_return_values():
    """See what happens when a task in a chain returns
    multiple values

    #NOTE: scoping tasks in a function is purely for readability,
    these tasks will still be global and will still persist after
    this test has completed.

    RESULT: all return values are provided in a tuple in args[0]
    """
    @task
    def task_that_returns_multiple_values():
        return 1, 2, 3, {'one': 1}

    @task
    def task_that_accepts_values(*args, **kwargs):
        print(args)
        print(kwargs)

    chain(
        task_that_returns_multiple_values.s(),
        task_that_accepts_values.s(),
    ).apply_async().get()

    assert 1==1
Example #15
    def test_chord_in_chords_with_chains(self, manager):
        try:
            manager.app.backend.ensure_chords_allowed()
        except NotImplementedError as e:
            raise pytest.skip(e.args[0])

        c = chord(
            group([
                chain(
                    add.si(1, 2),
                    chord(
                        group([add.si(1, 2), add.si(1, 2)]),
                        add.si(1, 2),
                    ),
                ),
                chain(
                    add.si(1, 2),
                    chord(
                        group([add.si(1, 2), add.si(1, 2)]),
                        add.si(1, 2),
                    ),
                ),
            ]),
            add.si(2, 2)
        )

        r = c.delay()

        assert r.get(timeout=TIMEOUT) == 4
Example #16
def handle_analysis(analysis_result, event_id, locale='en'):
    """Handle analysis products"""
    flood = Flood.objects.get(id=event_id)
    flood.inspected_language = locale

    task_state = 'FAILURE'
    if analysis_result['status'] == RESULT_SUCCESS:
        try:
            flood.impact_file_path = analysis_result['output'][
                'analysis_summary']

            task_state = 'SUCCESS'
            process_impact_layer.delay(flood)

            chain(
                get_keywords.s(
                    flood.impact_file_path,
                    keyword='keyword_version'
                ).set(queue=get_keywords.queue),

                handle_keyword_version.s(
                    flood.id
                ).set(queue=handle_keyword_version.queue)

            ).delay()
        except BaseException as e:
            LOGGER.exception(e)
    else:
        LOGGER.error(analysis_result['message'])

    flood.analysis_task_status = task_state
    flood.analysis_task_result = json.dumps(analysis_result)
    flood.save()

    return analysis_result
Example #17
 def worker(self):
     res_pastie = chain(task_pastie_grabber.s() |
                        task_check_link_redis.s() |
                        task_download_pastes.s() |
                        task_add_downloaded_link_redis.s()
     )
     res_nopaste = chain(task_nopaste_grabber.s() |
                         task_check_link_redis.s() |
                         task_download_pastes.s() |
                         task_add_downloaded_link_redis.s()
     )
     res_pastebin = chain(task_pastebin_grabber.s() |
                          task_check_link_redis.s() |
                          task_download_pastes.s() |
                          task_add_downloaded_link_redis.s()
     )
     # res_pastesite = chain(task_pastesite_grabber.s() |
     #                       task_check_link_redis.s() |
     #                       task_download_pastes.s() |
     #                       task_add_downloaded_link_redis.s()
     # )
     g_res = group(res_nopaste,
                   res_pastie,
                   res_pastebin,
                   # res_pastesite
     )
     g_res.apply_async()
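In Example #17 the | operator already produces a chain, so the surrounding chain(...) call is redundant (though harmless). A minimal sketch of the two equivalent spellings, assuming a reachable broker and illustrative task names:

from celery import Celery, chain, group

app = Celery('sketch', broker='redis://localhost:6379/0')

@app.task
def grab_pastes():
    return ['http://example.com/paste/1']

@app.task
def download_pastes(links):
    return links

piped = grab_pastes.s() | download_pastes.s()           # already a chain
explicit = chain(grab_pastes.s(), download_pastes.s())  # equivalent spelling
group(piped, explicit).apply_async()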
Example #18
    def reply_list(self, sms_wrapper, msgs):
        """Simple wrapper for returning a text message response given a list of texts to return
        sms_wrapper:        SmsMessage object (used for metadata)
        msgs:               Iterable containing the messages to send. It is the caller's responsibility
                            to ensure they're the proper length.
        """
        if not msgs or len(msgs) < 1:
            return Response()

        callback_url = self.request.build_absolute_uri(reverse('sms-callback'))
        args = {
            'callback': callback_url,
            'from_': sms_wrapper.to_phone,
            'to': sms_wrapper.from_phone,
        }

        # First item uses different call/args
        # See http://docs.celeryproject.org/en/latest/userguide/canvas.html#chains
        msg_chain = [sms_bunny.s(args, msgs[0])]
        for m in msgs[1:]:
            msg_chain.append(sms_bunny.subtask((m,), countdown=self._sms_delay))

        try:
            chain(*msg_chain).apply_async()
        except socket.error:
            # Not finding celery counts as 502: bad gateway
            return HttpResponse('Couldn\'t connect to celery to send SMS messages', status=502)

        return Response()
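Example #18 spaces out the follow-up messages by attaching a countdown option to each subtask. A minimal sketch of that shape, assuming a reachable broker; the task name and delay are illustrative, and .s(...).set(countdown=...) would attach the same option:

from celery import Celery, chain

app = Celery('sketch', broker='redis://localhost:6379/0')

@app.task
def send_sms(previous=None, body=''):
    return body

delay = 5  # seconds between messages
msgs = ['part 1', 'part 2', 'part 3']

msg_chain = [send_sms.s(None, msgs[0])]
for m in msgs[1:]:
    # countdown is stored as an execution option on the signature
    msg_chain.append(send_sms.subtask((m,), countdown=delay))

chain(*msg_chain).apply_async()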
Example #19
def UpdateRepo(repo):

    # find our repository
    repoRoot = CFGDATA.get("RepoRoot")
    repoPath = os.path.join(repoRoot, repo)
    if not os.path.exists(repoPath):
        return

    # lift the repo description as we might need to create the repo first
    repoDesc = "This repository has no description"
    repoDescFile = os.path.join(repoPath, "description")
    if os.path.exists(repoDescFile):
        with open(repoDescFile) as f:
            repoDesc = f.read().strip()

    # spawn push to github task first
    if CFGDATA.get("GithubEnabled") and (not isExcluded(repo, CFGDATA.get("GithubExcepts"))):
        githubPrefix = CFGDATA.get("GithubPrefix")
        githubUser = CFGDATA.get("GithubUser")
        githubRemote = "%s@github.com:%s/%s" % (githubUser, githubPrefix, repo)

        createTask = CeleryWorkers.CreateRepoGithub.si(repo, repoDesc)
        syncTask = CeleryWorkers.SyncRepo.si(repoPath, githubRemote, True)
        celery.chain(createTask, syncTask)()

    # now spawn all push to anongit tasks
    if CFGDATA.get("AnongitEnabled") and (not isExcluded(repo, CFGDATA.get("AnongitExcepts"))):
        anonUser = CFGDATA.get("AnongitUser")
        anonPrefix = CFGDATA.get("AnongitPrefix")
        for server in CFGDATA.get("AnongitServers"):
            anonRemote = "%s@%s:%s/%s" % (anonUser, server, anonPrefix, repo)

            createTask = CeleryWorkers.CreateRepoAnongit.si(repo, server, repoDesc)
            syncTask = CeleryWorkers.SyncRepo.si(repoPath, anonRemote, False)
            celery.chain(createTask, syncTask)()
Example #20
def create_workflow_task(self, ctx):
    """create_workflow_task celery task
    :param self: celery task instance
    :param ctx: workflow state
    :type ctx: dict
    :return: dict(ctx, workflow)
    """
    assert ctx['workflow_cycle_count'] > 0, 'workflow_cycle_count has to be bigger than 0'
    assert ctx['operation_concurrent_count'] > 0, 'operation_concurrent_count has to be bigger than 0'

    logger.info('creating workflow')
    ctx.setdefault('workflow_count', 0)
    ctx['workflow_count'] += 1

    if ctx['workflow_count'] < ctx['workflow_cycle_count']:
        logger.info('need to repeat workflow')
        chord_callback_tasks = chain(
            operation_summery_task.subtask((), {'ctx': ctx}),
            create_workflow_task.subtask())
    else:
        logger.info('do not need to repeat workflow')
        chord_callback_tasks = chain(
            operation_summery_task.subtask((), {'ctx': ctx}),
            cleanup_task.subtask(()),
        )

    logger.info('workflow count: {ctx[workflow_count]}'.format(ctx=ctx))
    return {
        'ctx': ctx,
        'workflow': chord(
            operation_task.s(index=index, ctx=ctx)
            for index in xrange(ctx['operation_concurrent_count'])
        )(chord_callback_tasks),
    }
Example #21
def task_group():
    # 100 random numbers (group) -> filter -> sum (chain)
    # execute several tasks in parallel.
    g = group(rand.s(x) for x in range(100))
    # chaining a group together with another task automatically upgrades it to a chord
    gchain = chain(g, filter.s(), xsum.s())
    gchain.apply_async(countdown=1)
    # logger.warning(datetime.now() + timedelta(seconds=5))
    # gchain.apply_async(eta=datetime.now() + timedelta(seconds=5))
    # gchain.apply_async(countdown=5)

    job = group([
        add.s(1, 2),
        add.s(2, 3),
        add.s(3, 4),
        add.s(4, 5),
    ])
    chain(job, xsum.s()).apply_async()
    # chord(job, xsum.s()).apply_async()
    # sync call
    if settings.CELERY_ALWAYS_EAGER:
        res = job.delay()
        cnt = 0
        while not res.ready():
            print('wait job ready%s' % ('.' * cnt))
            cnt += 1
        # a non-eager (async) call would block here
        print('job result: %s' % sum(res.get(timeout=2)))
Example #22
    def test_tr55_job_error_in_chain(self):
        model_input = {
            'inputs': [],
            'area_of_interest': {
                'type': 'MultiPolygon',
                'coordinates': [[
                    [[-75.06271362304688, 40.15893480687665],
                     [-75.2728271484375, 39.97185812402586],
                     [-74.99130249023438, 40.10958807474143],
                     [-75.06271362304688, 40.15893480687665]]
                ]]
            },
            'modification_pieces': [],
            'modifications': [],
            'modification_hash': 'j39fj9fg7yshb399h4nsdhf'
        }

        job_chain = views._construct_tr55_job_chain(model_input,
                                                    self.job.id)
        self.assertTrue('tasks.start_histograms_job' in str(job_chain[0]))
        self.assertTrue('tasks.get_histogram_job_results' in str(job_chain[1]))
        job_chain = [get_test_histogram.s()] + job_chain[2:]

        with self.assertRaises(Exception) as context:
            chain(job_chain).apply_async()

        self.assertEqual(str(context.exception),
                         'No precipitation value defined',
                         'Unexpected exception occurred')
Example #23
def import_all(force_update_all=False):
    """Start all the importers sequentially and attempt
    to move the result index to production
    """
    app = create_app()
    with app.blueprint_context(BLUEPRINT_NAME):
        solr_server = app.config['PLACES_SOLR_SERVER']
        staging_core = app.config['PLACES_SOLR_CORE_STAGING']
        staging_core_url = '{server}/{core}/update'.format(server=solr_server, core=staging_core)

        # make sure the "staging" index is empty
        delete_response = requests.post(staging_core_url, '<delete><query>*:*</query></delete>', headers={'Content-type': 'text/xml'})
        commit_response = requests.post(staging_core_url, '<commit/>', headers={'Content-type': 'text/xml'})

        if delete_response.ok and commit_response.ok:
            logger.info("Deleted all documents from staging, launching importers")
            # Using a chain (seq) so tasks execute in order
            chain(import_oxpoints.s(force_update=force_update_all),
                  import_oxpoints_organisation_descendants.s(force_update=force_update_all),
                  import_osm.s(force_update=force_update_all),
                  import_naptan.s(force_update=force_update_all),
                  import_ox_library_data.s(force_update=force_update_all),
                  swap_places_cores.s())()
            return True
        else:
            logger.warning("Staging core not deleted correctly, aborting", extra={
                'delete_response': delete_response.status_code,
                'commit_response': commit_response.status_code
            })
        return False
Example #24
def _using_lock(self, name, do_task, cleanup_tasks=None,
                error_tasks=None):
    """
    Applies lock for the deployment

    :return: Lock object (dictionary)
    :rtype: dict
    """
    try:
        lock = LockService().apply_lock(name)
    except ResourceLockedException as lock_error:
        raise self.retry(exc=lock_error)

    _release_lock_s = _release_lock.si(lock)
    cleanup_tasks = cleanup_tasks or []
    if not isinstance(cleanup_tasks, list):
        cleanup_tasks = [cleanup_tasks]

    error_tasks = error_tasks or []
    if not isinstance(error_tasks, list):
        error_tasks = [error_tasks]

    error_tasks.append(_release_lock_s)
    cleanup_tasks.append(_release_lock_s)

    return (
        do_task
    ).apply_async(
        link=chain(cleanup_tasks),
        link_error=chain(error_tasks)
    )
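Example #24 shows that link and link_error accept chains as callbacks, so an unlock step can run on both the success and the failure path. A minimal sketch, assuming a reachable broker; the task names are illustrative:

from celery import Celery, chain

app = Celery('sketch', broker='redis://localhost:6379/0')

@app.task
def deploy(name):
    return name

@app.task
def notify(*args, **kwargs):
    return 'notified'

@app.task
def release_lock(*args, **kwargs):
    return 'released'

deploy.si('web').apply_async(
    link=chain([notify.si(), release_lock.si()]),        # runs after success
    link_error=chain([notify.si(), release_lock.si()]),  # runs on failure
)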
Example #25
def get_district_attachment_pages(options, rd_pks, tag_names, session):
    """Get the attachment page information for all of the items selected

    :param options: The options returned by argparse. Should have the following
    keys:
     - queue: The celery queue to use
     - offset: The offset to skip
     - limit: The limit to stop after
    :param rd_pks: A list or ValuesList of RECAPDocument PKs to get attachment
    pages for.
    :param tag_names: A list of tags to associate with the downloaded items.
    :param session: A PACER logged-in PacerSession object
    :return None
    """
    q = options['queue']
    recap_user = User.objects.get(username='******')
    throttle = CeleryThrottle(queue_name=q)
    for i, rd_pk in enumerate(rd_pks):
        if i < options['offset']:
            continue
        if i >= options['limit'] > 0:
            break
        throttle.maybe_wait()
        chain(
            get_attachment_page_by_rd.s(rd_pk, session.cookies).set(queue=q),
            make_attachment_pq_object.s(rd_pk, recap_user.pk).set(queue=q),
            process_recap_attachment.s(tag_names=tag_names).set(queue=q),
        ).apply_async()
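The chain above pins every link to a specific queue with .set(queue=...), so routing is decided per signature rather than for the chain as a whole. A minimal sketch, assuming a reachable broker; the task and queue names are illustrative:

from celery import Celery, chain

app = Celery('sketch', broker='redis://localhost:6379/0')

@app.task
def get_attachment_page(rd_pk):
    return rd_pk

@app.task
def process_attachment(rd_pk):
    return rd_pk

chain(
    get_attachment_page.s(42).set(queue='pacer'),
    process_attachment.s().set(queue='recap'),
).apply_async()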
Example #26
def update_search_pattern_sets():
    """Update all records affected by search-patterned OAISets.

    In order to avoid race conditions when editing the records, all
    OAISet task groups are chained.
    """
    oaisets = OAISet.query.filter(OAISet.search_pattern.isnot(None))
    chain(make_oai_task_group(oais) for oais in oaisets).apply_async()
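Example #26 passes a generator expression straight to chain(), which serializes one task group per OAISet. A minimal sketch of chaining a generator of signatures, assuming a reachable broker; the task name is illustrative:

from celery import Celery, chain

app = Celery('sketch', broker='redis://localhost:6379/0')

@app.task
def update_oai_set(set_id):
    return set_id

set_ids = [1, 2, 3]
chain(update_oai_set.si(set_id) for set_id in set_ids).apply_async()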
Example #27
    def test_report(self):
        chain(tasks.requests_fetch.s(), tasks.requests_report.s()).delay()

        self.assertEqual(len(mail.outbox), 1)
        self.assertIn("Amount: 200", mail.outbox[0].body)
        self.assertIn("User: %s" % (self.user.username), mail.outbox[0].body)
        self.assertIn("Date:", mail.outbox[0].body)
        self.assertIn("Current: %s" % (self.user.userprofile.pool_amount,), mail.outbox[0].body)
Example #28
 def handle(self, text):
     try:
         chain(capture_image.s(),
               send_twitter_status.s(text),
               text_back.s(self.msg.connection.identity)).apply_async()
     except Exception as ex:
         logger.warn(ex)
         return
Example #29
    def _run_reindex_tasks(self, models, queue):
        apply_async_kwargs = {'priority': 0}
        if queue:
            log.info('Adding indexing tasks to queue {0}'.format(queue))
            apply_async_kwargs['queue'] = queue
        else:
            log.info('Adding indexing tasks to default queue')

        index_time = timezone.now()
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')

        for doc in registry.get_documents(models):
            queryset = doc().get_queryset()
            # Get latest object from the queryset

            app_label = queryset.model._meta.app_label
            model_name = queryset.model.__name__

            index_name = doc._doc_type.index
            new_index_name = "{}_{}".format(index_name, timestamp)
            # Set index temporarily for indexing,
            # this will only get set during the running of this command
            doc._doc_type.index = new_index_name

            pre_index_task = create_new_es_index.si(app_label=app_label,
                                                    model_name=model_name,
                                                    index_name=index_name,
                                                    new_index_name=new_index_name)

            indexing_tasks = self._get_indexing_tasks(app_label=app_label, model_name=model_name,
                                                      queryset=queryset,
                                                      index_name=new_index_name,
                                                      document_class=str(doc))

            post_index_task = switch_es_index.si(app_label=app_label, model_name=model_name,
                                                 index_name=index_name,
                                                 new_index_name=new_index_name)

            # Task to run in order to add the objects
            # that have been inserted into the database while indexing_tasks was running.
            # We pass the creation time of the latest object, so it's possible to index later items.
            missed_index_task = index_missing_objects.si(app_label=app_label,
                                                         model_name=model_name,
                                                         document_class=str(doc),
                                                         index_generation_time=index_time)

            # http://celery.readthedocs.io/en/latest/userguide/canvas.html#chords
            chord_tasks = chord(header=indexing_tasks, body=post_index_task)
            if queue:
                pre_index_task.set(queue=queue)
                chord_tasks.set(queue=queue)
                missed_index_task.set(queue=queue)
            # http://celery.readthedocs.io/en/latest/userguide/canvas.html#chain
            chain(pre_index_task, chord_tasks, missed_index_task).apply_async(**apply_async_kwargs)

            message = ("Successfully issued tasks for {}.{}, total {} items"
                       .format(app_label, model_name, queryset.count()))
            log.info(message)
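Example #29 composes a preparatory task, a chord of indexing tasks with a switch-over body, and a catch-up task into one chain. A stripped-down sketch of that shape, assuming a reachable broker and result backend (chords require one); the task names are illustrative:

from celery import Celery, chain, chord, group

app = Celery('sketch',
             broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/1')

@app.task
def create_new_index(name):
    return name

@app.task
def index_chunk(chunk_id):
    return chunk_id

@app.task
def switch_index(results):
    # receives the list of chunk results once the whole group has finished
    return len(results)

@app.task
def index_missing_objects(_previous):
    return 'done'

indexing_tasks = group(index_chunk.si(i) for i in range(4))
chord_tasks = chord(header=indexing_tasks, body=switch_index.s())
chain(create_new_index.si('docs_v2'), chord_tasks, index_missing_objects.s()).apply_async()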
Example #30
def call_check(data):
    join_room(data["transaction_id"])
    url = data["url"]
    if not re.match(r"^http\://", url):
        data["url"] = "http://%s" % url
    for location in app.config["LOCATIONS"]:
        for testsuite in location["testsuites"]:

            input_data = {
                "transaction_id": data["transaction_id"],

                "location": location["location"],
                "country": location["country"],
                "ISP": location["ISP"],
                "url": data["url"],
            }
            location_queue = "%s_%s" % (location["location"].lower().replace(" ", "_"), location["ISP"].lower().replace(" ", "_"))
            logging.warn(location_queue)

            input_data["test_type"] = testsuite
            if testsuite in ("dns_google", "dns_TM", "dns_opendns"):
                for server in app.config["TESTSUITES"][testsuite]["servers"]:
                    input_data["task_id"] = str(uuid.uuid4())

                    extra_attr = {
                        "provider": app.config["TESTSUITES"][testsuite]["provider"],
                        "server": server
                    }
                    input_data["extra_attr"] = extra_attr
                    logging.warn("DNS Check")
                    description = "%s server: %s " % (app.config["TESTSUITES"][testsuite]["description"], server)
                    input_data["description"] = description
                    result_data = ResultData.from_json(input_data, extra_attr=extra_attr)
                    task = chain(
                            call_dns_task.s(result_data.to_json()).set(queue=location_queue),
                            update_entry.s().set(queue="basecamp"),
                            post_update.s().set(queue="basecamp")
                        ).apply_async()

            else:
                input_data["task_id"] = str(uuid.uuid4())

                input_data["description"] = app.config["TESTSUITES"][testsuite]["description"]
                result_data = ResultData.from_json(input_data)

                if testsuite == "http":
                    task = chain(
                        call_http_task.s(result_data.to_json()).set(queue=location_queue),
                        update_entry.s().set(queue="basecamp"),
                        post_update.s().set(queue="basecamp")
                    ).apply_async()
                elif testsuite == "http_dpi_tampering":
                    task = chain(
                        call_http_dpi_tampering_task.s(result_data.to_json()).set(queue=location_queue),
                        update_entry.s().set(queue="basecamp"),
                        post_update.s().set(queue="basecamp")
                    ).apply_async()
            emit("result_received", result_data.to_json(), room=result_data.transaction_id)
Example #31
def create_job(**body):
    """
    This function creates a new Transfer job.
    TODO add some logic
    """
    msg = body['body']
    tempfile = 'temp/' + str(uuid.uuid1())

    def _in():
        if 'http' in msg['input']['type']:
            LOGGER.info(msg['input']['type']['http'])
            _input = 'fromUrl'
            return msg['input']['type']['http'], _input
        if 'ftp' in msg['input']['type']:
            _input = 'fromFtp'
            return msg['input']['type']['ftp'], _input

    def _out():
        if 'filesystem' in msg['output']['type']:
            _output = 'toFilesystem'
            return msg['output']['type']['filesystem'], _output
        if 'ftp' in msg['output']['type']:
            _output = 'toFtp'
            return msg['output']['type']['ftp'], _output

    worker_msg = {**_in()[0], **_out()[0]}
    _input = _in()[1]
    _output = _out()[1]
    LOGGER.info(worker_msg)
    if _output == 'toFilesystem':
        LOGGER.info('output set to filesystem path')
    if _output == 'toFtp':
        LOGGER.info('output set to ftp')


#  worker logic comes here
#  Todo refactor to something useful
    if _input == 'fromUrl' and _output == 'toFilesystem':
        if 'user' in worker_msg and 'passwd' in worker_msg:
            job = transferFromUrl.s(uri=worker_msg['uri'],
                                    destPath=worker_msg['destPath'],
                                    user=worker_msg['user'],
                                    password=worker_msg['passwd'])
            task = job.apply_async(retry=True)
            job_id = task.id
        else:
            job = transferFromUrl.s(uri=worker_msg['uri'],
                                    destPath=worker_msg['destPath'])
            task = job.apply_async()
            job_id = task.id
        worker_msg['job_id'] = job_id
        return worker_msg
    if _input == 'fromFtp' and _output == 'toFilesystem':
        job = transferFromFtp.s(password=worker_msg['passwd'],
                                host=worker_msg['host'],
                                user=worker_msg['user'],
                                path=worker_msg['path'],
                                destpath=worker_msg['destPath'])
        task = job.apply_async(retry=True)
        job_id = task.id
        worker_msg['job_id'] = job_id
        return worker_msg
    if _input == 'fromFtp' and _output == 'toFtp':
        job = chain(
            transferFromFtp.s(password=worker_msg['passwd'],
                              host=worker_msg['host'],
                              user=worker_msg['user'],
                              path=worker_msg['path'],
                              destpath=tempfile),
            ftp_result.s(server=worker_msg['ftpHost'],
                         destfile=worker_msg['ftpPath'],
                         user=worker_msg['ftpUser'],
                         passwd=worker_msg['ftpPasswd']))
        task = job.apply_async(retry=True)
        job_id = task.id
        worker_msg['job_id'] = job_id
        delta_time = datetime.utcnow() + timedelta(days=1)
        LOGGER.info('scheduled deletion of %s in 1 day', tempfile)
        deleteFile.apply_async((tempfile, ), eta=delta_time)
        return worker_msg
    if _input == 'fromUrl' and _output == 'toFtp':
        if 'user' in worker_msg and 'passwd' in worker_msg:
            job = chain(
                transferFromUrl.s(uri=worker_msg['uri'],
                                  user=worker_msg['user'],
                                  password=worker_msg['passwd'],
                                  destPath=tempfile),
                ftp_result.s(server=worker_msg['ftpHost'],
                             destfile=worker_msg['ftpPath'],
                             user=worker_msg['ftpUser'],
                             passwd=worker_msg['ftpPasswd']))
        else:
            job = chain(
                transferFromUrl.s(uri=worker_msg['uri'], destPath=tempfile),
                ftp_result.s(server=worker_msg['ftpHost'],
                             destfile=worker_msg['ftpPath'],
                             user=worker_msg['ftpUser'],
                             passwd=worker_msg['ftpPasswd']))
        task_chain = job.apply_async(retry=True)
        job_id = task_chain.id
        worker_msg['job_id'] = job_id
        delta_time = datetime.utcnow() + timedelta(days=1)
        LOGGER.info('scheduled deletion of %s in 1 day', tempfile)
        deleteFile.apply_async((tempfile, ), eta=delta_time)
        return worker_msg
Example #32
from celery import chain, group, chord

from tasks import add, xsum, raise_error, on_chord_error


if __name__ == '__main__':
    ## chain primitive
    res = chain(add.s(2, 2), add.s(4), add.s(8))()
    print('Chain result: %s' % res.get())
    
    # shortcut of above
    res = (add.s(2, 2) | add.s(4) | add.s(8))()
    print('Chain shortcut: %s' % res.get())
    
    res = chain(add.si(2, 2), add.si(4, 5), add.si(8, 8))()
    print('Chain with independent task: %s' % res.get())           # 8 + 8
    print('Parent result: %s' % res.parent.get())                  # 4 + 5
    print('Parent of parent result: %s' % res.parent.parent.get()) # 2 + 2
    
    
    ## group primitive
    res = group(add.s(i, i) for i in range(10))()
    print('Group result: %s' % res.get())
    
    
    ## chord primitive
    res = chord((add.s(i, i) for i in range(10)), xsum.s())()
    # is equal to: group(add.s(i, i) for i in range(10)) | xsum.s()
    print('Chord result: %s' % res.get())
    
    res = chord([add.s(2, 2), raise_error.s(), add.s(4, 4)], xsum.s())()
Example #33
def task_delete_ibm_subnet_workflow(self, task_id, cloud_id, region,
                                    subnet_id):
    """
    This request deletes a VPC Subnet and its attached resources,
    such as VPN gateways (and their attached resources), load balancers
    (and their attached resources), and the instances with their floating IPs.
    @return:
    """

    workflow_steps, subnet_tasks_list, floating_ip_tasks_list = list(), list(
    ), list()
    vpn_instance_tasks_list, load_balancer_task_list = list(), list()
    ibm_subnet = doosradb.session.query(IBMSubnet).filter_by(
        id=subnet_id).first()
    if not ibm_subnet:
        return

    for lb in ibm_subnet.ibm_load_balancers:
        load_balancer_task_list.append(
            task_delete_ibm_load_balancer.si(task_id=task_id,
                                             cloud_id=cloud_id,
                                             region=region,
                                             load_balancer_id=lb.id))

    for vpn in ibm_subnet.vpn_gateways.all():
        vpn_instance_tasks_list.append(
            task_delete_ibm_vpn_gateway.si(task_id=task_id,
                                           cloud_id=cloud_id,
                                           region=region,
                                           vpn_id=vpn.id))

    for network_interface in ibm_subnet.network_interfaces.all():
        if network_interface.ibm_instance:
            if network_interface.floating_ip:
                floating_ip_tasks_list.append(
                    task_delete_ibm_floating_ip.si(
                        task_id=task_id,
                        cloud_id=cloud_id,
                        region=region,
                        floating_ip_id=network_interface.floating_ip.id))

        vpn_instance_tasks_list.append(
            task_delete_ibm_instance.si(
                task_id=task_id,
                cloud_id=cloud_id,
                region=region,
                instance_id=network_interface.ibm_instance.id))

    if load_balancer_task_list and len(load_balancer_task_list) == 1:
        workflow_steps.extend(load_balancer_task_list)
    elif load_balancer_task_list:
        workflow_steps.append(
            chord(
                group(load_balancer_task_list),
                update_group_tasks.si(
                    task_id=task_id,
                    cloud_id=cloud_id,
                    region=region,
                    message="Load Balancers Tasks Chord Finisher")))

    if floating_ip_tasks_list and len(floating_ip_tasks_list) == 1:
        workflow_steps.extend(floating_ip_tasks_list)
    elif floating_ip_tasks_list:
        workflow_steps.append(
            chord(
                group(floating_ip_tasks_list),
                update_group_tasks.si(
                    task_id=task_id,
                    cloud_id=cloud_id,
                    region=region,
                    message="Floating IP's Tasks Chord Finisher")))

    if vpn_instance_tasks_list and len(vpn_instance_tasks_list) == 1:
        workflow_steps.extend(vpn_instance_tasks_list)
    elif vpn_instance_tasks_list:
        workflow_steps.append(
            chord(
                group(vpn_instance_tasks_list),
                update_group_tasks.si(
                    task_id=task_id,
                    cloud_id=cloud_id,
                    region=region,
                    message="VPN/VSI's Tasks Chord Finisher")))

    workflow_steps.append(
        task_delete_ibm_subnet.si(task_id=task_id,
                                  cloud_id=cloud_id,
                                  region=region,
                                  subnet_id=ibm_subnet.id))

    workflow_steps.append(update_ibm_task.si(task_id=task_id))
    chain(workflow_steps).delay()
Example #34
    def post(self, request):
        """
        Create a new Project. Projects and the root File directory for a Project should
        be owned by the portal, with roles/permissions granted to the creating user.

        1. Create the metadata record for the project
        2. Create a directory on the projects storage system named after the metadata uuid
        3. Associate the metadata uuid and file uuid

        :param request:
        :return: The newly created project
        :rtype: JsonResponse
        """

        # portal service account needs to create the objects on behalf of the user
        ag = get_service_account_client()

        if request.is_ajax():
            post_data = json.loads(request.body)
        else:
            post_data = request.POST.copy()

        # create Project (metadata)
        metrics.info('projects',
                     extra={
                         'user': request.user.username,
                         'sessionId': getattr(request.session, 'session_key',
                                              ''),
                         'operation': 'project_create',
                         'info': {
                             'postData': post_data
                         }
                     })
        prj_model = project_lookup_model({
            'name': 'designsafe.project',
            'value': post_data
        })
        prj = prj_model(value=post_data)
        project_uuid = prj.uuid
        prj.manager().set_client(ag)
        prj.save(ag)

        # create Project Directory on Managed system
        metrics.info('projects',
                     extra={
                         'user': request.user.username,
                         'sessionId': getattr(request.session, 'session_key',
                                              ''),
                         'operation': 'base_directory_create',
                         'info': {
                             'systemId': Project.STORAGE_SYSTEM_ID,
                             'uuid': prj.uuid
                         }
                     })
        project_storage_root = BaseFileResource(ag, Project.STORAGE_SYSTEM_ID,
                                                '/')
        project_storage_root.mkdir(prj.uuid)

        # Wrap Project Directory as private system for project
        project_system_tmpl = template_project_storage_system(prj)
        project_system_tmpl['storage']['rootDir'] = \
            project_system_tmpl['storage']['rootDir'].format(project_uuid)
        metrics.info('projects',
                     extra={
                         'user': request.user.username,
                         'sessionId': getattr(request.session, 'session_key',
                                              ''),
                         'operation': 'private_system_create',
                         'info': {
                             'id':
                             project_system_tmpl.get('id'),
                             'site':
                             project_system_tmpl.get('site'),
                             'default':
                             project_system_tmpl.get('default'),
                             'status':
                             project_system_tmpl.get('status'),
                             'description':
                             project_system_tmpl.get('description'),
                             'name':
                             project_system_tmpl.get('name'),
                             'globalDefault':
                             project_system_tmpl.get('globalDefault'),
                             'available':
                             project_system_tmpl.get('available'),
                             'public':
                             project_system_tmpl.get('public'),
                             'type':
                             project_system_tmpl.get('type'),
                             'storage': {
                                 'homeDir':
                                 project_system_tmpl.get('storage',
                                                         {}).get('homeDir'),
                                 'rootDir':
                                 project_system_tmpl.get('storage',
                                                         {}).get('rootDir')
                             }
                         }
                     })
        ag.systems.add(body=project_system_tmpl)

        # grant initial permissions for creating user and PI, if exists
        metrics.info('projects',
                     extra={
                         'user': request.user.username,
                         'sessionId': getattr(request.session, 'session_key',
                                              ''),
                         'operation': 'initial_pems_create',
                         'info': {
                             'collab': request.user.username,
                             'pi': prj.pi
                         }
                     })

        if getattr(prj, 'copi', None):
            prj.add_co_pis(prj.copi)
        elif getattr(prj, 'co_pis', None):
            prj.add_co_pis(prj.co_pis)
        if getattr(prj, 'team', None):
            prj.add_team_members(prj.team)
        elif getattr(prj, 'team_members', None):
            prj.add_team_members(prj.team_members)

        prj._add_team_members_pems([prj.pi])

        if request.user.username not in list(
                set(prj.co_pis + prj.team_members + [prj.pi])):
            # Add creator to project as team member
            prj.add_team_members([request.user.username])

        # Email collaborators
        chain(
            tasks.set_project_id.s(prj.uuid).set(queue="api")
            | tasks.email_collaborator_added_to_project.s(
                prj.uuid, prj.title,
                request.build_absolute_uri('{}/projects/{}/'.format(
                    reverse('designsafe_data:data_depot'), prj.uuid)),
                [
                    u
                    for u in list(set(prj.co_pis + prj.team_members +
                                      [prj.pi])) if u != request.user.username
                ], [])).apply_async()

        tasks.set_facl_project.apply_async(args=[
            prj.uuid,
            list(set(prj.co_pis + prj.team_members + [prj.pi]))
        ],
                                           queue='api')

        prj.add_admin('prjadmin')
        return JsonResponse(prj.to_body_dict(), safe=False)
Example #35
    def dispatch_faculty_notifications(self, user_id):
        """
        Dispatch daily summary notifications that are directed at faculty
        :param self:
        :param user_id:
        :return:
        """
        try:
            user = db.session.query(User).filter_by(id=user_id).first()
        except SQLAlchemyError as e:
            current_app.logger.exception("SQLAlchemyError exception",
                                         exc_info=e)
            raise self.retry()

        if user is None:
            self.update_state('FAILURE',
                              meta='Could not read database records')
            raise Ignore()

        # create snapshot of notifications list; this is what we use *everywhere* to decide which notifications
        # to process, in order to avoid race conditions with other threads adding notifications to the database
        raw_list = [(n.id, n.event_type)
                    for n in user.unheld_email_notifications]

        # strip out all notification ids, no matter what type of event we are dealing with
        n_ids = [x[0] for x in raw_list]

        # strip out all notification ids for newly created events
        c_ids = [
            x[0] for x in raw_list
            if x[1] == EmailNotification.CONFIRMATION_REQUEST_CREATED
        ]

        # if we are not grouping notifications into summaries for this user,
        # generate a task to send each email in the queue
        task = None
        if not user.group_summaries:
            if len(n_ids) > 0:
                task = group(
                    dispatch_faculty_single_email.s(user_id, n_id)
                    for n_id in n_ids)

        # we always generate a possible summary, even if the 'group summaries' option is not used,
        # to advise of confirmation requests that are not being handled in a timely fashion.
        has_summary = False
        allow_summary = user.last_email is None
        if not allow_summary:
            time_since_last_email = datetime.now() - user.last_email

            # cutoff is the user-specified frequency in days, minus 30 minutes of overhead:
            # e.g. messages sent after a 5pm scheduled mailing would be recorded as being issued at
            # 1701 or 1702. The next day, user.last_email would appear to be only 23 hours 59/58 mins old
            # and prevent another email from being issued
            frequency = timedelta(days=user.summary_frequency) - timedelta(
                hours=0.5)
            allow_summary = time_since_last_email > frequency

        if allow_summary:
            cr_ids = _get_outstanding_faculty_confirmation_requests(user)
            summary_ids = n_ids if user.group_summaries else []

            if len(summary_ids) > 0 or len(cr_ids) > 0:
                has_summary = True
                if task is None:
                    task = dispatch_faculty_summary_email.s(
                        None, user.id, summary_ids, cr_ids)
                else:
                    task = task | dispatch_faculty_summary_email.s(
                        user.id, summary_ids, cr_ids)

        # if nothing to do, then return
        if task is None:
            return

        if not has_summary:
            task = task | no_summary_adapter.s()

        # if there *is* something to do, also dispatch paired emails to both faculty and students
        if len(c_ids) > 0:
            # we have to set up this task as a chain so that we can sequentially pass through any
            # list of previously-handled ids
            # If we used a group, each dispatch_new_request_notification() would return its own
            # copy of this list, so we would end up with a list-of-lists in reset_notifications
            if task is None:
                c_id_head = c_ids[0]
                c_ids_tail = c_ids[1:]

                if len(c_ids_tail) > 0:
                    task = dispatch_new_request_notification.s(
                        [], c_id_head) | chain(
                            dispatch_new_request_notification.s(c_id)
                            for c_id in c_ids_tail)
                else:
                    task = dispatch_new_request_notification.s([], c_id_head)
            else:
                task = task | chain(
                    dispatch_new_request_notification.s(c_id)
                    for c_id in c_ids)

        task = task | group(reset_notifications.s(),
                            reset_last_email_time.s(user_id))
        raise self.replace(task)
Example #36
def by_chain():
    # Tasks can be linked together so that one is invoked after another returns (serial execution)
    res = chain(add.s(10) | add.s(20))(10)  # add(add(10, 10), 20)
    print("********** chain result: {0} ***********".format(res.get()))
    print("********** chain parent result: {0} ***********".format(res.parent.get()))  # each parent corresponds to one upstream result
Example #37
def build_index_pipeline(file_path='',
                         events='',
                         timeline_name='',
                         index_name='',
                         file_extension='',
                         sketch_id=None,
                         only_index=False,
                         timeline_id=None):
    """Build a pipeline for index and analysis.

    Args:
        file_path: The full path to a file to upload; either a file_path
            or events needs to be defined.
        events: String with the event data, either file_path or events
            needs to be defined.
        timeline_name: Name of the timeline to create.
        index_name: Name of the index to index to.
        file_extension: The file extension of the file.
        sketch_id: The ID of the sketch to analyze.
        only_index: If set to true then only indexing tasks are run, not
            analyzers. This is to be used when uploading data in chunks;
            we don't want to run the analyzers until all chunks have been
            uploaded.
        timeline_id: Optional ID of the timeline object this data belongs to.

    Returns:
        Celery chain with indexing task (or single indexing task) and analyzer
        task group.
    """
    if not (file_path or events):
        raise RuntimeError(
            'Unable to upload data, missing either a file or events.')
    index_task_class = _get_index_task_class(file_extension)
    index_analyzer_chain = _get_index_analyzers()
    sketch_analyzer_chain = None
    searchindex = SearchIndex.query.filter_by(index_name=index_name).first()

    index_task = index_task_class.s(file_path, events, timeline_name,
                                    index_name, file_extension, timeline_id)

    if only_index:
        return index_task

    if sketch_id:
        sketch_analyzer_chain, _ = build_sketch_analysis_pipeline(
            sketch_id, searchindex.id, user_id=None)

    # If there are no analyzers just run the indexer.
    if not index_analyzer_chain and not sketch_analyzer_chain:
        return index_task

    if sketch_analyzer_chain:
        if not index_analyzer_chain:
            return chain(index_task, run_sketch_init.s(),
                         sketch_analyzer_chain)
        return chain(index_task, index_analyzer_chain, run_sketch_init.s(),
                     sketch_analyzer_chain)

    if current_app.config.get('ENABLE_EMAIL_NOTIFICATIONS'):
        return chain(index_task, index_analyzer_chain,
                     run_email_result_task.s())

    return chain(index_task, index_analyzer_chain)
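A simplified sketch of the conditional composition used above: start from a single indexing signature and only wrap it in a chain when there is follow-up work. All task names here are hypothetical.

from celery import Celery, chain

app = Celery('sketch', broker='memory://')

@app.task
def index(data):
    return data

@app.task
def analyze(data):
    return data

def build(data, run_analyzers=True):
    index_task = index.s(data)
    if not run_analyzers:
        # a bare signature and a chain expose the same interface,
        # so callers can .delay() / .apply_async() either one
        return index_task
    return chain(index_task, analyze.s())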
Example #38
0
    def run(self):
        """Executes the current batch definition.

        Expands the current batch definition to a series of celery chains and
        executes them asynchronously. Additionally a batch record is written to
        the celery result backend.

        Returns:
            (unicode): Batch identifier.
        """
        if self.lock:
            raise NidabaInputException('Executed batch may not be modified')

        # reorder task definitions
        keys = [
            'img', 'binarize', 'segmentation', 'ocr', 'stats',
            'postprocessing', 'output', 'archive'
        ]
        tasks = OrderedDict((key, self.tasks[key]) for key in keys)
        first = []
        prev = None
        result_data = {}
        self.lock = True

        # build chain
        root_docs = self.docs
        prev = []
        for group, step in tasks.iteritems():
            # skip groups without tasks
            if not step:
                continue
            sequential = True if self.order[group][0] == 'sequence' else False
            mmode = self.order[group][1]

            def _repeat(lst, n):
                return list(
                    itertools.chain.from_iterable(
                        itertools.repeat(x, n) for x in lst))

            if sequential:
                step = [step]
            # multiply number of tasks in this step by number of tasks in
            # previous step if not merging
            if not mmode:
                step = _repeat(step, len(root_docs))
                root_docs = root_docs * (len(step) / len(root_docs))
            # by number of root docs if doc merging
            elif mmode == 'doc':
                step = _repeat(step, len(self.docs))
                root_docs = self.docs
            else:
                root_docs = [root_docs] * len(step)
            if not sequential:
                step = [[x] for x in step]
            nprev = []
            r = []
            for rd_idx, (rdoc, c) in enumerate(zip(root_docs, step)):
                if sequential:
                    r.append([])
                for idx, (fun, kwargs) in enumerate(c):
                    # if idx > 0 (sequential == true) parent is previous task in sequence
                    if idx > 0:
                        parents = [task_id]
                    # if merge mode is 'doc' base parents are tasks n * (len(prev)/len(docs)) to n+1 ...
                    elif mmode == 'doc':
                        parents = prev[rd_idx::len(root_docs)]
                    # if merging everything all tasks in previous step are parents
                    elif mmode:
                        parents = prev
                    # if not merging a single task in previous step is the parent
                    elif mmode is False:
                        parents = [prev[rd_idx % len(prev)]] if prev else prev
                    task_id = uuid.uuid4().get_hex()
                    # last task in a sequence is entered into new prev array
                    if idx + 1 == len(c):
                        nprev.append(task_id)
                    result_data[task_id] = {
                        'children': [],
                        'parents': parents,
                        'root_documents': rdoc if mmode else [rdoc],
                        'state': 'PENDING',
                        'result': None,
                        'task': (group, fun, kwargs)
                    }
                    for parent in parents:
                        result_data[parent]['children'].append(task_id)
                    task = self.celery.app.tasks[u'nidaba.{}.{}'.format(
                        group, fun)]
                    if sequential:
                        r[-1].append(
                            task.s(batch_id=self.id, task_id=task_id,
                                   **kwargs))
                    else:
                        r.append(
                            task.s(batch_id=self.id, task_id=task_id,
                                   **kwargs))
            prev = nprev
            t = self.celery.app.tasks[u'nidaba.util.barrier'].s(
                merging=mmode,
                sequential=sequential,
                replace=r,
                root_docs=self.docs)
            first.append(t)
        with self.redis.pipeline() as pipe:
            while (1):
                try:
                    pipe.watch(self.id)
                    self._restore_and_create_scratchpad(pipe)
                    # also deletes the scratchpad
                    pipe.set(self.id, json.dumps(result_data))
                    pipe.execute()
                    break
                except WatchError:
                    continue
        chain(first).apply_async(args=[self.docs])
        return self.id
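The last two lines above lean on two chain behaviours worth spelling out: chain() accepts a plain list of signatures, and arguments passed to apply_async() are delivered to the first task in the chain. A tiny sketch with a hypothetical barrier task:

from celery import Celery, chain

app = Celery('sketch', broker='memory://')

@app.task
def barrier(docs, merging=False):
    return docs

sigs = [barrier.s(merging=False), barrier.s(merging=True)]
# args given to apply_async() go to the first task;
# each later task receives the previous task's return value
chain(sigs).apply_async(args=[['doc1', 'doc2']])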
Example #39
0
def do_transaction(list_signatures):
    task_list = []
    for sig in list_signatures:
        task_list += [sig, monitor.s()]
    res = chain(*task_list)
    return res()
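A sketch of the interleaving produced above, with hypothetical work and monitor tasks: a monitor link follows every real signature, and each monitor receives the preceding task's return value.

from celery import Celery, chain

app = Celery('sketch', broker='memory://')

@app.task
def work(x):
    return x * 2

@app.task
def monitor(result):
    # gets the previous link's return value and passes it straight through
    return result

signatures = [work.s(1), work.s(), work.s()]
task_list = []
for sig in signatures:
    task_list += [sig, monitor.s()]
# resulting order: work, monitor, work, monitor, work, monitor
chain(*task_list).apply_async()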
Example #40
0
 def test_chain_of_chain_with_a_single_task(self, manager):
     sig = signature('any_taskname', queue='any_q')
     chain([chain(sig)]).apply_async()
Example #41
0
 def test_rebuild_nested_group_group(self, manager):
     sig = chain(tasks.return_nested_signature_group_group.s(),
                 tasks.rebuild_signature.s())
     sig.delay().get(timeout=TIMEOUT)
Example #42
0
    def get_task_signature(
        cls, tenant, serialized_tenant, pull_security_groups=True, **kwargs
    ):
        """ Create tenant, add user to it, create internal network, pull quotas """
        # we assume that the tenant has one network and one subnet after creation
        network = tenant.networks.first()
        subnet = network.subnets.first()
        serialized_network = core_utils.serialize_instance(network)
        serialized_subnet = core_utils.serialize_instance(subnet)
        creation_tasks = [
            core_tasks.BackendMethodTask().si(
                serialized_tenant,
                'create_tenant_safe',
                state_transition='begin_creating',
            ),
            core_tasks.BackendMethodTask().si(
                serialized_tenant, 'add_admin_user_to_tenant'
            ),
            core_tasks.BackendMethodTask().si(serialized_tenant, 'create_tenant_user'),
            core_tasks.BackendMethodTask().si(
                serialized_network, 'create_network', state_transition='begin_creating'
            ),
            core_tasks.BackendMethodTask().si(
                serialized_subnet, 'create_subnet', state_transition='begin_creating'
            ),
        ]
        quotas = tenant.quotas.all()
        quotas = {
            q.name: int(q.limit) if q.limit.is_integer() else q.limit for q in quotas
        }
        creation_tasks.append(
            core_tasks.BackendMethodTask().si(
                serialized_tenant, 'push_tenant_quotas', quotas
            )
        )
        # handle security groups
        # XXX: Create default security groups that were connected to SPL earlier.
        for security_group in tenant.security_groups.all():
            creation_tasks.append(
                SecurityGroupCreateExecutor.as_signature(security_group)
            )

        if pull_security_groups:
            creation_tasks.append(
                core_tasks.BackendMethodTask().si(
                    serialized_tenant, 'pull_tenant_security_groups'
                )
            )

        # initialize external network if it defined in service settings
        service_settings = tenant.service_project_link.service.settings
        customer = tenant.service_project_link.project.customer
        external_network_id = service_settings.get_option('external_network_id')

        try:
            customer_openstack = models.CustomerOpenStack.objects.get(
                settings=service_settings, customer=customer
            )
            external_network_id = customer_openstack.external_network_id
        except models.CustomerOpenStack.DoesNotExist:
            pass

        if external_network_id and not kwargs.get('skip_connection_extnet'):
            creation_tasks.append(
                core_tasks.BackendMethodTask().si(
                    serialized_tenant,
                    'connect_tenant_to_external_network',
                    external_network_id=external_network_id,
                )
            )
            creation_tasks.append(
                core_tasks.BackendMethodTask().si(
                    serialized_tenant, backend_method='pull_tenant_routers',
                )
            )

        creation_tasks.append(
            core_tasks.BackendMethodTask().si(serialized_tenant, 'pull_tenant_quotas')
        )
        return chain(*creation_tasks)
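A reduced sketch of why the executor builds its chain from .si() signatures: immutable signatures ignore the previous task's return value, so every backend call runs with exactly the arguments it was built with. The backend_call task and the identifiers are made up.

from celery import Celery, chain

app = Celery('sketch', broker='memory://')

@app.task
def backend_call(obj, method, **kwargs):
    return '{} on {}'.format(method, obj)

steps = chain(
    backend_call.si('tenant-1', 'create_tenant_safe'),
    backend_call.si('tenant-1', 'add_admin_user_to_tenant'),
    backend_call.si('network-1', 'create_network'),
)
# the calls run strictly in order; none of them receives the previous
# call's result because every signature is immutable
steps.apply_async()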
Example #43
0
def lookup_contact(contact_number):
    contact = Contact.objects.get(phone_number=contact_number)

    return chain(lookup_contact_whitepages.si(contact.id)).apply_async()
Example #44
0
 def user_created(self, user_name):
     self._create_datastore_dir(user_name)
     res = chain(create_workspace.si(user_name), create_datastore.si(user_name))
     res.delay()
     LOGGER.info("Start monitoring datastore of created user [%s]", user_name)
     Monitoring().register(self._shapefile_folder_dir(user_name), True, Geoserver)
Example #45
0
def run_both(url: str):
    """A task that calls other tasks."""
    url += f"?t={int(time.time())}"
    chain(first_part.s(url), second_part.s()).delay()
Example #46
0
def file_wrap_up_chained(_, snap_id, group_id):
    chain(upload_files_to_s3_task.s(snap_id=snap_id, group_id=group_id),
          clean_up_files_chained.s(snap_id=snap_id,
                                   group_id=group_id)).apply_async()
Example #47
0
def get_stock_info(self, func, param, arg):
    try:
        chain(get_stock_data.s(func, param),
              output_datas.s(arg)).apply_async()
    except Exception as exc:
        raise self.retry(exc=exc, countdown=10, max_retries=3)
Example #48
0
def create_request():
    """
    Submit a request to resolve and cache the given source code and its dependencies.

    :param str repo: the URL to the SCM repository
    :param str ref: the SCM reference to fetch
    :param list<str> pkg_managers: list of package managers to be used for resolving dependencies
    :param list<str> flags: list of flag names
    :rtype: flask.Response
    :raise ValidationError: if required parameters are not supplied
    """
    payload = flask.request.get_json()
    if not isinstance(payload, dict):
        raise ValidationError('The input data must be a JSON object')

    request = Request.from_json(payload)
    if not re.match(r'^[a-f0-9]{40}', request.ref):
        raise ValidationError(
            'The "ref" parameter must be a 40 character hex string')
    db.session.add(request)
    db.session.commit()

    if current_user.is_authenticated:
        flask.current_app.logger.info('The user %s submitted request %d',
                                      current_user.username, request.id)
    else:
        flask.current_app.logger.info('An anonymous user submitted request %d',
                                      request.id)

    pkg_manager_names = set(pkg_manager.name
                            for pkg_manager in request.pkg_managers)
    auto_detect = len(pkg_manager_names) == 0
    if auto_detect:
        flask.current_app.logger.info(
            'Automatic detection will be used since "pkg_managers" was empty')

    # Chain tasks
    error_callback = tasks.failed_request_callback.s(request.id)
    chain_tasks = [
        tasks.fetch_app_source.s(request.repo, request.ref,
                                 request.id).on_error(error_callback),
    ]
    if 'gomod' in pkg_manager_names or auto_detect:
        gomod_dependency_replacements = [
            dependency_replacement for dependency_replacement in payload.get(
                'dependency_replacements', [])
            if dependency_replacement['type'] == 'gomod'
        ]
        chain_tasks.append(
            tasks.fetch_gomod_source.si(
                request.id,
                auto_detect,
                gomod_dependency_replacements,
            ).on_error(error_callback))

    chain_tasks.extend([
        tasks.create_bundle_archive.si(request.id).on_error(error_callback),
        tasks.set_request_state.si(request.id, 'complete',
                                   'Completed successfully'),
    ])

    chain(chain_tasks).delay()
    flask.current_app.logger.debug('Successfully scheduled request %d',
                                   request.id)
    return flask.jsonify(request.to_json()), 201
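A stripped-down sketch of the per-link error callback pattern above, with hypothetical tasks: .on_error() attaches an errback to an individual signature, and the errback receives the failing task's context, exception and traceback before any partially applied arguments.

from celery import Celery, chain

app = Celery('sketch', broker='memory://')

@app.task
def fetch(request_id):
    ...

@app.task
def bundle(request_id):
    ...

@app.task
def mark_failed(context, exc, traceback, request_id):
    # called only if the task it is attached to fails
    ...

error_cb = mark_failed.s(42)
chain(
    fetch.si(42).on_error(error_cb),
    bundle.si(42).on_error(error_cb),
).delay()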
Example #49
0
    def extract_addon(self, entry, batch_size=BATCH_SIZE):
        """
        This method takes a GitExtractionEntry object and creates a chain of
        Celery tasks to extract each version in a git repository that haven't
        been extracted yet (including the deleted versions).

        It does not run if the add-on is locked for git extraction.
        """
        addon = entry.addon
        log.info('Starting git extraction of add-on "%s".', addon.pk)

        # See: https://github.com/mozilla/addons-server/issues/14289
        if addon.type != amo.ADDON_EXTENSION:
            log.info(
                'Skipping git extraction of add-on "%s": not an extension.',
                addon.pk,
            )
            entry.delete()
            return

        # We cannot use `entry.in_progress` because we have to be sure of the
        # add-on state and `entry` might not reflect the most up-to-date
        # database state here.
        if addon.git_extraction_is_in_progress:
            log.info(
                'Aborting extraction of addon "%s" to git storage '
                'because it is already in progress.',
                addon.pk,
            )
            return

        log.info('Locking add-on "%s" before extraction.', addon.pk)
        entry.update(in_progress=True)

        # Retrieve all the version pks to extract, sorted by creation date.
        versions_to_extract = (addon.versions(
            manager='unfiltered_for_relations').filter(
                file__is_webextension=True,
                git_hash='').order_by('created').values_list('pk', flat=True))

        if len(versions_to_extract) == 0:
            log.info('No version to git-extract for add-on "%s", exiting.',
                     addon.pk)
            # We can safely delete the entry because there is no version to
            # extract.
            entry.delete()
            return

        version_pks = versions_to_extract[0:batch_size]
        tasks = [
            # Create a task to extract the BATCH_SIZE first versions.
            extract_versions_to_git.si(addon_pk=addon.pk,
                                       version_pks=version_pks)
        ]
        if len(version_pks) < len(versions_to_extract):
            # If there are more versions to git-extract, let's keep the entry
            # in the queue until we're done with this entry/add-on. The
            # `continue_git_extraction` task will set the `in_progress` flag to
            # `False` and this CRON task will pick the remaining versions to
            # git-extract the next time it runs.
            tasks.append(continue_git_extraction.si(addon.pk))
        else:
            # If we do not have more versions to git-extract here, we can
            # remove the entry from the queue.
            tasks.append(remove_git_extraction_entry.si(addon.pk))

        log.info(
            'Submitted %s tasks to git-extract %s versions for add-on "%s".',
            len(tasks),
            len(versions_to_extract),
            addon.pk,
        )
        # Attach an error handler on the chain and run it. The error
        # handler should remove the add-on lock (among other things).
        chain(*tasks).on_error(on_extraction_error.s(addon.pk)).delay()
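A short sketch of the variant used here (hypothetical tasks): calling .on_error() on the chain itself registers a single error callback for the whole chain instead of attaching one per link.

from celery import Celery, chain

app = Celery('sketch', broker='memory://')

@app.task
def extract_batch(addon_pk, version_pks):
    ...

@app.task
def unlock(request, exc, traceback, addon_pk):
    # release the add-on lock if any link in the chain fails
    ...

chain(
    extract_batch.si(1, [10, 11]),
    extract_batch.si(1, [12, 13]),
).on_error(unlock.s(1)).delay()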
Example #50
0
    def put(self, request):
        received_data = request.data

        if received_data.get('file') is None or received_data.get('exchange') is None:
            raise ParseError('One of the parameters is missing!')

        file = save_uploaded_file(request.FILES['file'])
        request_exchange = received_data['exchange'].lower()

        exchanges = [item[0] for item in StatsUploadEvent.EXCHANGES]

        if request_exchange not in exchanges:
            raise ParseError('The exchange parameter is invalid!')

        parse_task_id, update_profit_task_id = uuid(), uuid()

        StatsUploadEvent.objects.create(
            parse_task_id=parse_task_id,
            update_profit_task_id=update_profit_task_id,
            exchange=request_exchange,
            file=file,
            uploaded_by=request.user
        )

        if request_exchange == 'poloniex':
            task = chain(
                ParsePoloniexStatsTask().subtask(
                    kwargs={
                        'file': file,
                        'task_id': parse_task_id,
                    },
                    task_id=parse_task_id,
                ),
                TradeProfitRecalculationTask().subtask(
                    kwargs={
                        'task_id': update_profit_task_id,
                    },
                    task_id=update_profit_task_id,
                )
            ).apply_async()

        elif request_exchange == 'bittrex':
            raise ParseError('The Bittrex parser is still in development!')

            # task = chain(
            #     ParseBittrexStatsTask().subtask(
            #         kwargs={
            #             'file': file,
            #             'task_id': parse_task_id,
            #         },
            #         task_id=parse_task_id,
            #     ),
            #     TradeProfitRecalculationTask().subtask(
            #         kwargs={
            #             'task_id': update_profit_task_id,
            #         },
            #         task_id=update_profit_task_id,
            #     )
            # ).apply_async()

        elif request_exchange == 'binance':
            # raise ParseError('The Binance parser is still in development!')

            task = chain(
                ParseBinanceStatsTask().subtask(
                    kwargs={
                        'file': file,
                        'task_id': parse_task_id,
                    },
                    task_id=parse_task_id,
                ),
                TradeProfitRecalculationTask().subtask(
                    kwargs={
                        'task_id': update_profit_task_id,
                    },
                    task_id=update_profit_task_id,
                )
            ).apply_async()

        return Response('ok', status=204)
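A minimal sketch of the id pre-assignment trick used above (all task names are hypothetical): generating the task ids before dispatch lets the caller persist them first, and passing task_id as a signature option makes Celery reuse those ids when the chain runs.

import uuid

from celery import Celery, chain

app = Celery('sketch', broker='memory://')

@app.task
def parse(file, task_id=None):
    ...

@app.task
def recalc(parse_result, task_id=None):
    ...

parse_id, recalc_id = str(uuid.uuid4()), str(uuid.uuid4())
# the ids can be stored here, before the chain is dispatched
chain(
    parse.subtask(kwargs={'file': 'stats.csv', 'task_id': parse_id},
                  task_id=parse_id),
    recalc.subtask(kwargs={'task_id': recalc_id}, task_id=recalc_id),
).apply_async()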
Example #51
0
def build_sketch_analysis_pipeline(sketch_id,
                                   searchindex_id,
                                   user_id,
                                   analyzer_names=None,
                                   analyzer_kwargs=None,
                                   timeline_id=None):
    """Build a pipeline for sketch analysis.

    If no analyzer_names is passed in then we assume auto analyzers should be
    run and get this list from the configuration. Parameters to the analyzers
    can be passed in to this function, otherwise they will be taken from the
    configuration. Either default kwargs for auto analyzers or defaults for
    manually run analyzers.

    Args:
        sketch_id (int): The ID of the sketch to analyze.
        searchindex_id (int): The ID of the searchindex to analyze.
        user_id (int): The ID of the user who started the analyzer.
        analyzer_names (list): List of analyzers to run.
        analyzer_kwargs (dict): Arguments to the analyzers.
        timeline_id (int): Optional int of the timeline to run the analyzer on.

    Returns:
        A tuple of a Celery chain with analysis tasks (or None if no analyzers
        are enabled) and the analysis session.
    """
    tasks = []

    if not analyzer_names:
        analyzer_names = current_app.config.get('AUTO_SKETCH_ANALYZERS', [])
        if not analyzer_kwargs:
            analyzer_kwargs = current_app.config.get(
                'AUTO_SKETCH_ANALYZERS_KWARGS', {})

    # Exit early if no sketch analyzers are configured to run.
    if not analyzer_names:
        return None, None

    if not analyzer_kwargs:
        analyzer_kwargs = current_app.config.get('ANALYZERS_DEFAULT_KWARGS',
                                                 {})

    if user_id:
        user = User.query.get(user_id)
    else:
        user = None

    sketch = Sketch.query.get(sketch_id)
    analysis_session = AnalysisSession(user, sketch)

    analyzers = manager.AnalysisManager.get_analyzers(analyzer_names)
    for analyzer_name, analyzer_cls in analyzers:
        if not analyzer_cls.IS_SKETCH_ANALYZER:
            continue

        kwargs = analyzer_kwargs.get(analyzer_name, {})
        searchindex = SearchIndex.query.get(searchindex_id)

        timeline = None
        if timeline_id:
            timeline = Timeline.query.get(timeline_id)

        if not timeline:
            timeline = Timeline.query.filter_by(
                sketch=sketch, searchindex=searchindex).first()

        analysis = Analysis(name=analyzer_name,
                            description=analyzer_name,
                            analyzer_name=analyzer_name,
                            parameters=json.dumps(kwargs),
                            user=user,
                            sketch=sketch,
                            timeline=timeline)
        analysis.set_status('PENDING')
        analysis_session.analyses.append(analysis)
        db_session.add(analysis)
        db_session.commit()

        tasks.append(
            run_sketch_analyzer.s(sketch_id,
                                  analysis.id,
                                  analyzer_name,
                                  timeline_id=timeline_id,
                                  **kwargs))

    # Commit the analysis session to the database.
    db_session.add(analysis_session)
    db_session.commit()

    if current_app.config.get('ENABLE_EMAIL_NOTIFICATIONS'):
        tasks.append(run_email_result_task.s(sketch_id))

    if not tasks:
        return None, None

    return chain(tasks), analysis_session
Example #52
0
    def __init__(self, file_, addon=None, listed=None, final_task=None):
        self.addon = addon
        self.file = None
        self.prev_file = None

        if isinstance(file_, FileUpload):
            assert listed is not None
            channel = (amo.RELEASE_CHANNEL_LISTED if listed else
                       amo.RELEASE_CHANNEL_UNLISTED)
            is_mozilla_signed = False

            # We're dealing with a bare file upload. Try to extract the
            # metadata that we need to match it against a previous upload
            # from the file itself.
            try:
                addon_data = parse_addon(file_, minimal=True)
                is_mozilla_signed = addon_data.get(
                    'is_mozilla_signed_extension', False)
            except ValidationError as form_error:
                log.info('could not parse addon for upload {}: {}'
                         .format(file_.pk, form_error))
                addon_data = None
            else:
                file_.update(version=addon_data.get('version'))

            assert not file_.validation

            validation_tasks = self.create_file_upload_tasks(
                upload_pk=file_.pk,
                channel=channel,
                is_mozilla_signed=is_mozilla_signed
            )
        elif isinstance(file_, File):
            # The listed flag for a File object should always come from
            # the status of its owner Addon. If the caller tries to override
            # this, something is wrong.
            assert listed is None

            channel = file_.version.channel
            is_mozilla_signed = file_.is_mozilla_signed_extension

            self.file = file_
            self.addon = self.file.version.addon
            addon_data = {'guid': self.addon.guid,
                          'version': self.file.version.version}

            validation_tasks = [
                tasks.create_initial_validation_results.si(),
                tasks.validate_file.s(file_.pk),
                tasks.handle_file_validation_result.s(file_.pk)
            ]
        else:
            raise ValueError

        if final_task:
            validation_tasks.append(final_task)

        self.task = chain(*validation_tasks)

        # Create a cache key for the task, so multiple requests to validate the
        # same object do not result in duplicate tasks.
        opts = file_._meta
        self.cache_key = 'validation-task:{0}.{1}:{2}:{3}'.format(
            opts.app_label, opts.object_name, file_.pk, listed)
Example #53
0
"""
    Models an analysis request for a piece of music including its location
"""
song_fields = api.model(
    'SongModel', {
        'source_path':
        fields.String(description='The path of the song to analyze',
                      required=True),
        'force':
        fields.Boolean(description='Should every analysis run', required=False)
    })

pipeline = chain(
    check_done.s().set(priority=1),
    add_bpm.s().set(priority=2),
    add_emotions.s().set(priority=3),
    add_metering.s().set(priority=4),
    add_similarity_features.s().set(priority=5),
)


def add_to_pipeline(data, song_path):
    if song_path.endswith(("mp3", "wav")):
        id = get_song_id(song_path)
        force = data['force'] if 'force' in data else False
        config = data['config'] if 'config' in data else {}
        song = dict({
            'audio_id': id,
            'source_path': song_path,
            'FORCE': force,
            'config': config,
Example #54
0
 def test_priority(self, manager):
     c = chain(return_priority.signature(priority=3))()
     assert c.get(timeout=TIMEOUT) == "Priority: 3"
Example #55
0
 def get_deletion_task(cls, instance, serialized_instance):
     serialized_vm = core_utils.serialize_instance(instance.vm)
     return chain(
         core_tasks.DeletionTask().si(serialized_instance),
         VirtualMachineUpdatedNotificationTask().si(serialized_vm),
     )
Example #56
0
 def test_single_chain(self, manager):
     c = chain(add.s(3, 4))()
     assert c.get(timeout=TIMEOUT) == 7
Example #57
0
def appliance_action(request, appliance_id, action, x=None):
    if not request.user.is_authenticated():
        return go_home(request)
    try:
        appliance = Appliance.objects.get(id=appliance_id)
    except ObjectDoesNotExist:
        messages.error(
            request,
            'Appliance with ID {} does not exist!.'.format(appliance_id))
        return go_back_or_home(request)
    if not can_operate_appliance_or_pool(appliance, request.user):
        messages.error(
            request,
            'This appliance belongs either to some other user or nobody.')
        return go_back_or_home(request)
    if action == "start":
        if appliance.power_state != Appliance.Power.ON:
            chain(appliance_power_on.si(appliance.id),
                  (wait_appliance_ready if appliance.preconfigured else
                   mark_appliance_ready).si(appliance.id))()
            messages.success(request, 'Initiated launch of appliance.')
            return go_back_or_home(request)
        else:
            messages.info(request, 'Appliance was already powered on.')
            return go_back_or_home(request)
    elif action == "reboot":
        if appliance.power_state == Appliance.Power.ON:
            chain(appliance_reboot.si(appliance.id),
                  (wait_appliance_ready if appliance.preconfigured else
                   mark_appliance_ready).si(appliance.id))()
            messages.success(request, 'Initiated reboot of appliance.')
            return go_back_or_home(request)
        else:
            messages.error(request,
                           'Only powered on appliances can be rebooted')
            return go_back_or_home(request)
    elif action == "stop":
        if appliance.power_state != Appliance.Power.OFF:
            appliance_power_off.delay(appliance.id)
            messages.success(request, 'Initiated stop of appliance.')
            return go_back_or_home(request)
        else:
            messages.info(request, 'Appliance was already powered off.')
            return go_back_or_home(request)
    elif action == "suspend":
        if appliance.power_state != Appliance.Power.SUSPENDED:
            appliance_suspend.delay(appliance.id)
            messages.success(request, 'Initiated suspend of appliance.')
            return go_back_or_home(request)
        else:
            messages.info(request, 'Appliance was already suspended.')
            return go_back_or_home(request)
    elif action == "kill":
        Appliance.kill(appliance)
        messages.success(request, 'Kill initiated.')
        return go_back_or_home(request)
    elif action == "dont_expire":
        if not request.user.is_superuser:
            messages.error(
                request,
                'Disabling expiration time is allowed only for superusers.')
            return go_back_or_home(request)
        with transaction.atomic():
            appliance.leased_until = None
            appliance.save()
        messages.success(request, 'Lease disabled successfully. Be careful.')
        return go_back_or_home(request)
    elif action == "set_lease":
        if not can_operate_appliance_or_pool(appliance, request.user):
            messages.error(
                request,
                'This appliance belongs either to some other user or nobody.')
            return go_back_or_home(request)
        appliance.prolong_lease(time=int(x))
        messages.success(request, 'Lease prolonged successfully.')
        return go_back_or_home(request)
    else:
        messages.error(request, "Unknown action '{}'".format(action))
Example #58
0
    def _run_reindex_tasks(self, models, queue):
        apply_async_kwargs = {'priority': 0}
        if queue:
            log.info('Adding indexing tasks to queue %s', queue)
            apply_async_kwargs['queue'] = queue
        else:
            log.info('Adding indexing tasks to default queue')

        index_time = timezone.now()
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')

        for doc in registry.get_documents(models):
            queryset = doc().get_queryset()
            # Get latest object from the queryset

            app_label = queryset.model._meta.app_label
            model_name = queryset.model.__name__

            index_name = doc._index._name
            new_index_name = "{}_{}".format(index_name, timestamp)
            # Set index temporarily for indexing,
            # this will only get set during the running of this command
            doc._index._name = new_index_name

            pre_index_task = create_new_es_index.si(
                app_label=app_label,
                model_name=model_name,
                index_name=index_name,
                new_index_name=new_index_name)

            indexing_tasks = self._get_indexing_tasks(
                app_label=app_label,
                model_name=model_name,
                queryset=queryset,
                index_name=new_index_name,
                document_class=str(doc))

            post_index_task = switch_es_index.si(app_label=app_label,
                                                 model_name=model_name,
                                                 index_name=index_name,
                                                 new_index_name=new_index_name)

            # Task to run in order to add the objects
            # that have been inserted into the database while indexing_tasks was running.
            # We pass the creation time of the latest object, so it's possible to index later items
            missed_index_task = index_missing_objects.si(
                app_label=app_label,
                model_name=model_name,
                document_class=str(doc),
                index_generation_time=index_time)

            # http://celery.readthedocs.io/en/latest/userguide/canvas.html#chords
            chord_tasks = chord(header=indexing_tasks, body=post_index_task)
            if queue:
                pre_index_task.set(queue=queue)
                chord_tasks.set(queue=queue)
                missed_index_task.set(queue=queue)
            # http://celery.readthedocs.io/en/latest/userguide/canvas.html#chain
            chain(pre_index_task, chord_tasks,
                  missed_index_task).apply_async(**apply_async_kwargs)

            message = (
                "Successfully issued tasks for {}.{}, total {} items".format(
                    app_label, model_name, queryset.count()))
            log.info(message)
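A condensed sketch of the chord-inside-a-chain layout built above (hypothetical tasks and index names): a preparation task, a chord whose body only runs after every header task has finished, and a follow-up task, all executed in order.

from celery import Celery, chain, chord

app = Celery('sketch', broker='memory://')

@app.task
def create_index(name):
    return name

@app.task
def index_chunk(index_name, chunk):
    return len(chunk)

@app.task
def switch_index(name):
    return name

@app.task
def index_missing(name):
    return name

# header tasks may run in parallel; the immutable body ignores their results
indexing = [index_chunk.si('docs_v2', chunk) for chunk in (['a'], ['b'], ['c'])]
chain(
    create_index.si('docs_v2'),
    chord(header=indexing, body=switch_index.si('docs_v2')),
    index_missing.si('docs_v2'),
).apply_async()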
Example #59
0
def main():
    duplicate = multiply.s(2)

    task = chain(multiply.s(4, 5), multiply.s(2))
    promise = task.delay()
    print(promise.get())
Example #60
0
def mail(email: str, subject: str, template: Union[str, LazyI18nString],
         context: Dict[str, Any]=None, event: Event=None, locale: str=None,
         order: Order=None, headers: dict=None, sender: str=None, invoices: list=None,
         attach_tickets=False):
    """
    Sends out an email to a user. The mail will be sent synchronously or asynchronously depending on the installation.

    :param email: The email address of the recipient

    :param subject: The email subject. Should be localized to the recipient's locale or a lazy object that will be
        localized by being cast to a string.

    :param template: The filename of a template to be used. It will be rendered with the locale given in the locale
        argument and the context given in the next argument. Alternatively, you can pass a LazyI18nString and
        ``context`` will be used as the argument to a Python ``.format_map()`` call on the template.

    :param context: The context for rendering the template (see ``template`` parameter)

    :param event: The event this email is related to (optional). If set, this will be used to determine the sender,
        a possible prefix for the subject and the SMTP server that should be used to send this email.

    :param order: The order this email is related to (optional). If set, this will be used to include a link to the
        order below the email.

    :param headers: A dict of custom mail headers to add to the mail

    :param locale: The locale to be used while evaluating the subject and the template

    :param sender: Set the sender email address. If not set and ``event`` is set, the event's default will be used,
        otherwise the system default.

    :param invoices: A list of invoices to attach to this email.

    :param attach_tickets: Whether to attach tickets to this email, if they are available to download.

    :raises MailOrderException: on obvious, immediate failures. Not raising an exception does not necessarily mean
        that the email has been sent, just that it has been queued by the email backend.
    """
    if email == INVALID_ADDRESS:
        return

    headers = headers or {}

    with language(locale):
        if isinstance(context, dict) and event:
            for k, v in event.meta_data.items():
                context['meta_' + k] = v

        if isinstance(context, dict) and order:
            try:
                context.update({
                    'invoice_name': order.invoice_address.name,
                    'invoice_company': order.invoice_address.company
                })
            except InvoiceAddress.DoesNotExist:
                context.update({
                    'invoice_name': '',
                    'invoice_company': ''
                })
        renderer = ClassicMailRenderer(None)
        content_plain = body_plain = render_mail(template, context)
        subject = str(subject).format_map(context)
        sender = sender or (event.settings.get('mail_from') if event else settings.MAIL_FROM)
        if event:
            sender = formataddr((str(event.name), sender))
        else:
            sender = formataddr((settings.PRETIX_INSTANCE_NAME, sender))

        subject = str(subject)
        signature = ""

        bcc = []
        if event:
            renderer = event.get_html_mail_renderer()
            if event.settings.mail_bcc:
                bcc.append(event.settings.mail_bcc)

            if event.settings.mail_from == settings.DEFAULT_FROM_EMAIL and event.settings.contact_mail and not headers.get('Reply-To'):
                headers['Reply-To'] = event.settings.contact_mail

            prefix = event.settings.get('mail_prefix')
            if prefix and prefix.startswith('[') and prefix.endswith(']'):
                prefix = prefix[1:-1]
            if prefix:
                subject = "[%s] %s" % (prefix, subject)

            body_plain += "\r\n\r\n-- \r\n"

            signature = str(event.settings.get('mail_text_signature'))
            if signature:
                signature = signature.format(event=event.name)
                body_plain += signature
                body_plain += "\r\n\r\n-- \r\n"

            if order:
                if order.testmode:
                    subject = "[TESTMODE] " + subject
                body_plain += _(
                    "You are receiving this email because you placed an order for {event}."
                ).format(event=event.name)
                body_plain += "\r\n"
                body_plain += _(
                    "You can view your order details at the following URL:\n{orderurl}."
                ).replace("\n", "\r\n").format(
                    event=event.name, orderurl=build_absolute_uri(
                        order.event, 'presale:event.order', kwargs={
                            'order': order.code,
                            'secret': order.secret
                        }
                    )
                )
            body_plain += "\r\n"

        try:
            body_html = renderer.render(content_plain, signature, str(subject), order)
        except:
            logger.exception('Could not render HTML body')
            body_html = None

        send_task = mail_send_task.si(
            to=[email],
            bcc=bcc,
            subject=subject,
            body=body_plain,
            html=body_html,
            sender=sender,
            event=event.id if event else None,
            headers=headers,
            invoices=[i.pk for i in invoices] if invoices else [],
            order=order.pk if order else None,
            attach_tickets=attach_tickets
        )

        if invoices:
            task_chain = [invoice_pdf_task.si(i.pk).on_error(send_task) for i in invoices if not i.file]
        else:
            task_chain = []

        task_chain.append(send_task)
        chain(*task_chain).apply_async()
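A reduced sketch of the fallback wiring above, with hypothetical tasks: the send signature doubles as the error callback of every attachment task, so the email still goes out even if rendering an invoice PDF fails.

from celery import Celery, chain

app = Celery('sketch', broker='memory://')

@app.task
def render_pdf(invoice_pk):
    ...

@app.task
def send_mail_task(to, subject):
    ...

send = send_mail_task.si('alice@example.com', 'Your order')
# each PDF task falls back to sending the mail if it fails
steps = [render_pdf.si(pk).on_error(send) for pk in (1, 2)]
steps.append(send)
chain(*steps).apply_async()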