Example #1
def iterate_report_lines_in_task(report: Report, task: Task):
    """Iterate through the lines in a :class:`Report` while keeping a celery :class:`Task` informed of progress."""
    lines = report.get_lines()
    len_lines = len(lines)
    for i, line in enumerate(lines, start=1):
        if not task.request.called_directly:
            task.update_state(state='PROGRESS',
                              meta={
                                  'task': 'parsing',
                                  'current_line_number': i,
                                  'current_line': line,
                                  'total_lines': len_lines,
                              })
        yield line
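
As a usage sketch, the generator above would typically be driven from a bound Celery task; everything here except iterate_report_lines_in_task itself (the app wiring, the task name, and the Report lookup) is an assumption:

from celery import Celery

app = Celery("reports", broker="memory://")

@app.task(bind=True)
def parse_report(self, report_id):
    # Hypothetical lookup; the generator only needs a Report instance.
    report = Report.objects.get(pk=report_id)
    parsed = 0
    for line in iterate_report_lines_in_task(report, self):
        parsed += 1  # real per-line work would happen here
    return parsed

When the task is called directly rather than through a worker, task.request.called_directly is true and the generator skips update_state, so the same code works in both contexts.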
Example #2
    def handle(self, *args, **options):
        settings.LOG_TO_STREAM = True
        now = datetime.datetime.utcnow()
        
        feeds = Feed.objects.filter(
            next_scheduled_update__lte=now, 
            active=True
        ).exclude(
            active_subscribers=0
        ).order_by('?')
        
        if options['force']:
            feeds = Feed.objects.all().order_by('pk')

        print " ---> Tasking %s feeds..." % feeds.count()
        
        publisher = Task.get_publisher()

        feed_queue = []
        size = 12
        for f in feeds:
            f.queued_date = datetime.datetime.utcnow()
            f.set_next_scheduled_update()

        for feed_queue in (feeds[pos:pos + size] for pos in xrange(0, len(feeds), size)):
            print feed_queue
            feed_ids = [feed.pk for feed in feed_queue]
            print feed_ids
            UpdateFeeds.apply_async(args=(feed_ids,), queue='update_feeds', publisher=publisher)

        publisher.connection.close()
Example #3
def crawl(id=None, screen_name=None, cursor=-1, crawl_mode=False, routing_key=DEFAULT_ROUTING_KEY):
    logger = Task.get_logger()
    logger.info('[crawl] starting crawl(id=%s, screen_name=%s, cursor=%s)' % (id, screen_name, cursor))
    twitter = get_twitter()
    params = {
        "cursor": cursor
    }
    if id:
        params['user_id'] = id
    elif screen_name:
        params['screen_name'] = screen_name
    result = twitter.followers.ids(**params)

    # block while we grab the current user's info
    del params['cursor']
    source_id = twitter.users.show(**params)['id']
    source = sync_user(source_id)

    for follower_id in result['ids']:
        sync_user.apply_async(args=[follower_id,], kwargs={'source':source, 'crawl_mode':crawl_mode}, routing_key=routing_key)

    if result['next_cursor']:
        logger.info("[crawl] continuing at next_cursor=%s" % result['next_cursor'])
        crawl.apply_async(
            args=[source_id,],
            kwargs={
                'cursor':result['next_cursor'], 
                'crawl_mode':crawl_mode, 
                'routing_key':routing_key,
            }, 
            routing_key=routing_key
        )
Example #4
def execute_transform(spill, client_id = "Unknown"):
    """
    MAUS Celery transform task used by sub-processes to execute jobs
    from clients. Proxies of this task are invoked by clients. This
    applies the current transform to the spill and returns the new
    spill.
    @param spill JSON document string holding spill.
    @param client_id ID of client who submitted job.
    @return JSON document string holding new spill.
    @throws Exception if there is a problem when process is called.
    """
    logger = Task.get_logger()  
    if logger.isEnabledFor(logging.INFO):
        logger.info("Task invoked by %s" % client_id)
    try:
        spill_json = json.loads(spill)
        if "maus_event_type" in spill_json.keys() and \
           spill_json["maus_event_type"] != "Spill":
            return spill
        else:
            return maus_cpp.converter.string_repr(MausTransform.process(spill))
    except Exception as exc: # pylint:disable = W0703
        # Filter exceptions so no unPicklable exception causes
        # problems.
        status = {}
        status["error"] = str(exc.__class__)
        status["message"] = str(exc)
        raise WorkerProcessException(MausConfiguration.transform,
            status)
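
A hedged sketch of the client side: assuming execute_transform is registered with a Celery app the client can reach, the client serializes the spill to JSON, submits it, and deserializes the result:

import json

spill = json.dumps({"maus_event_type": "Spill", "recon_events": []})
async_result = execute_transform.delay(spill, client_id="client-0")
new_spill = json.loads(async_result.get(timeout=60))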
Example #5
def progress(request):
    """ Check status of task. """
    if 'delete' in request.GET:
        models.MachineCache.objects.all().delete()
        models.InstituteCache.objects.all().delete()
        models.PersonCache.objects.all().delete()
        models.ProjectCache.objects.all().delete()
        return render_to_response('main.html', {'content': 'Deleted'},
                                  context_instance=RequestContext(request))

    if request.method == 'POST':
        if 'task_id' in request.POST:
            result = Task.AsyncResult(request.POST['task_id'])
            if result.failed():
                value = {
                    'info': {},
                    'ready': result.ready(),
                }
            else:
                value = {
                    'info': result.info,
                    'ready': result.ready(),
                }
            return HttpResponse(json.dumps(value),
                                content_type="application/json")
    # NOTE: any other request falls through and returns None, which Django
    # rejects; a production view should return an HttpResponse here instead.
    return None
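
On the client side this view is usually polled until "ready" flips to true; a minimal sketch with requests, where the URL is an assumption:

import time
import requests

def wait_for_task(task_id, url="http://localhost:8000/progress/"):
    """Poll the progress view until the task completes, then return its info."""
    while True:
        value = requests.post(url, data={"task_id": task_id}).json()
        if value["ready"]:
            return value["info"]
        time.sleep(1.0)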
Example #6
File: tasks.py Project: koh1/sim_ds
def retrieve_mbs_result(target_task_id):
    logger = Task.get_logger()
    r = AsyncResult(target_task_id)
    sr = SimulationResult.objects.get(task_id__exact=target_task_id)
#    sr = SimulationResult.objects.get(sim_id__exact=r['sim_id'])
    logger.info(r)

    while not r.ready():
        time.sleep(0.1)

    result = json.loads(r.result)
    
    if result['exit_code'] == 0:
        ## success
        sr.sim_id = result['sim_id']
        
        ## rewrite these if you add log collections
        sr.collections = json.dumps([
                "%s_nwk" % sr.sim_id,
                "%s_node" % sr.sim_id,
                "%s_msg" % sr.sim_id,
                "%s_usr" % sr.sim_id,
                "%s_map" % sr.sim_id,
                ])
        sr.task_progress = 100
        sr.task_status = "SUCCESS"
        sr.save()
    else:
        sr.sim_id = "NO SIM_ID (FAILED)"
        sr.task_status = "FAILED"
        sr.task_progress = 0
        sr.save()
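
Outside a task body, the sleep-poll loop above can be replaced by AsyncResult.get, which blocks until the result arrives; a minimal equivalent reusing target_task_id from the example (note that recent Celery versions reject a synchronous result.get() inside a running task by default):

import json
from celery.result import AsyncResult

r = AsyncResult(target_task_id)
result = json.loads(r.get(timeout=600))  # blocks; raises if the task failed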
Example #7
 def queue_new_feeds(self):
     new_feeds = UserSubscription.objects.filter(user=self.user, feed__fetched_once=False).values("feed_id")
     new_feeds = list(set([f["feed_id"] for f in new_feeds]))
     logging.info(" ---> [%s] Queueing NewFeeds: (%s) %s" % (self.user, len(new_feeds), new_feeds))
     size = 4
     publisher = Task.get_publisher(exchange="new_feeds")
     for t in (new_feeds[pos : pos + size] for pos in xrange(0, len(new_feeds), size)):
         NewFeeds.apply_async(args=(t,), queue="new_feeds", publisher=publisher)
     publisher.connection.close()
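
The slicing generator used for batching here recurs throughout these examples; extracted as a standalone helper (in Python 3 spelling), it looks like this:

def chunks(items, size):
    """Yield successive slices of `items`, each at most `size` long."""
    for pos in range(0, len(items), size):
        yield items[pos:pos + size]

assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]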
Example #8
 def __call__(self, *args, **kwargs):
     # if you don't provide "user" as a kwarg, this just acts like
     # a normal ol' boring task
     if "user" in kwargs:
         user = kwargs.pop("user")
         CeleryTaskTracker.objects.create(taskid=self.request.id,
                                          taskclass=self.__class__.__name__,
                                          owner=user)
         get_pulp_server(user=user)
     self.errors = []
     return Task.__call__(self, *args, **kwargs)
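
For context, a self-contained sketch of how this kind of __call__ override is wired into a custom base task; the in-memory TASK_OWNERS dict stands in for the CeleryTaskTracker model, and every name besides Task.__call__ is an assumption:

from celery import Celery, Task

app = Celery("tracked", broker="memory://")
TASK_OWNERS = {}  # stand-in for a persistent tracking model

class TrackedTask(Task):
    def __call__(self, *args, **kwargs):
        # Only track when the caller opts in with a `user` kwarg.
        if "user" in kwargs:
            TASK_OWNERS[self.request.id] = kwargs.pop("user")
        self.errors = []
        return Task.__call__(self, *args, **kwargs)

@app.task(base=TrackedTask)
def add(x, y):
    return x + y

Called without user, add behaves like any other task; with user=..., ownership is recorded before the body runs.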
Example #9
 def queue_new_feeds(self, new_feeds=None):
     if not new_feeds:
         new_feeds = UserSubscription.objects.filter(user=self.user, 
                                                     feed__fetched_once=False, 
                                                     active=True).values('feed_id')
         new_feeds = list(set([f['feed_id'] for f in new_feeds]))
     logging.user(self.user, "~BB~FW~SBQueueing NewFeeds: ~FC(%s) %s" % (len(new_feeds), new_feeds))
     size = 4
     publisher = Task.get_publisher(exchange="new_feeds")
     for t in (new_feeds[pos:pos + size] for pos in xrange(0, len(new_feeds), size)):
         NewFeeds.apply_async(args=(t,), queue="new_feeds", publisher=publisher)
     publisher.connection.close()   
Example #11
    def task_feeds(cls, feeds, queue_size=12):
        print " ---> Tasking %s feeds..." % feeds.count()

        publisher = Task.get_publisher()

        feed_queue = []
        for f in feeds:
            f.queued_date = datetime.datetime.utcnow()
            f.set_next_scheduled_update()

        for feed_queue in (feeds[pos : pos + queue_size] for pos in xrange(0, len(feeds), queue_size)):
            feed_ids = [feed.pk for feed in feed_queue]
            UpdateFeeds.apply_async(args=(feed_ids,), queue="update_feeds", publisher=publisher)

        publisher.connection.close()
Example #12
def sync_user(twitter_id, source=None, crawl_mode=False):
    logger = Task.get_logger()
    logger.info("[sync_user] starting sync_user(%s)" % twitter_id)
    try:
        user = TwitterUser.objects.get(id__exact=str(twitter_id))
    except TwitterUser.DoesNotExist:
        user = TwitterUser(id=twitter_id)
    user.update_from_twitter()
    user.save()
    if source:
        user.follow(source)

    if crawl_mode:
        if user.screen_name:
            crawl.delay(screen_name=user.screen_name)
        else:
            logger.info("[sync_user] got a user with an empty name")
    return user
Example #13
def run_task(
    task: Task,
    router: APIRouter,
    get_route: str,
    force_foreground: Optional[bool] = False,
) -> Dict[str, Any]:

    if force_foreground or config.NEREID_FORCE_FOREGROUND:
        response = dict(data=task(),
                        task_id="foreground",
                        result_route="foreground")

    else:
        response = standard_json_response(task.apply_async(), router,
                                          get_route)

    return response
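
A hedged sketch of how a FastAPI endpoint might use this helper; the router, the route name, and the background_job task are all assumptions:

from fastapi import APIRouter

router = APIRouter()

@router.post("/run")
def run_endpoint() -> dict:
    # background_job.s() is a Celery signature: calling it runs the task
    # inline (the foreground branch), while apply_async dispatches it.
    return run_task(background_job.s(), router, get_route="get_run_result")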
Example #14
    def task_feeds(cls, feeds, queue_size=12):
        print " ---> Tasking %s feeds..." % feeds.count()

        publisher = Task.get_publisher()

        feed_queue = []
        for f in feeds:
            f.queued_date = datetime.datetime.utcnow()
            f.set_next_scheduled_update()

        for feed_queue in (feeds[pos:pos + queue_size]
                           for pos in xrange(0, len(feeds), queue_size)):
            feed_ids = [feed.pk for feed in feed_queue]
            UpdateFeeds.apply_async(args=(feed_ids, ),
                                    queue='update_feeds',
                                    publisher=publisher)

        publisher.connection.close()
Example #15
    def inner(request, *args):
        lock_id = '%s-%s-built-%s' % (datetime.date.today(), func.__name__,
                                      ",".join([str(a) for a in args]))

        if cache.add(lock_id, 'true', LOCK_EXPIRE):
            result = func(request, *args)
            cache.set(lock_id, result.task_id)
        else:
            task_id = cache.get(lock_id)
            if not task_id:
                return None

            cache.set(lock_id, "")
            result = Task.AsyncResult(task_id)
            if result.ready():
                result.forget()
                return None
        return result
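
inner is the closure returned by a decorator; a minimal sketch of the enclosing wrapper, assuming cache is Django's cache framework, LOCK_EXPIRE is a lifetime in seconds, and the decorator's name is invented:

import functools

LOCK_EXPIRE = 60 * 5  # assumed: lock lifetime in seconds

def once_per_day(func):
    """Launch `func` at most once per day per argument set."""
    @functools.wraps(func)
    def inner(request, *args):
        ...  # body exactly as in the example above
    return inner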
Example #16
File: tasks.py Project: koh1/sim_ds
def add(x, y):
    logger = Task.get_logger(task_name=u'decorator')
    logger.info("Adding %s + %s" % (x, y))
    return x + y
Example #17
 def test_annotate(self):
     with patch("celery.app.task.resolve_all_annotations") as anno:
         anno.return_value = [{"FOO": "BAR"}]
         Task.annotate()
         self.assertEqual(Task.FOO, "BAR")
Example #18
 def test_annotate(self):
     with patch('celery.app.task.resolve_all_annotations') as anno:
         anno.return_value = [{'FOO': 'BAR'}]
         Task.annotate()
         self.assertEqual(Task.FOO, 'BAR')
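
These tests patch resolve_all_annotations directly; in application code the same machinery is normally driven from configuration via Celery's task_annotations setting, e.g.:

from celery import Celery

app = Celery("demo", broker="memory://")
# Annotate every task with a rate limit; resolve_all_annotations is what
# applies entries like these onto the task classes.
app.conf.task_annotations = {"*": {"rate_limit": "10/s"}}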
Example #19
File: tasks.py Project: koh1/sim_ds
def mbs_exec(conf):
    logger = Task.get_logger()
    # NOTE: `cmd` is not defined in this excerpt; presumably it is built from
    # `conf` in the original source.
    return commands.getstatusoutput(cmd)
Example #22
def test_celery_task_can_execute(a_dummy_celery_task: Task) -> None:
    assert a_dummy_celery_task.delay().get(timeout=2) == 42
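
A minimal sketch of the fixture such a test assumes (the fixture name comes from the test; the rest is guessed), using eager mode so .delay() executes in-process:

import pytest
from celery import Celery

@pytest.fixture
def a_dummy_celery_task():
    app = Celery("tests")
    app.conf.task_always_eager = True  # run tasks synchronously, in-process

    @app.task
    def dummy():
        return 42

    return dummy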
Example #23
def update_flickrify_cache(uuid):
    logger = Task.get_logger()
    logger.info("[update_flickrify_cache] starting work on %s" % uuid)
    do_flickrify(uuid, force_refresh=True)
Example #24
File: tasks.py Project: koh1/sim_ds
def exec_d2xp_mbs(conf, scale, num_area):
    logger = Task.get_logger()

    conf_pst_fix = datetime.datetime.today().strftime("%Y%m%d_%H%M%S")
    fo = open("%s/message_simulator/config_%s.yml" % (os.environ['HOME'], conf_pst_fix,) , "w")
    fo.write(yaml.dump(conf))
    fo.close()

    ## routing configuration
    rt_conf_file = "conf/routing_%d_area%d.csv" % (scale, num_area)

    ## node_spec 
    nd_spec_file = "conf/node_spec_%d.yml" % scale

    ## network definition
    nw_def_file = "conf/network_%d.yml" % scale

    ## area definition
    area_def_file = "conf/area_info_%d_area%d.csv" % (scale, num_area)
    
    cdir = "%s/message_simulator" % os.environ['HOME']
    cmd = "python d2xp_sim_system.py config_%s.yml %s %s %s %s" % (conf_pst_fix,
                                                                   rt_conf_file, 
                                                                   nd_spec_file,
                                                                   nw_def_file,
                                                                   area_def_file)

    p = subprocess.Popen(cmd, cwd=cdir, shell=True,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    
    ext_code = p.wait()
    result = {}
    result['exit_code'] = ext_code
    result['stdout'] = p.stdout.readlines()
#    result['stdout'] = r"%s" % p.stdout
    result['stderr'] = p.stderr.readlines()
#    result['stderr'] = r"%s" % p.stderr
    logger.info(json.dumps(result, sort_keys=True, indent=2))

    ## Very poor implementation: these worker tasks are separated from the
    ## simulation program "mbs", so the simulation ID has to be scraped
    ## from the log output.
    sim_id = ""
    if ext_code == 0:
        # mbs completed successfully.
        for line in result['stdout']:
            items = line.split(' ')
            if items[0] == "Simulation":
                sim_id = items[1]
        if sim_id == "":
            ## the simulation may have failed despite the zero exit code
            sim_id = "may_be_failed_%s" % datetime.datetime.today().strftime("%Y%m%d%H%M%S")
    
    result['sim_id'] = sim_id
    task_id = exec_d2xp_mbs.request.id

    ## create and issue a task for retrieving the simulation result.
    ## this task will be got by main worker on GUI with MySQL Server
    r = retrieve_mbs_result.apply_async(args=[task_id], queue='MAIN')

    ## store the simulation result. the result will be stored in broker (RabbitMQ)
    return json.dumps(result)
Example #25
File: tasks.py Project: koh1/sim_ds
def exec_mbs():
    logger = Task.get_logger()
    # os.chdir() does not expand "~", so expand it explicitly.
    os.chdir(os.path.expanduser("~/message_simulator"))
    return os.environ['HOME']