    def get(self):
        from furious.async import Async
        from furious import context

        count = int(self.request.get('tasks', 5))

        # Create a new furious Context.
        with context.new() as ctx:

            # Set a completion event handler.
            log = Log()
            log.put()
            ctx.set_event_handler('complete',
                                  Async(context_complete, args=[ctx.id, log.key.id()]))

            # Insert some Asyncs.
            for i in xrange(count):
                queue = 'a-worker-1'

                if i % 2 == 0:
                    queue = 'z-worker-2'

                ctx.add(
                    target=async_worker, queue=queue,
                    args=[ctx.id, i, log.key.id()])
                logging.info('Added job %d to context.', i)

        # When the Context is exited, the tasks are inserted (if there are no
        # errors).
        logging.info('Async jobs for context batch inserted.')
        message = "Successfully inserted a group of %s Async jobs." % str(count)
        self.response.out.write(message)
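
The async_worker and context_complete targets referenced above are defined
elsewhere in the project. A minimal sketch of their expected shapes (the
bodies here are placeholders, assuming logging is imported):

def async_worker(context_id, number, log_id):
    """Hypothetical worker: performs the unit of work for one item."""
    logging.info('Worker %d running in context %s.', number, context_id)

def context_complete(context_id, log_id):
    """Hypothetical completion handler: runs once every Async has finished."""
    logging.info('All jobs in context %s are done.', context_id)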
Example #2
def process_exception(data):
    """Process the exception data."""

    project_id = data['project_id']

    repo = Repository.get_by_id(project_id)

    if not repo or not repo.enabled:
        raise Abort('Repo %s is not enabled' % project_id)

    unix_timestamp = data['timestamp']
    timestamp = datetime.fromtimestamp(unix_timestamp)
    exception = data['exception']
    message = data['message']
    frames = data['frames']
    stacktrace = data['stacktrace']

    issue = Issue(repo=repo.key, timestamp=timestamp, exception=exception,
                  message=message, frames=frames, stacktrace=stacktrace,
                  contacts=[])

    issue.put()

    logging.debug('Saved new issue for repo %s' % project_id)

    with context.new() as ctx:
        for filename, line_no, func, text in frames:
            ctx.add(target=notify,
                    args=(project_id, issue.key.id(), unix_timestamp, filename,
                          line_no, func, text, stacktrace))

    logging.debug('Inserted %s notify tasks' % ctx.insert_success)
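
For reference, a hypothetical payload illustrating the fields that
process_exception expects (the values are made up):

sample = {
    'project_id': 1234,                       # id of an enabled Repository
    'timestamp': 1400000000,                  # unix timestamp of the error
    'exception': 'ValueError',
    'message': 'invalid literal for int()',
    'frames': [('app/main.py', 42, 'handle', 'int(value)')],
    'stacktrace': 'Traceback (most recent call last): ...',
}
process_exception(sample)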
Example #3
def start(number_of_items):
    logging.info("******** Starting the process. ******** ")
    job_id = uuid.uuid4().hex
    logging.info("******* JOB ID: {}.".format(job_id))

    start_time = _get_current_datetime_as_string()

    rs = RunState(id=job_id)
    rs.count_map = {}

    cs = CompleteState(id=job_id)
    cs.number_of_items = number_of_items
    cs.complete = False

    ndb.put_multi([rs, cs])

    with context.new() as ctx:
        ctx.set_event_handler('complete', Async(
            completion_handler, args=[job_id, start_time]))

        for i in xrange(number_of_items):
            logging.info("###### JOB ITEM: {}.".format(i))
            ctx.add(target=run_process,
                    args=[job_id, i, _get_current_datetime_as_string()])

    logging.info("###### JOBS STARTED")

    return job_id
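
The completion_handler wired up above is not shown; one plausible sketch,
assuming it simply flips the CompleteState flag written in start():

def completion_handler(job_id, start_time):
    # Hypothetical: mark the job finished once every run_process Async
    # has completed.
    cs = CompleteState.get_by_id(job_id)
    cs.complete = True
    cs.put()
    logging.info("Job %s (started %s) is complete.", job_id, start_time)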
Example #4
def _tag_commits(repo, release):
    """Tag any untagged Commits with the given Release.

    Args:
        repo: the Repository to tag Commits for.
        release: the Release to tag with.
    """

    query = Commit.query(ancestor=repo.key).filter(Commit.release == None)

    with context.new() as ctx:
        # Associate past commits with the release. Fan out tasks to process
        # in batches.
        cursor = None
        while True:
            commit_keys, cursor, more = query.fetch_page(
                500, start_cursor=cursor, keys_only=True)

            for keys in chunk(commit_keys, 10):
                logging.debug('Inserting task to tag commit release')
                ctx.add(target=tag_commit,
                        args=(repo.key.id(), release.key.id(),
                              [key.id() for key in keys]))

            if not more or not commit_keys:
                break
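
The chunk helper used above is assumed by the example; a minimal sketch:

def chunk(items, size):
    """Yield successive size-sized slices of items (hypothetical helper)."""
    for i in xrange(0, len(items), size):
        yield items[i:i + size]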
Example #5
def _aggregate_trend_content(trends, location):
    """Insert tasks to aggregate content for the given trends."""

    with context.new() as ctx:
        for trend in trends:
            ctx.add(target=aggregate_content, queue=CONTENT_QUEUE,
                    args=(trend.name, location.name, trend.unix_timestamp()))
Example #6
def _aggregate_trend_content(trends, location):
    """Insert tasks to aggregate content for the given trends."""

    with context.new() as ctx:
        for trend in trends:
            ctx.add(target=aggregate_content,
                    queue=CONTENT_QUEUE,
                    args=(trend.name, location.name, trend.unix_timestamp()))
Example #7
    def get(self):
        query = self.request.get("query")
        curdir = os.getcwd()

        # create the context and start the first Async
        with context.new():
            build_and_start(query, curdir)

        self.response.out.write("starting grep for query: %s" % query)
Example #8
    def get(self):
        query = self.request.get('query')
        curdir = os.getcwd()

        # create the context and start the first Async
        with context.new():
            build_and_start(query, curdir)

        self.response.out.write('starting grep for query: %s' % query)
Example #9
def _insert_tasks(job_id, batch_size, task_count, task_run_time, use_batcher,
                  kwargs):
    if use_batcher:
        with context.new(batch_size=batch_size) as ctx:
            for count in xrange(task_count):
                ctx.add(run_task, args=(job_id, count, task_run_time),
                        **kwargs)
    elif batch_size > 1:
        for batch in chunks(range(task_count), batch_size):
            with context.new() as ctx:
                for count in batch:
                    ctx.add(run_task, args=(job_id, count, task_run_time),
                            **kwargs)
    else:
        with context.new() as ctx:
            for count in xrange(task_count):
                ctx.add(run_task, args=(job_id, count, task_run_time),
                        **kwargs)
Example #10
    def test_new_adds_to_registry(self):
        """Ensure new adds new contexts to the context registry."""
        from furious.context import Context
        from furious.context._local import get_local_context
        from furious.context import new

        ctx = new()

        self.assertIsInstance(ctx, Context)
        self.assertIn(ctx, get_local_context().registry)
Example #12
    def test_new_adds_to_registry(self):
        """Ensure new returns a new context."""
        from furious.context import Context
        from furious.context import _local_context
        from furious.context import new

        ctx = new()

        self.assertIsInstance(ctx, Context)
        self.assertIn(ctx, _local_context.registry)
Example #13
    def test_new_auto_context(self):
        """Ensure new returns a new AutoContext when batch size is specified.
        """
        from furious.context import AutoContext
        from furious.context import new

        batch_size = 100

        context = new(batch_size=batch_size)

        self.assertIsInstance(context, AutoContext)
        self.assertEqual(context.batch_size, batch_size)
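
Unlike a plain Context, an AutoContext inserts its tasks in batches of
batch_size as they are added, rather than waiting for the with block to
exit. A usage sketch (run_task is a hypothetical target):

from furious import context

with context.new(batch_size=100) as ctx:
    for i in xrange(1000):
        ctx.add(target=run_task, args=[i])  # inserted in batches of 100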
Example #14
def _insert_tasks(job_id, batch_size, task_count, task_run_time, use_batcher,
                  kwargs):
    if use_batcher:
        with context.new(batch_size=batch_size) as ctx:
            for count in xrange(task_count):
                ctx.add(run_task,
                        args=(job_id, count, task_run_time),
                        **kwargs)
    elif batch_size > 1:
        for batch in chunks(range(task_count), batch_size):
            with context.new() as ctx:
                for count in batch:
                    ctx.add(run_task,
                            args=(job_id, count, task_run_time),
                            **kwargs)
    else:
        with context.new() as ctx:
            for count in xrange(task_count):
                ctx.add(run_task,
                        args=(job_id, count, task_run_time),
                        **kwargs)
Example #16
    def get(self):
        sleep = self.request.get('sleep', 1)
        num = int(self.request.get('num', 1))
        queue = self.request.get('queue', 'default')

        logging.info('sleep: %s', sleep)
        logging.info('num: %s', num)
        logging.info('queue: %s', queue)

        with context.new() as ctx:
            for i in xrange(int(num)):
                ctx.add(sleeper, (sleep, ), queue=queue)

        self.response.out.write('Successfully inserted a group of Async jobs.')
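
The sleeper target is assumed by the handler above; a minimal sketch:

import time

def sleeper(duration):
    """Hypothetical worker: sleep for the requested number of seconds."""
    time.sleep(float(duration))
    logging.info('Slept for %s seconds.', duration)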
Example #17
def load_feed():
    stream = feedparser.parse("http://news.ycombinator.com/rss")

    if not stream:
        print "Missing stream?"
        return

    if not stream.feed:
        print "Missing feed?"
        print stream
        return

    print stream.feed.title

    with context.new() as ctx:
        for entry in stream.entries:
            ctx.add(target=strip_article, args=[entry.link])
Example #18
def aggregate():
    """Kick off the trend aggregation process."""

    logging.debug('Aggregation process started')

    # Only aggregate data for coarse-grained locations, e.g. countries
    locations = twitter.get_locations_with_trends(exclude=EXCLUDE_TYPES)
    logging.debug('Fetched %d locations from Twitter' % len(locations))

    # Fan out on locations, 15 per batch. Due to Twitter's 15 minute request
    # window, we space these batches out by 16 minutes.
    with context.new() as ctx:
        for i, batch in enumerate(chunk(locations, BATCH_SIZE)):
            ctx.add(target=aggregate_for_locations, args=(batch,),
                    queue=AGGREGATION_QUEUE,
                    task_args={'countdown': THROTTLE_TIME * i})

    logging.debug('Inserted %d fan-out tasks' % ctx.insert_success)
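
The constants are defined elsewhere; hypothetical values consistent with
the comment above (15 locations per batch, batches spaced 16 minutes apart
to stay inside Twitter's 15-minute rate window):

BATCH_SIZE = 15          # locations per aggregate_for_locations task
THROTTLE_TIME = 16 * 60  # seconds of countdown between successive batches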
Example #19
    def get(self):
        from furious import context
        from furious.batcher import Message
        from furious.batcher import MessageProcessor

        try:
            color, value, count = self.get_params()
        except (KeyError, AssertionError):
            response = {
                "success": False,
                "message": "Invalid parameters."
            }
            self.response.write(json.dumps(response))
            return

        payload = {
            "color": color,
            "value": value,
            "timestamp": time.mktime(datetime.datetime.utcnow().timetuple())
        }

        tag = "color"

        # create a context to insert multiple Messages
        with context.new() as ctx:
            # Add one message task to the context for each increment of count.
            for _ in xrange(count):
                # insert the message with the payload
                ctx.add(Message(task_args={"payload": payload, "tag": tag}))

        # Insert a processor to fetch the messages in batches. This should
        # always be inserted; the underlying logic keeps too many processors
        # from being inserted.
        processor = MessageProcessor(
            target=process_messages, args=(tag,), tag=tag,
            task_args={"countdown": 0})
        processor.start()

        response = {
            "success": True,
            "message": "Task inserted successfully with %s" % (payload,)
        }

        self.response.write(json.dumps(response))
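
get_params is defined elsewhere on the handler; a hypothetical version
consistent with the KeyError/AssertionError handling above:

    def get_params(self):
        # self.request.GET is a MultiDict; indexing a missing key raises
        # KeyError, and the assert covers malformed counts.
        params = dict((name, self.request.GET[name])
                      for name in ('color', 'value', 'count'))
        assert params['count'].isdigit()
        return params['color'], params['value'], int(params['count'])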
Example #20
def aggregate():
    """Kick off the trend aggregation process."""

    logging.debug('Aggregation process started')

    # Only aggregate data for coarse-grained locations, e.g. countries
    locations = twitter.get_locations_with_trends(exclude=EXCLUDE_TYPES)
    logging.debug('Fetched %d locations from Twitter' % len(locations))

    # Fan out on locations, 15 per batch. Due to Twitter's 15 minute request
    # window, we space these batches out by 16 minutes.
    with context.new() as ctx:
        for i, batch in enumerate(chunk(locations, BATCH_SIZE)):
            ctx.add(target=aggregate_for_locations,
                    args=(batch, ),
                    queue=AGGREGATION_QUEUE,
                    task_args={'countdown': THROTTLE_TIME * i})

    logging.debug('Inserted %d fan-out tasks' % ctx.insert_success)
Example #21
    def get(self):
        from furious import context
        from furious.batcher import Message
        from furious.batcher import MessageProcessor

        try:
            color, value, count = self.get_params()
        except (KeyError, AssertionError):
            response = {"success": False, "message": "Invalid parameters."}
            self.response.write(json.dumps(response))
            return

        payload = {
            "color": color,
            "value": value,
            "timestamp": time.mktime(datetime.datetime.utcnow().timetuple())
        }

        tag = "color"

        # create a context to insert multiple Messages
        with context.new() as ctx:
            # Add one message task to the context for each increment of count.
            for _ in xrange(count):
                # insert the message with the payload
                ctx.add(Message(task_args={"payload": payload, "tag": tag}))

        # Insert a processor to fetch the messages in batches. This should
        # always be inserted; the underlying logic keeps too many processors
        # from being inserted.
        processor = MessageProcessor(target=process_messages,
                                     args=(tag, ),
                                     tag=tag,
                                     task_args={"countdown": 0})
        processor.start()

        response = {
            "success": True,
            "message": "Task inserted successfully with %s" % (payload, )
        }

        self.response.write(json.dumps(response))
Example #22
def sync_users(cursor=None):
    """Insert tasks to sync Users with GitHub.

    Args:
        cursor: urlsafe cursor to begin fetching users at.
    """

    query = User.query()
    if cursor:
        cursor = Cursor(urlsafe=cursor)

    keys, cursor, more = query.fetch_page(100, start_cursor=cursor,
                                          keys_only=True)

    with context.new() as ctx:
        logging.debug('Inserting task to sync %s user accounts' % len(keys))
        ctx.add(target=_sync_users, args=([key.id() for key in keys],))

        if more:
            ctx.add(target=sync_users, kwargs={'cursor': cursor.urlsafe()})
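
The _sync_users batch target (note the leading underscore) is defined
elsewhere; a minimal hypothetical sketch:

def _sync_users(user_ids):
    """Hypothetical batch target: refresh each User from GitHub."""
    for user_id in user_ids:
        user = User.get_by_id(user_id)
        # Fetch the matching GitHub account and update user here.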
Example #23
def process_repo_push(repo, owner, push_data):
    """Process a push to a repo by collecting data for each commit

    Args:
        repo: Repository instance of the repo that was pushed to.
        owner: User instance of the owner of the repo pushed to.
        push_data: dict containing data for the push.
    """

    logging.debug('Processing push to repo %s' % repo.key.id())

    with context.new() as ctx:
        # Fan out tasks for each commit in the push.
        for commit in push_data['commits']:
            logging.debug('Inserting task for commit %s' % commit['id'])

            ctx.add(
                target=process_commit,
                args=(repo.key.id(), commit['id'], owner.key.id()))

    logging.debug('Inserted %d fan-out tasks' % ctx.insert_success)
Example #24
    def get(self):
        from furious.async import Async
        from furious import context

        # Create a new furious Context.
        with context.new(callbacks={'internal_vertex_combiner': l_combiner,
                                    'leaf_combiner': l_combiner,
                                    'success': example_callback_success}) as ctx:
            # "Manually" instantiate and add an Async object to the Context.
            async_task = Async(
                target=example_function, kwargs={'first': 'async'})
            ctx.add(async_task)
            logging.info('Added manual job to context.')

            # Instantiate and add an Async whose function creates another
            # Context, enabling extra fan-out of the job.
            async_task = Async(
                target=make_a_new_context_example, kwargs={'extra': 'async'})
            ctx.add(async_task)
            logging.info('Added sub context')

            # Use the shorthand style, note that add returns the Async object.
            for i in xrange(25):
                ctx.add(target=example_function, args=[i])
                logging.info('Added job %d to context.', i)

            # Instantiate and add an Async whose function creates another
            # Async, enabling portions of the job to run serially.
            async_task = Async(
                target=make_a_new_async_example, kwargs={'second': 'async'})
            ctx.add(async_task)
            logging.info('Added an Async that returns an Async.')

        # When the Context is exited, the tasks are inserted (if there are no
        # errors).

        logging.info('Async jobs for context batch inserted.')

        self.response.out.write('Successfully inserted a '
                                'group of Async jobs with Furious:{0}'.format(ctx.id))
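
The combiners and success callback wired into the Context above are
project code, not furious API; one plausible shape, with the signatures
being assumptions:

def l_combiner(results):
    # Hypothetical: reduce a list of child results to a single value.
    return sum(results or [])

def example_callback_success(context_id, result):
    # Hypothetical signature: log the combined result of the context.
    logging.info('Context %s succeeded with result: %s', context_id, result)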
Example #25
    def get(self):
        from furious.async import Async
        from furious import context

        count = int(self.request.get("tasks", 5))

        # Create a new furious Context.
        with context.new() as ctx:
            # Set a completion event handler.
            ctx.set_event_handler("complete", Async(context_complete, args=[ctx.id]))

            # Insert some Asyncs.
            for i in xrange(count):
                ctx.add(target=async_worker, args=[ctx.id, i])
                logging.info("Added job %d to context.", i)

        # When the Context is exited, the tasks are inserted (if there are no
        # errors).

        logging.info("Async jobs for context batch inserted.")

        self.response.out.write("Successfully inserted a group of Async jobs.")
Example #26
    def get(self):
        from furious.async import Async
        from furious import context

        # Create a new furious Context.
        with context.new() as ctx:
            # "Manually" instantiate and add an Async object to the Context.
            async_task = Async(target=example_function,
                               kwargs={'first': 'async'})
            ctx.add(async_task)
            logging.info('Added manual job to context.')

            # Use the shorthand style, note that add returns the Async object.
            for i in xrange(5):
                ctx.add(target=example_function, args=[i])
                logging.info('Added job %d to context.', i)

        # When the Context is exited, the tasks are inserted (if there are no
        # errors).

        logging.info('Async jobs for context batch inserted.')

        self.response.out.write('Successfully inserted a group of Async jobs.')
Example #27
    def get(self):
        from furious.async import Async
        from furious import context

        count = int(self.request.get('tasks', 5))

        # Create a new furious Context.
        with context.new() as ctx:
            # Set a completion event handler.
            ctx.set_event_handler('complete',
                                  Async(context_complete, args=[ctx.id]))

            # Insert some Asyncs.
            for i in xrange(count):
                ctx.add(target=async_worker, args=[ctx.id, i])
                logging.info('Added job %d to context.', i)

        # When the Context is exited, the tasks are inserted (if there are no
        # errors).

        logging.info('Async jobs for context batch inserted.')

        self.response.out.write('Successfully inserted a group of Async jobs.')
Example #28
    def get(self):
        from furious.async import Async
        from furious import context

        # Create a new furious Context.
        with context.new() as ctx:
            # "Manually" instantiate and add an Async object to the Context.
            async_task = Async(
                target=example_function, kwargs={'first': 'async'})
            ctx.add(async_task)
            logging.info('Added manual job to context.')

            # Use the shorthand style, note that add returns the Async object.
            for i in xrange(5):
                ctx.add(target=example_function, args=[i])
                logging.info('Added job %d to context.', i)

        # When the Context is exited, the tasks are inserted (if there are no
        # errors).

        logging.info('Async jobs for context batch inserted.')

        self.response.out.write('Successfully inserted a group of Async jobs.')
Example #29
    def get(self):
        from furious.async import Async
        from furious import context

        count = self.request.get('tasks', 5)

        # Create a new furious Context with persistence enabled.
        with context.new(persist_async_results=True) as ctx:
            # Set a completion event handler.
            ctx.set_event_handler('complete',
                                  Async(context_complete, args=[ctx.id]))

            # Insert some Asyncs.
            for i in xrange(int(count)):
                ctx.add(target=async_worker, args=[ctx.id, i])
                logging.info('Added job %d to context.', i)

        # When the Context is exited, the tasks are inserted (if there are no
        # errors).

        logging.info('Async jobs for context batch inserted.')

        self.response.out.write('Successfully inserted a group of Async jobs.')
Example #30
def insert_tasks(count):
    from furious.async import Async
    from furious import context
    # Create a new furious Context.
    with context.new() as ctx:
        # Set a completion event handler.
        ctx.set_event_handler(
            'complete', Async(context_complete, queue='example',
                              args=[ctx.id]))

        # Insert some Asyncs. The completion check will use each async's queue
        for i in xrange(count):

            if i % 2 == 0:
                queue = 'example'
            else:
                queue = 'default'
            ctx.add(target=async_worker, queue=queue, args=[ctx.id, i])
            logging.info('Added job %d to context.', i)

    # When the Context is exited, the tasks are inserted (if there are no
    # errors).

    logging.info('Async jobs for context batch inserted.')
Example #31
def insert_tasks(count):
    from furious.async import Async
    from furious import context
    # Create a new furious Context.
    with context.new() as ctx:
        # Set a completion event handler.
        ctx.set_event_handler('complete',
                              Async(context_complete, queue='example',
                                    args=[ctx.id]))

        # Insert some Asyncs. The completion check will use each async's queue
        for i in xrange(count):

            if i % 2 == 0:
                queue = 'example'
            else:
                queue = 'default'
            ctx.add(target=async_worker, queue=queue, args=[ctx.id, i])
            logging.info('Added job %d to context.', i)

    # When the Context is exited, the tasks are inserted (if there are no
    # errors).

    logging.info('Async jobs for context batch inserted.')
Example #32
    def test_new(self):
        """Ensure new returns a new context."""
        from furious.context import Context
        from furious.context import new

        self.assertIsInstance(new(), Context)