Example #1
import json

from django.http import HttpResponse
from google.appengine.api import taskqueue


def tick(request):
    # Enqueue a GET task on the 'atpostspud-getspuds' queue and acknowledge.
    queue = taskqueue.Queue('atpostspud-getspuds')
    task = taskqueue.Task(
        url='/socialengine/postspud/get_latest_at_post_spuds', method='GET')
    queue.add(task)
    return HttpResponse(json.dumps({'status': 'ok'}),
                        content_type='application/json')
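A view like tick is typically fired by App Engine cron rather than by users. A minimal cron.yaml sketch, assuming tick is wired to a /tick URL (the path and schedule here are illustrative, not taken from the original project):

cron:
- description: enqueue the latest at-post-spuds fetch
  url: /tick
  schedule: every 5 minutes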
Example #2
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from google.appengine.api import taskqueue


def send_invoices(request):
    # invoices_task and InvoiceActions are project-level imports.
    try:
        task = taskqueue.Task(url=reverse(invoices_task),
                              params={"action": InvoiceActions.SEND})
        q = taskqueue.Queue("billing")
        q.add(task)
        return HttpResponse("Task Added")
    except Exception as e:
        return HttpResponse("FAILED: %s" % e)
Example #3
import base64
import datetime

from google.appengine.api import taskqueue


def get_filtered_tasks(taskqueue_stub, url=None, name=None, queue_names=None):
    """Get the tasks in the task queue, with optional filters.

    Args:
        taskqueue_stub: The taskqueue service stub to pull tasks from.
        url: A URL that all returned tasks should point at.
        name: The name of all returned tasks.
        queue_names: A list of queue names to retrieve tasks from. If None,
            tasks are fetched from all available queues.

    Returns:
        A list of taskqueue.Task objects.
    """
    all_queue_names = [queue['name'] for queue in taskqueue_stub.GetQueues()]

    if isinstance(queue_names, basestring):
        queue_names = [queue_names]

    if queue_names is None:
        queue_names = all_queue_names

    task_dicts = []
    for queue_name in queue_names:
        if queue_name in all_queue_names:
            for task in taskqueue_stub.GetTasks(queue_name):
                if url is not None and task['url'] != url:
                    continue
                if name is not None and task['name'] != name:
                    continue
                task_dicts.append(task)

    tasks = []
    for task in task_dicts:

        payload = base64.b64decode(task['body'])

        headers = dict(task['headers'])
        headers['Content-Length'] = str(len(payload))

        eta = datetime.datetime.strptime(task['eta'], '%Y/%m/%d %H:%M:%S')
        eta = eta.replace(tzinfo=taskqueue._UTC)

        task_object = taskqueue.Task(name=task['name'],
                                     method=task['method'],
                                     url=task['url'],
                                     headers=headers,
                                     payload=payload,
                                     eta=eta)
        # not ideal: poke the name-mangled private attribute so the Task
        # remembers which queue it came from
        task_object._Task__queue_name = task['queue_name']
        tasks.append(task_object)
    return tasks
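A usage sketch under the App Engine testbed, which is the usual home for a helper like this; enqueue_work is a hypothetical function under test:

from google.appengine.ext import testbed

tb = testbed.Testbed()
tb.activate()
tb.init_taskqueue_stub()
stub = tb.get_stub(testbed.TASKQUEUE_SERVICE_NAME)

enqueue_work()  # hypothetical code under test that adds a task to 'default'

tasks = get_filtered_tasks(stub, url='/work', queue_names='default')
assert len(tasks) == 1
tb.deactivate()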
Example #4
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from google.appengine.api import taskqueue
from google.appengine.api.taskqueue import (
    DuplicateTaskNameError, TaskAlreadyExistsError, TombstonedTaskError)


def run_maintenance_task(request):
    # maintenance_task, url_with_querystring and get_unique_id are
    # project-level imports.
    base_name = 'maintenance-task'
    name = request.GET.get("name", base_name)
    task = taskqueue.Task(url=reverse(maintenance_task), name=name)
    q = taskqueue.Queue('maintenance')
    response = HttpResponse("Task Added")
    try:
        q.add(task)
    except TombstonedTaskError:
        response = HttpResponseRedirect(
            url_with_querystring(reverse(run_maintenance_task),
                                 name="%s-%s" % (base_name, get_unique_id())))
    except TaskAlreadyExistsError:
        response = HttpResponse("Task not added: TaskAlreadyExistsError")
    except DuplicateTaskNameError:
        response = HttpResponse("Task not added: DuplicateTaskNameError")

    return response
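For reference, a possible shape for the url_with_querystring helper used above; this is a hypothetical reconstruction, not the project's actual code:

import urllib

def url_with_querystring(path, **kwargs):
    # append the keyword arguments to the path as a URL-encoded query string
    return path + '?' + urllib.urlencode(kwargs)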
Example #5
def log_connection_events(sender, signal_type, obj, **kwargs):
    from common.tz_support import utc_now
    from django.core.urlresolvers import reverse
    from google.appengine.api.taskqueue import taskqueue
    from analytics.models import AnalyticsEvent
    from common.util import log_event, EventType, notify_by_email
    from ordering.signals import SignalType
    from ordering.station_connection_manager import ALERT_DELTA, handle_dead_workstations

    last_event_qs = AnalyticsEvent.objects.filter(
        work_station=obj,
        type__in=[EventType.WORKSTATION_UP,
                  EventType.WORKSTATION_DOWN]).order_by('-create_date')[:1]
    station = obj.station

    if signal_type == SignalType.WORKSTATION_ONLINE:
        if last_event_qs:
            # send workstation reconnect mail
            last_event = last_event_qs[0]
            if last_event.type == EventType.WORKSTATION_DOWN and (
                    utc_now() - last_event.create_date
            ) >= ALERT_DELTA and station.show_on_list:
                msg = u"Workstation is up again:\n\tid = %d station = %s" % (
                    obj.id, obj.dn_station_name)
                notify_by_email(u"Workstation Reconnected", msg=msg)
        elif station.show_on_list:
            # send "new workstation" mail
            msg = u"A new workstation just connected: id = %d station = %s" % (
                obj.id, obj.dn_station_name)
            notify_by_email(u"New Workstation", msg=msg)

        log_event(EventType.WORKSTATION_UP, station=station, work_station=obj)

    elif signal_type == SignalType.WORKSTATION_OFFLINE:
        log_event(EventType.WORKSTATION_DOWN,
                  station=station,
                  work_station=obj)

        if station.show_on_list:
            # add task to check if workstation is still dead after ALERT_DELTA
            task = taskqueue.Task(url=reverse(handle_dead_workstations),
                                  countdown=ALERT_DELTA.seconds + 1,
                                  params={"workstation_id": obj.id})
            taskqueue.Queue('log-events').add(task)
Example #6
File: log.py Project: oikmar/fantasm
    def _log(self, level, message, *args, **kwargs):
        """ Logs the message to the normal logging module and also queues a Task to create an _FantasmLog

        @param level:
        @param message:
        @param args:
        @param kwargs:

        NOTE: we are not not using deferred module to reduce dependencies, but we are re-using the helper
              functions .serialize() and .run() - see handler.py
        """
        if not (self.level <= level <= self.maxLevel):
            return

        namespace = kwargs.pop('namespace', None)
        tags = kwargs.pop('tags', None)

        self.getLoggingMap()[level](message, *args, **kwargs)

        if not self.persistentLogging:
            return

        stack = None
        if 'exc_info' in kwargs:
            f = StringIO.StringIO()
            traceback.print_exc(25, f)
            stack = f.getvalue()

        # this _log method requires everything to be serializable, which is not the case for the logging
        # module. if message is not a basestring, then we simply cast it to a string to allow _something_
        # to be logged in the deferred task
        if not isinstance(message, basestring):
            try:
                message = str(message)
            except Exception:
                message = LOG_ERROR_MESSAGE
                if args:
                    args = []
                logging.warning(message, exc_info=True)

        taskName = (self.__obj or {}).get(constants.TASK_NAME_PARAM)

        stateName = None
        if self.context.currentState:
            stateName = self.context.currentState.name

        transitionName = None
        if self.context.startingState and self.context.startingEvent:
            transitionName = self.context.startingState.getTransition(
                self.context.startingEvent).name

        actionName = None
        if self.context.currentAction:
            actionName = self.context.currentAction.__class__.__name__

        # in immediateMode, tack the messages onto obj so that they can be returned
        # in the http response in handler.py
        if self.__obj is not None:
            if self.__obj.get(constants.IMMEDIATE_MODE_PARAM):
                try:
                    self.__obj[constants.MESSAGES_PARAM].append(message % args)
                except TypeError:
                    self.__obj[constants.MESSAGES_PARAM].append(message)

        serialized = deferred.serialize(
            _log,
            taskName,
            self.context.instanceName,
            self.context.machineName,
            stateName,
            actionName,
            transitionName,
            level,
            namespace,
            (self.tags or []) + (tags or []),
            message,
            stack,
            datetime.datetime.now(),  # FIXME: should this call .utcnow() instead?
            *args,
            **kwargs)

        try:
            task = taskqueue.Task(
                url=constants.DEFAULT_LOG_URL,
                payload=serialized,
                retry_options=taskqueue.TaskRetryOptions(task_retry_limit=20))
            # FIXME: a batch add may be more efficient, but there are quite a few corner cases to deal with
            taskqueue.Queue(name=constants.DEFAULT_LOG_QUEUE_NAME).add(task)

        except taskqueue.TaskTooLargeError:
            logging.warning(
                "fantasm log message too large - skipping persistent storage")

        except taskqueue.Error:
            logging.warning(
                "error queuing log message Task - skipping persistent storage",
                exc_info=True)
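On the batch-add FIXME above: Queue.add also accepts a list of tasks (up to 100 per call), so a batched variant could look roughly like the sketch below. The pending_payloads buffer is an assumption, not fantasm's code:

# sketch only: Queue.add takes an iterable of up to 100 tasks per call
queue = taskqueue.Queue(name=constants.DEFAULT_LOG_QUEUE_NAME)
queue.add([taskqueue.Task(url=constants.DEFAULT_LOG_URL, payload=p)
           for p in pending_payloads])  # pending_payloads: hypothetical buffer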
Example #7
import json
from google.appengine.api import taskqueue

def AddTask(payload):
    task = taskqueue.Task(url="/taskbomb", payload=json.dumps(payload))
    return task.add("background")
Example #8
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from google.appengine.api import taskqueue


def run_gcp_service_test(request):
    # test_gcp_service_task is a project-level import.
    task = taskqueue.Task(url=reverse(test_gcp_service_task))
    q = taskqueue.Queue('maintenance')
    q.add(task)

    return HttpResponse("Task Added")