Example #1
def handle_event_task():
    """
    Priority queue task that handles both event fires (when fired) and new incoming
    messages that need to be handled.

    Currently two types of events may be "popped" from our queue:
           msg - Which contains the id of the Msg to be processed
          fire - Which contains the id of the EventFire that needs to be fired
    """
    from temba.campaigns.models import EventFire
    r = get_redis_connection()

    # pop off the next task
    event_task = pop_task(HANDLE_EVENT_TASK)

    # it is possible we have no message to send, if so, just return
    if not event_task:
        return

    if event_task['type'] == MSG_EVENT:
        process_message_task(event_task['id'], event_task.get('from_mage', False), event_task.get('new_contact', False))

    elif event_task['type'] == FIRE_EVENT:
        # use a lock to make sure we don't do two at once somehow
        with r.lock('fire_campaign_%s' % event_task['id'], timeout=120):
            event = EventFire.objects.filter(pk=event_task['id'], fired=None).first()
            if event:
                event.fire()

    else:
        raise Exception("Unexpected event type: %s" % event_task)
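
The payloads handled above are plain dicts queued per org. As a rough, hypothetical sketch of the producer side (push_task is the same helper used in Example #2; EVENT_QUEUE and the surrounding variables are assumptions made only for illustration):

# hypothetical sketch of queueing the two event types handled above;
# EVENT_QUEUE is an assumed queue constant, org/msg/event_fire are illustrative
push_task(org.id, EVENT_QUEUE, HANDLE_EVENT_TASK,
          dict(type=MSG_EVENT, id=msg.id,          # id of the Msg to process
               from_mage=False, new_contact=False))
push_task(org.id, EVENT_QUEUE, HANDLE_EVENT_TASK,
          dict(type=FIRE_EVENT, id=event_fire.id))  # id of the EventFire to fire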
Example #2
def send_msg_task():
    """
    Pops the next message off of our msg queue to send.
    """
    # pop off the next task
    msg_tasks = pop_task(SEND_MSG_TASK)

    # it is possible we have no message to send, if so, just return
    if not msg_tasks:
        return

    if not isinstance(msg_tasks, list):
        msg_tasks = [msg_tasks]

    r = get_redis_connection()

    # acquire a lock on our contact to make sure two sets of msgs aren't being sent at the same time
    try:
        with r.lock('send_contact_%d' % msg_tasks[0]['contact'], timeout=300):
            # send each of our msgs
            while msg_tasks:
                msg_task = msg_tasks.pop(0)
                msg = dict_to_struct('MockMsg', msg_task,
                                     datetime_fields=['modified_on', 'sent_on', 'created_on', 'queued_on', 'next_attempt'])
                Channel.send_message(msg)

                # if there are more messages to send for this contact, sleep a second before moving on
                if msg_tasks:
                    time.sleep(1)

    finally:  # pragma: no cover
        # if some msgs weren't sent for some reason, then requeue them for later sending
        if msg_tasks:
            # requeue any unsent msgs
            push_task(msg_tasks[0]['org'], MSG_QUEUE, SEND_MSG_TASK, msg_tasks)
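
dict_to_struct above rebuilds an attribute-access message object from the queued dict, parsing the listed datetime fields. A minimal sketch of what such a helper might look like (an assumption, the real helper may differ; json_date_to_datetime is the same parser used for timeouts in Example #7):

from collections import namedtuple

def dict_to_struct_sketch(name, d, datetime_fields=()):
    # assumed behavior: build a namedtuple type from the dict's keys and
    # convert any JSON-encoded datetime fields back into datetime objects
    values = dict(d)
    for field in datetime_fields:
        if values.get(field):
            values[field] = json_date_to_datetime(values[field])
    return namedtuple(name, values.keys())(**values)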
Example #3
def start_msg_flow_batch_task():
    # pop off the next task
    task = pop_task('start_msg_flow_batch')

    # it is possible that somehow we might get None back if more workers were started than tasks got added, bail if so
    if task is None:
        return

    # instantiate all the objects we need that were serialized as JSON
    flow = Flow.objects.get(pk=task['flow'])
    broadcasts = [] if not task['broadcasts'] else Broadcast.objects.filter(
        pk__in=task['broadcasts'])
    started_flows = [] if not task['started_flows'] else task['started_flows']
    start_msg = None if not task['start_msg'] else Msg.all_messages.filter(
        pk=task['start_msg']).first()
    extra = task['extra']
    flow_start = None if not task['flow_start'] else FlowStart.objects.filter(
        pk=task['flow_start']).first()

    # and go do our work
    flow.start_msg_flow_batch(task['contacts'],
                              broadcasts=broadcasts,
                              started_flows=started_flows,
                              start_msg=start_msg,
                              extra=extra,
                              flow_start=flow_start)
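
Everything this worker needs arrives as JSON-serializable ids which it turns back into ORM objects. A hypothetical producer for this batch task (the FLOW_QUEUE constant and the local variables are assumptions for illustration) might push:

# hypothetical producer sketch for the batch worker above
push_task(org.id, FLOW_QUEUE, 'start_msg_flow_batch',
          dict(flow=flow.id,                                       # Flow pk
               contacts=[c.id for c in batch_contacts],            # Contact pks for this batch
               broadcasts=[b.id for b in broadcasts],              # Broadcast pks, may be empty
               started_flows=started_flows,                        # list of already-started flow ids
               start_msg=start_msg.id if start_msg else None,      # Msg pk or None
               extra=extra,                                        # arbitrary JSON extra data
               flow_start=flow_start.id if flow_start else None))  # FlowStart pk or None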
Example #4
def handle_event_task():
    """
    Priority queue task that handles both event fires (when fired) and new incoming
    messages that need to be handled.

    Currently two types of events may be "popped" from our queue:
           msg - Which contains the id of the Msg to be processed
          fire - Which contains the id of the EventFire that needs to be fired
    """
    from temba.campaigns.models import EventFire
    r = get_redis_connection()

    # pop off the next task
    event_task = pop_task(HANDLE_EVENT_TASK)

    # it is possible we have no message to send, if so, just return
    if not event_task:
        return

    if event_task['type'] == MSG_EVENT:
        process_message_task(event_task['id'],
                             event_task.get('from_mage', False),
                             event_task.get('new_contact', False))

    elif event_task['type'] == FIRE_EVENT:
        # use a lock to make sure we don't do two at once somehow
        with r.lock('fire_campaign_%s' % event_task['id'], timeout=120):
            event = EventFire.objects.filter(pk=event_task['id'],
                                             fired=None).first()
            if event:
                event.fire()

    else:
        raise Exception("Unexpected event type: %s" % event_task)
Example #5
def start_msg_flow_batch_task():
    logger = start_msg_flow_batch_task.get_logger()

    try:
        # pop off the next task
        task = pop_task('start_msg_flow_batch')

        # it is possible that somehow we might get None back if more workers were started than tasks got added, bail if so
        if task is None:
            return

        # instantiate all the objects we need that were serialized as JSON
        flow = Flow.objects.get(pk=task['flow'])
        batch_contacts = list(Contact.objects.filter(pk__in=task['contacts']))
        broadcasts = [] if not task['broadcasts'] else Broadcast.objects.filter(pk__in=task['broadcasts'])
        started_flows = [] if not task['started_flows'] else task['started_flows']
        start_msg = None if not task['start_msg'] else Msg.objects.filter(pk=task['start_msg']).first()
        extra = task['extra']
        flow_start = None if not task['flow_start'] else FlowStart.objects.filter(pk=task['flow_start']).first()

        # and go do our work
        flow.start_msg_flow_batch(batch_contacts, broadcasts=broadcasts,
                                  started_flows=started_flows, start_msg=start_msg,
                                  extra=extra, flow_start=flow_start)
    except Exception:
        import traceback
        traceback.print_exc()
        logger.exception("Error starting msg flow batch")
Example #6
def send_msg_task():
    """
    Pops the next message off of our msg queue to send.
    """
    # pop off the next task
    task = pop_task(SEND_MSG_TASK)

    # it is possible we have no message to send, if so, just return
    if not task:
        return

    msg = dict_to_struct('MockMsg', task, datetime_fields=['delivered_on', 'sent_on', 'created_on',
                                                           'queued_on', 'next_attempt'])

    # send it off
    Channel.send_message(msg)
Example #7
def handle_event_task():
    """
    Priority queue task that handles both event fires (when fired) and new incoming
    messages that need to be handled.

    Currently three types of events may be "popped" from our queue:
           msg - Which contains the id of the Msg to be processed
          fire - Which contains the id of the EventFire that needs to be fired
       timeout - Which contains a run that timed out and needs to be resumed
    """
    from temba.campaigns.models import EventFire
    r = get_redis_connection()

    # pop off the next task
    event_task = pop_task(HANDLE_EVENT_TASK)

    # it is possible we have no message to send, if so, just return
    if not event_task:
        return

    if event_task['type'] == MSG_EVENT:
        process_message_task(event_task['id'],
                             event_task.get('from_mage', False),
                             event_task.get('new_contact', False))

    elif event_task['type'] == FIRE_EVENT:
        # use a lock to make sure we don't do two at once somehow
        key = 'fire_campaign_%s' % event_task['id']
        if not r.get(key):
            with r.lock(key, timeout=120):
                event = EventFire.objects.filter(pk=event_task['id'], fired=None)\
                                         .select_related('event', 'event__campaign', 'event__campaign__org').first()
                if event:
                    print "E[%09d] Firing for org: %s" % (
                        event.id, event.event.campaign.org.name)
                    start = time.time()
                    event.fire()
                    print "E[%09d] %08.3f s" % (event.id, time.time() - start)

    elif event_task['type'] == TIMEOUT_EVENT:
        timeout_on = json_date_to_datetime(event_task['timeout_on'])
        process_run_timeout(event_task['run'], timeout_on)

    else:
        raise Exception("Unexpected event type: %s" % event_task)
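
The timeout branch above expects 'run' (a run id) and 'timeout_on' as a JSON-encoded datetime that json_date_to_datetime can parse back. A hypothetical producer sketch (EVENT_QUEUE and datetime_to_json_date are assumptions; the latter stands in for whatever serializer mirrors json_date_to_datetime):

push_task(org.id, EVENT_QUEUE, HANDLE_EVENT_TASK,
          dict(type=TIMEOUT_EVENT,
               run=run.id,                                         # run pk to resume
               timeout_on=datetime_to_json_date(run.timeout_on)))  # assumed serializer counterpart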
Example #8
def send_msg_task():
    """
    Pops the next message off of our msg queue to send.
    """
    # pop off the next task
    cur_task = pop_task(SEND_MSG_TASK)

    # it is possible we have no message to send, if so, just return
    if not cur_task:
        return

    msg = dict_to_struct('MockMsg', cur_task,
                         datetime_fields=['modified_on', 'sent_on', 'created_on', 'queued_on', 'next_attempt'])

    # send it off
    r = get_redis_connection()
    with r.lock('send_msg_%d' % msg.id, timeout=300):
        Channel.send_message(msg)
Example #9
def handle_event_task():
    """
    Priority queue task that handles both event fires (when fired) and new incoming
    messages that need to be handled.

    Currently three types of events may be "popped" from our queue:
           msg - Which contains the id of the Msg to be processed
          fire - Which contains the id of the EventFire that needs to be fired
       timeout - Which contains a run that timed out and needs to be resumed
    """
    from temba.campaigns.models import EventFire
    r = get_redis_connection()

    # pop off the next task
    event_task = pop_task(HANDLE_EVENT_TASK)

    # it is possible we have no message to send, if so, just return
    if not event_task:
        return

    if event_task['type'] == MSG_EVENT:
        process_message_task(event_task['id'], event_task.get('from_mage', False), event_task.get('new_contact', False))

    elif event_task['type'] == FIRE_EVENT:
        # use a lock to make sure we don't do two at once somehow
        key = 'fire_campaign_%s' % event_task['id']
        if not r.get(key):
            with r.lock(key, timeout=120):
                event = EventFire.objects.filter(pk=event_task['id'], fired=None)\
                                         .select_related('event', 'event__campaign', 'event__campaign__org').first()
                if event:
                    print "E[%09d] Firing for org: %s" % (event.id, event.event.campaign.org.name)
                    start = time.time()
                    event.fire()
                    print "E[%09d] %08.3f s" % (event.id, time.time() - start)

    elif event_task['type'] == TIMEOUT_EVENT:
        timeout_on = json_date_to_datetime(event_task['timeout_on'])
        process_run_timeout(event_task['run'], timeout_on)

    else:
        raise Exception("Unexpected event type: %s" % event_task)
Example #10
def send_msg_task():
    """
    Pops the next message off of our msg queue to send.
    """
    logger = send_msg_task.get_logger()

    # pop off the next task
    task = pop_task(SEND_MSG_TASK)

    # it is possible we have no message to send, if so, just return
    if not task:
        return

    msg = dict_to_struct('MockMsg', task,
                         datetime_fields=['modified_on', 'sent_on', 'created_on', 'queued_on', 'next_attempt'])

    # send it off
    r = get_redis_connection()
    with r.lock('send_msg_%d' % msg.id, timeout=300):
        Channel.send_message(msg)
Example #11
def send_msg_task():
    """
    Pops the next message off of our msg queue to send.
    """
    # pop off the next task
    task = pop_task(SEND_MSG_TASK)

    # it is possible we have no message to send, if so, just return
    if not task:
        return

    msg = dict_to_struct('MockMsg',
                         task,
                         datetime_fields=[
                             'delivered_on', 'sent_on', 'created_on',
                             'queued_on', 'next_attempt'
                         ])

    # send it off
    Channel.send_message(msg)
Example #12
def start_msg_flow_batch_task():
    # pop off the next task
    task = pop_task('start_msg_flow_batch')

    # it is possible that somehow we might get None back if more workers were started than tasks got added, bail if so
    if task is None:
        return

    # instantiate all the objects we need that were serialized as JSON
    flow = Flow.objects.get(pk=task['flow'])
    broadcasts = [] if not task['broadcasts'] else Broadcast.objects.filter(pk__in=task['broadcasts'])
    started_flows = [] if not task['started_flows'] else task['started_flows']
    start_msg = None if not task['start_msg'] else Msg.all_messages.filter(pk=task['start_msg']).first()
    extra = task['extra']
    flow_start = None if not task['flow_start'] else FlowStart.objects.filter(pk=task['flow_start']).first()

    # and go do our work
    flow.start_msg_flow_batch(task['contacts'], broadcasts=broadcasts,
                              started_flows=started_flows, start_msg=start_msg,
                              extra=extra, flow_start=flow_start)
Example #13
def send_msg_task():
    """
    Pops the next message off of our msg queue to send.
    """
    logger = send_msg_task.get_logger()

    try:
        # pop off the next task
        task = pop_task(SEND_MSG_TASK)

        # it is possible we have no message to send, if so, just return
        if not task:
            return

        msg = dict_to_struct('MockMsg', task, datetime_fields=['delivered_on', 'sent_on', 'created_on',
                             'queued_on', 'next_attempt'])

        # send it off
        Channel.send_message(msg)

    except Exception:
        logger.exception("Error sending msg")
Example #14
def start_msg_flow_batch_task():
    logger = start_msg_flow_batch_task.get_logger()

    try:
        # pop off the next task
        task = pop_task('start_msg_flow_batch')

        # it is possible that somehow we might get None back if more workers were started than tasks got added, bail if so
        if task is None:
            return

        # instantiate all the objects we need that were serialized as JSON
        flow = Flow.objects.get(pk=task['flow'])
        batch_contacts = list(Contact.objects.filter(pk__in=task['contacts']))
        broadcasts = [] if not task['broadcasts'] else Broadcast.objects.filter(pk__in=task['broadcasts'])
        started_flows = [] if not task['started_flows'] else task['started_flows']
        start_msg = None if not task['start_msg'] else Msg.objects.filter(pk=task['start_msg']).first()
        extra = task['extra']
        flow_start = None if not task['flow_start'] else FlowStart.objects.filter(pk=task['flow_start']).first()

        # and go do our work
        flow.start_msg_flow_batch(batch_contacts,
                                  broadcasts=broadcasts,
                                  started_flows=started_flows,
                                  start_msg=start_msg,
                                  extra=extra,
                                  flow_start=flow_start)
    except Exception:
        import traceback
        traceback.print_exc()
        logger.exception("Error starting msg flow batch")