Code Example #1
File: tests.py Project: AbrahamKiggundu/rapidpro
    def test_get_cacheable_result(self):
        self.create_contact("Bob", number="1234")

        def calculate():
            return Contact.objects.all().count()

        with self.assertNumQueries(1):
            self.assertEqual(
                get_cacheable_result('test_contact_count', 60, calculate),
                1)  # from db
        with self.assertNumQueries(0):
            self.assertEqual(
                get_cacheable_result('test_contact_count', 60, calculate),
                1)  # from cache

        self.create_contact("Jim", number="2345")

        with self.assertNumQueries(0):
            self.assertEqual(
                get_cacheable_result('test_contact_count', 60, calculate),
                1)  # not updated

        get_redis_connection().delete(
            'test_contact_count'
        )  # delete from cache to force re-fetch from db

        with self.assertNumQueries(1):
            self.assertEqual(
                get_cacheable_result('test_contact_count', 60, calculate),
                2)  # from db
        with self.assertNumQueries(0):
            self.assertEqual(
                get_cacheable_result('test_contact_count', 60, calculate),
                2)  # from cache
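
The test above pins down the contract of a cache-aside helper: the first call computes the value with one database query, later calls are served from Redis, and deleting the key forces a recompute. A minimal sketch of what such a helper could look like, assuming `get_redis_connection` comes from the django-redis package and that cached values are integer counts (RapidPro's actual implementation is not shown here):

from django_redis import get_redis_connection

def get_cacheable_result(cache_key, ttl, calculate):
    r = get_redis_connection()
    cached = r.get(cache_key)
    if cached is not None:
        return int(cached)            # cache hit: zero DB queries
    result = calculate()              # cache miss: one DB query
    r.set(cache_key, result, ex=ttl)  # store with a TTL in seconds
    return result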
Code Example #2
File: data.py Project: geoapi/conceptgrapher
def clear_all():
	if not settings.DEBUG:
		raise Exception("Can't clear everything when not in DEBUG mode, dude")

	get_redis_connection().flushdb()
	base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
	db_path = os.path.join(base_dir, "cg/sqlite3.db")
	os.remove(db_path)
Code Example #3
File: utils.py Project: allenling/dbss
def distr_object(t_object, **kwargs):
    redis_c = get_redis_connection('feed_storage')
    rfeed_list = kwargs.get('feed_list') 
    rt_object = pickle.dumps(t_object)
    if t_object.get_verbtext() == 'updatefcard':
        redis_fa = get_redis_connection('favlist')
        for i in rfeed_list:
            action = redis_fa.hget(str(i), t_object.get_mobject_id())
            if action.startswith('A') or (action.startswith('B') and str(i) in action):
                redis_c.lpush(str(i), rt_object)
    else:
        for i in rfeed_list:
            redis_c.lpush(i, rt_object)
Code Example #4
File: utils.py Project: allenling/dbss
def distr_object(t_object, **kwargs):
    redis_c = get_redis_connection('feed_storage')
    rfeed_list = kwargs.get('feed_list')
    rt_object = pickle.dumps(t_object)
    if t_object.get_verbtext() == 'updatefcard':
        redis_fa = get_redis_connection('favlist')
        for i in rfeed_list:
            action = redis_fa.hget(str(i), t_object.get_mobject_id())
            if action.startswith('A') or (action.startswith('B')
                                          and str(i) in action):
                redis_c.lpush(str(i), rt_object)
    else:
        for i in rfeed_list:
            redis_c.lpush(i, rt_object)
Code Example #5
File: tags.py Project: LifeMoroz/faq
def get_models(prefix, tag):
    """
    Возвращает id-шники элементов с тегом tag
    Например все вопросы, у которых есть какой-то тег
    """
    r = get_redis_connection()
    return r.smembers(tag_key(prefix, tag))
Code Example #6
File: tags.py Project: LifeMoroz/faq
def get_top(max_tags=10, offset=0):
    r = get_redis_connection()
    # zrevrange returns max_tags tags from the sorted set, starting at
    # offset, ordered by score (here, the number of questions each tag
    # is used in); the stop index is inclusive, hence the -1
    return r.zrevrange(key_all_tags, offset, offset + max_tags - 1)
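
Both tag helpers lean on a `key_all_tags` constant and key-building functions (`key`, `tag_key`) that the excerpts never define. A plausible sketch of those pieces, with the names taken from the calls and the bodies assumed:

key_all_tags = 'tags:all'  # assumed: sorted set scoring each tag by usage

def key(prefix, tagged_id):
    # assumed: set of tags attached to one item, e.g. 'question:42'
    return '%s:%s' % (prefix, tagged_id)

def tag_key(prefix, tag):
    # assumed: set of item ids carrying one tag, e.g. 'question:tag:django'
    return '%s:tag:%s' % (prefix, tag)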
Code Example #7
File: utils.py Project: allenling/dbss
def get_unread_msg(request):

    msg_redis = get_redis_connection('message')
    if not request.user.is_anonymous() and request.user.is_authenticated():
        umsg = msg_redis.get('unread_' + str(request.user.id))
        return umsg is not None and int(umsg) > 0
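
The 'unread_<user id>' counter read here has to be maintained elsewhere; a hedged sketch of the likely write path (both function names are hypothetical):

from django_redis import get_redis_connection

def incr_unread(user_id):
    # hypothetical producer for the counter read in get_unread_msg()
    get_redis_connection('message').incr('unread_' + str(user_id))

def clear_unread(user_id):
    # hypothetical reset once the user has viewed their messages
    get_redis_connection('message').delete('unread_' + str(user_id))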
Code Example #8
    def invalidate_cache(cls, contact_field=None, ruleset=None, group=None):
        """
        Used to invalidate our summary cache for values. Callers should pass in one (and only one) of a contact field,
        ruleset or group that changed and all result summaries that have changed will be invalidated accordingly.
        :return: how many cached records were invalidated
        """
        if not contact_field and not ruleset and not group:
            raise Exception(
                "You must specify a contact field, ruleset or group to invalidate results for"
            )

        if contact_field:
            key = ':' + (CONTACT_KEY % contact_field.id) + ':'
        elif group:
            key = ':' + (GROUP_KEY % group.id) + ':'
        elif ruleset:
            key = ':' + (RULESET_KEY % ruleset.id) + ':'

        # blow away any redis items that contain our key as a dependency
        r = get_redis_connection()
        keys = r.keys(VALUE_SUMMARY_CACHE_KEY + "*" + key + "*")
        if keys:
            invalidated = r.delete(*keys)
        else:
            invalidated = 0

        return invalidated
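
One caveat with this approach: `r.keys(...)` scans the entire keyspace in a single blocking call, which can stall Redis on large instances. redis-py's `scan_iter` does the same lookup incrementally; a sketch of the drop-in change:

# non-blocking variant of the KEYS lookup above (sketch)
keys = list(r.scan_iter(match=VALUE_SUMMARY_CACHE_KEY + "*" + key + "*"))
invalidated = r.delete(*keys) if keys else 0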
Code Example #9
def squash_channelcounts():
    r = get_redis_connection()

    key = 'squash_channelcounts'
    if not r.get(key):
        with r.lock(key, timeout=900):
            ChannelCount.squash_counts()
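
This get-then-lock shape recurs in most of the task examples here: the plain GET is a cheap pre-check that skips the work when another worker already holds the lock, while redis-py's `r.lock(...)` provides the actual mutual exclusion, with `timeout` bounding how long a crashed worker can wedge the key. The pattern could be factored into a context manager; a hypothetical sketch (not part of any of these projects):

from contextlib import contextmanager
from django_redis import get_redis_connection

@contextmanager
def run_exclusively(key, timeout=900):
    """Yield True only if no other worker currently holds `key`."""
    r = get_redis_connection()
    if r.get(key):                      # cheap, non-atomic pre-check
        yield False
        return
    with r.lock(key, timeout=timeout):  # authoritative lock, auto-expires
        yield True

With that helper, each squash task body reduces to `with run_exclusively('squash_channelcounts') as acquired:` followed by the squash call when `acquired` is True.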
Code Example #10
File: models.py Project: TextoCMR/TexTo.cm
    def invalidate_cache(cls, contact_field=None, ruleset=None, group=None):
        """
        Used to invalidate our summary cache for values. Callers should pass in one (and only one) of a contact field,
        ruleset or group that changed and all result summaries that have changed will be invalidated accordingly.
        :return: how many cached records were invalidated
        """
        if not contact_field and not ruleset and not group:
            raise Exception("You must specify a contact field, ruleset or group to invalidate results for")

        if contact_field:
            key = ':' + (CONTACT_KEY % contact_field.id) + ':'
        elif group:
            key = ':' + (GROUP_KEY % group.id) + ':'
        elif ruleset:
            key = ':' + (RULESET_KEY % ruleset.id) + ':'

        # blow away any redis items that contain our key as a dependency
        r = get_redis_connection()
        keys = r.keys(VALUE_SUMMARY_CACHE_KEY + "*" + key + "*")
        if keys:
            invalidated = r.delete(*keys)
        else:
            invalidated = 0

        return invalidated
Code Example #11
File: tasks.py Project: weddingjuma/rapidpro-1
def squash_systemlabels():
    r = get_redis_connection()

    key = 'squash_systemlabels'
    if not r.get(key):
        with r.lock(key, timeout=900):
            SystemLabel.squash_counts()
Code Example #12
File: tasks.py Project: xuanhan863/rapidpro
def check_campaigns_task(sched_id=None):
    """
    See if any event fires need to be triggered
    """
    logger = check_campaigns_task.get_logger()

    # get a lock
    r = get_redis_connection()

    key = 'check_campaigns'

    # only do this if we aren't already checking campaigns
    if not r.get(key):
        with r.lock(key, timeout=3600):
            # for each that needs to be fired
            for fire in EventFire.objects.filter(
                    fired=None, scheduled__lte=timezone.now()):
                try:
                    key = 'fire_campaign_%d' % fire.pk
                    if not r.get(key):
                        # try to acquire a lock
                        with r.lock('fire_campaign_%d' % fire.pk, timeout=120):
                            # reload it
                            fire = EventFire.objects.get(id=fire.pk)
                            if not fire.fired:
                                fire.fire()

                except Exception:  # pragma: no cover
                    logger.error("Error running campaign event: %s" % fire.pk,
                                 exc_info=True)
Code Example #13
File: tasks.py Project: ewheeler/rapidpro
def process_message_task(msg_id, from_mage=False, new_contact=False):
    """
    Processes a single incoming message through our queue.
    """
    r = get_redis_connection()
    msg = Msg.current_messages.filter(pk=msg_id, status=PENDING).select_related('org', 'contact', 'contact_urn', 'channel').first()

    # somebody already handled this message, move on
    if not msg:
        return

    # get a lock on this contact, we process messages one by one to prevent odd behavior in flow processing
    key = 'pcm_%d' % msg.contact_id
    if not r.get(key):
        with r.lock(key, timeout=120):
            print "M[%09d] Processing - %s" % (msg.id, msg.text)
            start = time.time()

            # if message was created in Mage...
            if from_mage:
                mage_handle_new_message(msg.org, msg)
                if new_contact:
                    mage_handle_new_contact(msg.org, msg.contact)

            Msg.process_message(msg)
            print "M[%09d] %08.3f s - %s" % (msg.id, time.time() - start, msg.text)
Code Example #14
def check_campaigns_task(sched_id=None):
    """
    See if any event fires need to be triggered
    """
    logger = check_campaigns_task.get_logger()

    # get a lock
    r = get_redis_connection()

    key = 'check_campaigns'

    # only do this if we aren't already checking campaigns
    if not r.get(key):
        with r.lock(key, timeout=3600):
            # for each that needs to be fired
            for fire in EventFire.objects.filter(
                    fired=None, scheduled__lte=timezone.now()).select_related(
                        'contact', 'contact__org'):
                try:
                    push_task(fire.contact.org, HANDLER_QUEUE,
                              HANDLE_EVENT_TASK,
                              dict(type=FIRE_EVENT, id=fire.id))

                except Exception:  # pragma: no cover
                    logger.error("Error running campaign event: %s" % fire.pk,
                                 exc_info=True)
Code Example #15
def handle_event_task():
    """
    Priority queue task that handles both event fires (when fired) and new incoming
    messages that need to be handled.

    Currently two types of events may be "popped" from our queue:
           msg - Which contains the id of the Msg to be processed
          fire - Which contains the id of the EventFire that needs to be fired
    """
    from temba.campaigns.models import EventFire
    r = get_redis_connection()

    # pop off the next task
    event_task = pop_task(HANDLE_EVENT_TASK)

    # it is possible we have no message to send, if so, just return
    if not event_task:
        return

    if event_task['type'] == MSG_EVENT:
        process_message_task(event_task['id'],
                             event_task.get('from_mage', False),
                             event_task.get('new_contact', False))

    elif event_task['type'] == FIRE_EVENT:
        # use a lock to make sure we don't do two at once somehow
        with r.lock('fire_campaign_%s' % event_task['id'], timeout=120):
            event = EventFire.objects.filter(pk=event_task['id'],
                                             fired=None).first()
            if event:
                event.fire()

    else:
        raise Exception("Unexpected event type: %s" % event_task)
Code Example #16
File: models.py Project: juniorsilver/casepro
    def get_or_open(cls, org, user, labels, message, summary, assignee, archive_messages=True):
        r = get_redis_connection()
        with r.lock('org:%d:cases_lock' % org.pk):
            # check for open case with this contact
            existing_open = cls.get_open_for_contact_on(org, message.contact, timezone.now())
            if existing_open:
                existing_open.is_new = False
                return existing_open

            # check for another case (possibly closed) connected to this message
            existing_for_msg = cls.objects.filter(message_id=message.id).first()
            if existing_for_msg:
                existing_for_msg.is_new = False
                return existing_for_msg

            case = cls.objects.create(org=org, assignee=assignee, contact_uuid=message.contact,
                                      summary=summary, message_id=message.id, message_on=message.created_on)
            case.is_new = True
            case.labels.add(*labels)

            CaseAction.create(case, user, CaseAction.OPEN, assignee=assignee)

            # archive any labelled messages from this contact
            if archive_messages:
                Contact.archive_messages(org, message.contact)

        return case
Code Example #17
File: tasks.py Project: Ebaneck/rapidpro
def squash_flowruncounts():
    r = get_redis_connection()

    key = 'squash_flowruncounts'
    if not r.get(key):
        with r.lock(key, timeout=900):
            FlowRunCount.squash_counts()
Code Example #18
File: tasks.py Project: Maximus325/rapidpro
def squash_topupcredits():
    r = get_redis_connection()

    key = 'squash_topupcredits'
    if not r.get(key):
        with r.lock(key, timeout=900):
            TopUpCredits.squash_credits()
Code Example #19
File: tasks.py Project: ewheeler/rapidpro
def process_run_timeout(run_id, timeout_on):
    """
    Processes a single run timeout
    """
    from temba.flows.models import FlowRun

    r = get_redis_connection()
    run = FlowRun.objects.filter(id=run_id, is_active=True, flow__is_active=True).first()

    if run:
        key = 'pcm_%d' % run.contact_id
        if not r.get(key):
            with r.lock(key, timeout=120):
                print "T[%09d] Processing timeout" % run.id
                start = time.time()

                run.refresh_from_db()

                # this is still the timeout to process (json doesn't have microseconds so close enough)
                if run.timeout_on and abs(run.timeout_on - timeout_on) < timedelta(milliseconds=1):
                    run.resume_after_timeout()
                else:
                    print "T[%09d] .. skipping timeout, already handled" % run.id

                print "T[%09d] %08.3f s" % (run.id, time.time() - start)
Code Example #20
File: queues.py Project: MOconcepts/rapidpro
def push_task(org, queue, task_name, args, priority=DEFAULT_PRIORITY):
    """
    Adds a task to queue_name with the supplied arguments.

    Ex: push_task(nyaruka, 'flows', 'start_flow', [1,2,3,4,5,6,7,8,9,10])
    """
    r = get_redis_connection('default')

    # calculate our score from the current time and priority, this could get us in trouble
    # if things are queued for more than ~100 days, but otherwise gives us the properties of prioritizing
    # first based on priority, then insertion order.
    score = time.time() + priority

    # push our task onto the right queue and make sure it is in the active list (atomically)
    with r.pipeline() as pipe:
        key = "%s:%d" % (task_name, org.id)
        pipe.zadd(key, dict_to_json(args), score)

        # and make sure this key is in our list of queues so this job will get worked on
        pipe.sadd("%s:active" % task_name, key)
        pipe.execute()

    # if we were given a queue to schedule on, then add this task to celery.
    #
    # note that the task that is fired needs no arguments as it should just use pop_task with the
    # task name to determine what to work on.
    if queue:
        if getattr(settings, 'CELERY_ALWAYS_EAGER', False):
            task_function = lookup_task_function(task_name)
            task_function()
        else:
            current_app.send_task(task_name, args=[], kwargs={}, queue=queue)
Code Example #21
File: queues.py Project: MOconcepts/rapidpro
def pop_task(task_name):
    """
    Pops the next 'random' task off our queue, returning the arguments that were saved

    Ex: pop_task('start_flow')
    <<< {flow=5, contacts=[1,2,3,4,5,6,7,8,9,10]}
    """
    r = get_redis_connection('default')

    task = None
    active_set = "%s:active" % task_name

    # get what queue we will work against
    queue = r.srandmember(active_set)

    while queue:
        # this lua script does both a "zpop" (popping the next highest thing off our sorted set) and
        # a clearing of our active set if there is no value in it as an atomic action
        lua = "local val = redis.call('zrange', ARGV[2], 0, 0) \n" \
              "if next(val) == nil then redis.call('srem', ARGV[1], ARGV[2]) return nil \n"\
              "else redis.call('zremrangebyrank', ARGV[2], 0, 0) return val[1] end\n"

        task = r.eval(lua, 2, 'active_set', 'queue', active_set, queue)

        # found a task? then break out
        if task is not None:
            task = json.loads(task)
            break

        # if we didn't get a task, then run again against a new queue until there is nothing left in our task queue
        queue = r.srandmember(active_set)

    return task
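
Taken together, push_task and pop_task implement a per-org priority queue: ZADD with a time-plus-priority score orders the work inside each org's sorted set, and the ':active' set tells consumers which per-org queues currently hold work. A hedged round-trip sketch (the task name and payload are illustrative):

def example_roundtrip(org):
    # producer: queue one unit of work for this org; passing queue=None
    # skips the celery dispatch so only the redis side is exercised
    push_task(org, None, 'handle_event_task', dict(type='fire', id=42))

    # consumer: normally runs inside the celery task of the same name
    args = pop_task('handle_event_task')
    if args is not None:
        print(args)  # -> {'type': 'fire', 'id': 42}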
Code Example #22
File: models.py Project: Spring-Apps/rapidpro
    def invalidate_cache(cls, contact_field=None, ruleset=None, group=None):
        """
        Used to invalidate our summary cache for values. Callers should pass in one (and only one) of a contact field,
        ruleset or group that changed and all result summaries that have changed will be invalidated accordingly.
        :return: how many cached records were invalidated
        """
        if not contact_field and not ruleset and not group:
            raise ValueError("You must specify a contact field, ruleset or group to invalidate results for")

        if contact_field:
            key = CONTACT_KEY % contact_field.id
        elif group:
            key = GROUP_KEY % group.id
        elif ruleset:
            key = RULESET_KEY % ruleset.id

        # blow away any redis items that contain our key as a dependency
        r = get_redis_connection()
        dependent_results = r.smembers(key)

        # save ourselves a roundtrip if there are no matches
        if dependent_results:
            # clear all our dependencies
            pipe = r.pipeline()
            pipe.srem(key, *dependent_results)
            pipe.delete(*dependent_results)
            pipe.execute()

        return len(dependent_results)
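
The registration half of this dependency scheme isn't shown: whenever a summary is cached, its result key must be SADD-ed into the set for each contact field, group, or ruleset it depends on, which is what lets invalidate_cache find it later. A plausible sketch of that write path (the function name and signature are assumptions):

def cache_summary_result(result_key, value, dependency_keys, ttl=None):
    # hypothetical counterpart to invalidate_cache() above
    r = get_redis_connection()
    pipe = r.pipeline()
    pipe.set(result_key, value, ex=ttl)
    for dep_key in dependency_keys:     # e.g. CONTACT_KEY % field.id
        pipe.sadd(dep_key, result_key)  # register for later invalidation
    pipe.execute()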
Code Example #23
def squash_topupcredits():
    r = get_redis_connection()

    key = 'squash_topupcredits'
    if not r.get(key):
        with r.lock(key, timeout=900):
            TopUpCredits.squash_credits()
Code Example #24
File: tasks.py Project: ewheeler/rapidpro
def squash_systemlabels():
    r = get_redis_connection()

    key = 'squash_systemlabels'
    if not r.get(key):
        with r.lock(key, timeout=900):
            SystemLabel.squash_counts()
Code Example #25
File: models.py Project: AbrahamKiggundu/rapidpro
    def invalidate_cache(cls, contact_field=None, ruleset=None, group=None):
        """
        Used to invalidate our summary cache for values. Callers should pass in one (and only one) of a contact field,
        ruleset or group that changed and all result summaries that have changed will be invalidated accordingly.
        :return: how many cached records were invalidated
        """
        if not contact_field and not ruleset and not group:
            raise Exception(
                "You must specify a contact field, ruleset or group to invalidate results for"
            )

        if contact_field:
            key = CONTACT_KEY % contact_field.id
        elif group:
            key = GROUP_KEY % group.id
        elif ruleset:
            key = RULESET_KEY % ruleset.id

        # blow away any redis items that contain our key as a dependency
        r = get_redis_connection()
        dependent_results = r.smembers(key)

        # save ourselves a roundtrip if there are no matches
        if dependent_results:
            # clear all our dependencies
            pipe = r.pipeline()
            pipe.srem(key, *dependent_results)
            pipe.delete(*dependent_results)
            pipe.execute()

        return len(dependent_results)
Code Example #26
File: tasks.py Project: harykeyrun/rapidpro
def handle_event_task():
    """
    Priority queue task that handles both event fires (when fired) and new incoming
    messages that need to be handled.

    Currently two types of events may be "popped" from our queue:
           msg - Which contains the id of the Msg to be processed
          fire - Which contains the id of the EventFire that needs to be fired
    """
    from temba.campaigns.models import EventFire
    r = get_redis_connection()

    # pop off the next task
    event_task = pop_task(HANDLE_EVENT_TASK)

    # it is possible we have no message to send, if so, just return
    if not event_task:
        return

    if event_task['type'] == MSG_EVENT:
        process_message_task(event_task['id'], event_task.get('from_mage', False), event_task.get('new_contact', False))

    elif event_task['type'] == FIRE_EVENT:
        # use a lock to make sure we don't do two at once somehow
        with r.lock('fire_campaign_%s' % event_task['id'], timeout=120):
            event = EventFire.objects.filter(pk=event_task['id'], fired=None).first()
            if event:
                event.fire()

    else:
        raise Exception("Unexpected event type: %s" % event_task)
Code Example #27
File: models.py Project: praekelt/casepro
    def get_or_open(cls, org, user, message, summary, assignee):
        from casepro.profiles.models import Notification

        r = get_redis_connection()
        with r.lock(CASE_LOCK_KEY % (org.pk, message.contact.uuid)):
            message.refresh_from_db()

            # if message is already associated with a case, return that
            if message.case:
                message.case.is_new = False
                return message.case

            # suspend from groups, expire flows and archive messages
            message.contact.prepare_for_case()

            case = cls.objects.create(org=org, assignee=assignee, initial_message=message, contact=message.contact,
                                      summary=summary)
            case.is_new = True
            case.labels.add(*list(message.labels.all()))  # copy labels from message to new case
            case.watchers.add(user)

            # attach message to this case
            message.case = case
            message.save(update_fields=('case',))

            action = CaseAction.create(case, user, CaseAction.OPEN, assignee=assignee)

            for assignee_user in assignee.get_users():
                if assignee_user != user:
                    Notification.new_case_assignment(org, assignee_user, action)

        return case
Code Example #28
File: queues.py Project: xuanhan863/rapidpro
def push_task(org, queue, task, args, priority=DEFAULT_PRIORITY):
    """
    Adds a task to queue_name with the supplied arguments.

    Ex: push_task(nyaruka, 'flows', 'start_flow', [1,2,3,4,5,6,7,8,9,10])
    """
    r = get_redis_connection('default')

    # calculate our score from the current time and priority, this could get us in trouble
    # if things are queued for more than ~100 days, but otherwise gives us the properties of prioritizing
    # first based on priority, then insertion order.
    score = time.time() + priority

    # push our task onto the right queue and make sure it is in the active list (atomically)
    with r.pipeline() as pipe:
        key = "%s:%d" % (task, org.id)
        pipe.zadd(key, dict_to_json(args), score)

        # and make sure this key is in our list of queues so this job will get worked on
        pipe.sadd("%s:active" % task, key)
        pipe.execute()

    # if we were given a queue to schedule on, then add this task to celery.
    #
    # note that the task that is fired needs no arguments as it should just use pop_task with the
    # task name to determine what to work on.
    if queue:
        if getattr(settings, 'CELERY_ALWAYS_EAGER', False):
            task_function = lookup_task_function(task)
            task_function()
        else:
            current_app.send_task(task, args=[], kwargs={}, queue=queue)
Code Example #29
File: queues.py Project: xuanhan863/rapidpro
def pop_task(task_name):
    """
    Pops the next 'random' task off our queue, returning the arguments that were saved

    Ex: pop_task('start_flow')
    <<< {flow=5, contacts=[1,2,3,4,5,6,7,8,9,10]}
    """
    r = get_redis_connection('default')

    task = None
    active_set = "%s:active" % task_name

    # get what queue we will work against
    queue = r.srandmember(active_set)

    while queue:
        # this lua script does both a "zpop" (popping the next highest thing off our sorted set) and
        # a clearing of our active set if there is no value in it as an atomic action
        lua = "local val = redis.call('zrange', ARGV[2], 0, 0) \n" \
              "if next(val) == nil then redis.call('srem', ARGV[1], ARGV[2]) return nil \n"\
              "else redis.call('zremrangebyrank', ARGV[2], 0, 0) return val[1] end\n"

        task = r.eval(lua, 2, 'active_set', 'queue', active_set, queue)

        # found a task? then break out
        if task is not None:
            task = json.loads(task)
            break

        # if we didn't get a task, then run again against a new queue until there is nothing left in our task queue
        queue = r.srandmember(active_set)

    return task
Code Example #30
def send_msg_task():
    """
    Pops the next message off of our msg queue to send.
    """
    # pop off the next task
    msg_tasks = pop_task(SEND_MSG_TASK)

    # it is possible we have no message to send, if so, just return
    if not msg_tasks:
        return

    if not isinstance(msg_tasks, list):
        msg_tasks = [msg_tasks]

    r = get_redis_connection()

    # acquire a lock on our contact to make sure two sets of msgs aren't being sent at the same time
    try:
        with r.lock('send_contact_%d' % msg_tasks[0]['contact'], timeout=300):
            # send each of our msgs
            while msg_tasks:
                msg_task = msg_tasks.pop(0)
                msg = dict_to_struct('MockMsg', msg_task,
                                     datetime_fields=['modified_on', 'sent_on', 'created_on', 'queued_on', 'next_attempt'])
                Channel.send_message(msg)

                # if there are more messages to send for this contact, sleep a second before moving on
                if msg_tasks:
                    time.sleep(1)

    finally:  # pragma: no cover
        # if some msgs weren't sent for some reason, then requeue them for later sending
        if msg_tasks:
            # requeue any unsent msgs
            push_task(msg_tasks[0]['org'], MSG_QUEUE, SEND_MSG_TASK, msg_tasks)
Code Example #31
File: tasks.py Project: Ebaneck/rapidpro
def squash_channelcounts():
    r = get_redis_connection()

    key = 'squash_channelcounts'
    if not r.get(key):
        with r.lock(key, timeout=900):
            ChannelCount.squash_counts()
Code Example #32
File: tasks.py Project: mdheyab/rapidpro
def check_campaigns_task(sched_id=None):
    """
    See if any event fires need to be triggered
    """
    logger = check_campaigns_task.get_logger()

    # get a lock
    r = get_redis_connection()

    key = "check_campaigns"

    # only do this if we aren't already checking campaigns
    if not r.get(key):
        with r.lock(key, timeout=3600):
            # for each that needs to be fired
            for fire in EventFire.objects.filter(fired=None, scheduled__lte=timezone.now()):
                try:
                    key = "fire_campaign_%d" % fire.pk
                    if not r.get(key):
                        # try to acquire a lock
                        with r.lock("fire_campaign_%d" % fire.pk, timeout=120):
                            # reload it
                            fire = EventFire.objects.get(id=fire.pk)
                            if not fire.fired:
                                fire.fire()

                except Exception:  # pragma: no cover
                    logger.error("Error running campaign event: %s" % fire.pk, exc_info=True)
Code Example #33
File: tasks.py Project: weddingjuma/rapidpro-1
def process_message_task(msg_id, from_mage=False, new_contact=False):
    """
    Processes a single incoming message through our queue.
    """
    r = get_redis_connection()
    msg = Msg.objects.filter(pk=msg_id, status=PENDING).select_related(
        'org', 'contact', 'contact_urn', 'channel').first()

    # somebody already handled this message, move on
    if not msg:
        return

    # get a lock on this contact, we process messages one by one to prevent odd behavior in flow processing
    key = 'pcm_%d' % msg.contact_id
    if not r.get(key):
        with r.lock(key, timeout=120):
            print "M[%09d] Processing - %s" % (msg.id, msg.text)
            start = time.time()

            # if message was created in Mage...
            if from_mage:
                mage_handle_new_message(msg.org, msg)
                if new_contact:
                    mage_handle_new_contact(msg.org, msg.contact)

            Msg.process_message(msg)
            print "M[%09d] %08.3f s - %s" % (msg.id, time.time() - start,
                                             msg.text)
Code Example #34
File: tasks.py Project: weddingjuma/rapidpro-1
def purge_broadcasts_task():
    """
    Looks for broadcasts older than 90 days and marks their messages as purged
    """

    r = get_redis_connection()

    # 90 days ago
    purge_date = timezone.now() - timedelta(days=90)
    key = 'purge_broadcasts_task'
    if not r.get(key):
        with r.lock(key, timeout=900):

            # determine which broadcasts are old
            broadcasts = Broadcast.objects.filter(created_on__lt=purge_date,
                                                  purged=False)

            for broadcast in broadcasts:
                # TODO actually delete messages!
                #
                # Need to also create Debit objects for topups associated with messages being deleted, and then
                # regularly squash those.

                broadcast.purged = True
                broadcast.save(update_fields=['purged'])
Code Example #35
File: tasks.py Project: weddingjuma/rapidpro-1
def process_run_timeout(run_id, timeout_on):
    """
    Processes a single run timeout
    """
    from temba.flows.models import FlowRun

    r = get_redis_connection()
    run = FlowRun.objects.filter(id=run_id,
                                 is_active=True,
                                 flow__is_active=True).first()

    if run:
        key = 'pcm_%d' % run.contact_id
        if not r.get(key):
            with r.lock(key, timeout=120):
                print "T[%09d] Processing timeout" % run.id
                start = time.time()

                run.refresh_from_db()

                # this is still the timeout to process (json doesn't have microseconds so close enough)
                if run.timeout_on and abs(run.timeout_on -
                                          timeout_on) < timedelta(
                                              milliseconds=1):
                    run.resume_after_timeout()
                else:
                    print "T[%09d] .. skipping timeout, already handled" % run.id

                print "T[%09d] %08.3f s" % (run.id, time.time() - start)
Code Example #36
File: tasks.py Project: tymiles003/rapidpro
def squash_flowruncounts():
    r = get_redis_connection()

    key = 'squash_flowruncounts'
    if not r.get(key):
        with r.lock(key, timeout=900):
            FlowRunCount.squash_counts()
Code Example #37
File: models.py Project: digideskio/casepro
    def get_or_open(cls, org, user, message, summary, assignee):
        r = get_redis_connection()
        with r.lock(CASE_LOCK_KEY % (org.pk, message.contact.uuid)):
            # if message is already associated with a case, return that
            if message.case:
                message.case.is_new = False
                return message.case

            # if message contact has an open case, return that
            existing_open = cls.get_open_for_contact_on(org, message.contact, timezone.now())
            if existing_open:
                existing_open.is_new = False
                return existing_open

            # suspend from groups, expire flows and archive messages
            message.contact.prepare_for_case()

            case = cls.objects.create(org=org, assignee=assignee, initial_message=message, contact=message.contact,
                                      summary=summary)
            case.is_new = True
            case.labels.add(*list(message.labels.all()))  # copy labels from message to new case

            # attach message to this case
            message.case = case
            message.save(update_fields=('case',))

            CaseAction.create(case, user, CaseAction.OPEN, assignee=assignee)

        return case
Code Example #38
def remove_expired_flows_from_active(apps, schema_editor):
    r = get_redis_connection()
    for key in r.keys('*:step_active_set:*'):
        # make sure our flow run activity is removed
        runs = FlowRun.objects.filter(pk__in=r.smembers(key),
                                      is_active=False,
                                      contact__is_test=False)
        FlowRun.bulk_exit(runs, FlowRun.EXIT_TYPE_EXPIRED)
Code Example #39
def remove_expired_flows_from_active(apps, schema_editor):
    r = get_redis_connection()
    for key in r.keys('*:step_active_set:*'):
        # make sure our flow run activity is removed
        FlowRun.do_expire_runs(
            FlowRun.objects.filter(pk__in=r.smembers(key),
                                   is_active=False,
                                   contact__is_test=False))
Code Example #40
def flush():
    global KEY_WHITE_IP, KEY_WHITE_UA, KEY_BLACK_IP, KEY_BLACK_UA 
    global CACHE_WHITE_IP, CACHE_WHITE_UA, CACHE_BLACK_IP, CACHE_BLACK_UA
    conn = get_redis_connection(settings.ANTI_SPAM_CACHE_REDIS_KEY)
    CACHE_WHITE_IP = conn.smembers(KEY_WHITE_IP)
    CACHE_WHITE_UA = conn.smembers(KEY_WHITE_UA)
    CACHE_BLACK_IP = conn.smembers(KEY_BLACK_IP)
    CACHE_BLACK_UA = conn.smembers(KEY_BLACK_UA)
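
Once flushed into these module-level sets, spam checks can run without a per-request Redis round-trip; a hedged sketch of such a check (the function name is hypothetical):

def is_blacklisted(ip, user_agent):
    # hypothetical consumer of the caches filled by flush() above
    return ip in CACHE_BLACK_IP or user_agent in CACHE_BLACK_UA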
Code Example #41
File: views.py Project: pika-shi/metro
def redis_check(request):
    con = get_redis_connection('default')
    hashs = {'key1': 'kfs', 'dfas': 'dasf'}
    cache.set('key1', hashs)
    tmp = cache.get('key1')

    # render_to_response already returns an HttpResponse
    return render_to_response('index.html',
                              {'title': 'redis', 'logs': [con, tmp]},
                              context_instance=RequestContext(request))
Code Example #42
def generate_invoice_number(sequence_name=None):
    """
    WARNING: This method changes the state in Redis!
    """
    if sequence_name is None:
        sequence_name = app_settings.INVOICE_NUMBER_SEQUENCE_NAME
    conn = get_redis_connection()
    return int(conn.incr(sequence_name))
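
Because Redis INCR is atomic, concurrent callers can never be handed the same number, which is what makes this safe as an invoice sequence. A short usage sketch (the sequence name is illustrative):

conn = get_redis_connection()
conn.set('demo_invoice_seq', 100)                   # optionally seed it
print(generate_invoice_number('demo_invoice_seq'))  # -> 101
print(generate_invoice_number('demo_invoice_seq'))  # -> 102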
Code Example #43
File: models.py Project: allenling/dbss
def cron_update_index(sender, **kwargs):
    index_redis = get_redis_connection('djrq')
    index_count = int(index_redis.incr(settings.INDEX_NAME))
    if index_count > settings.INDEX_COUNT + 1:
        index_redis.set(settings.INDEX_NAME, 0)
        index_queue = django_rq.get_queue(settings.INDEX_QUEUE)
        if index_queue.count < 1:
            index_queue.enqueue(warp_update_index)
Code Example #44
File: utils.py Project: allenling/dbss
    def add_graphic(self):
        graphic_r = get_redis_connection('graphic')
        fdate = self.fcard.pub_date
        self.nowdate = fdate
        graphic_rid = str(self.fcard.mcard.id) + '_' + str(
            self.fcard.carduser.id)
        try:
            grap = Graphic.objects.get(card=self.fcard.mcard,
                                       fcard=self.fcard.mfcard)
            grap_json = simplejson.loads(grap.graphic)
            t_year = grap_json.get(str(fdate.year))
            if t_year:
                t_month = t_year.get(str(fdate.month))
                if t_month:
                    t_day = t_month.get(str(fdate.day))
                    if t_day:
                        t_day.append(str(fdate.time())[:8])
                    else:
                        t_month[fdate.day] = [str(fdate.time())[:8]]
                else:
                    t_year.update(
                        {fdate.month: {
                            fdate.day: [str(fdate.time())[:8]]
                        }})
            else:
                date_json = dict({
                    fdate.year: {
                        fdate.month: {
                            fdate.day: [str(fdate.time())[:8]]
                        }
                    }
                })
                grap_json.update(date_json)
            grap.graphic = simplejson.dumps(grap_json)
            grap.save(update_fields=['graphic'])
        except Graphic.DoesNotExist:
            grap_json = dict({
                fdate.year: {
                    fdate.month: {
                        fdate.day: [str(fdate.time())[:8]]
                    }
                }
            })
            grap = Graphic(graphic=simplejson.dumps(grap_json),
                           card=self.fcard.mcard,
                           fcard=self.fcard.mfcard)
            grap.save()
            self.fcard.graphic = grap

        self.fcard.save(update_fields=['graphic'])

        # update the graphic redis hash
        try:
            nowdate_json = grap_json.get(str(fdate.year)).get(str(fdate.month))
        except AttributeError:
            nowdate_json = grap_json.get(fdate.year).get(fdate.month)

        self.update_gredis(graphic_r, graphic_rid, nowdate_json, fdate)
Code Example #45
def get_notifications(user):
    r = get_redis_connection()
    user_key = get_user_key(user)
    notifications = r.hgetall(user_key).values()
    notifications = [
        json.loads(notification) for notification in notifications
    ]
    notifications.sort(key=itemgetter('timestamp'))
    return notifications
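
The write side isn't shown; a hedged counterpart that would produce entries in the shape this reader expects (the function name is hypothetical, get_user_key is the same helper used above):

import json
import time

def add_notification(user, notification_id, payload):
    # hypothetical producer for get_notifications() above
    r = get_redis_connection()
    payload = dict(payload, timestamp=time.time())  # the sort key used above
    r.hset(get_user_key(user), notification_id, json.dumps(payload))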
Code Example #46
File: views.py Project: allenling/dbss
    def get(self, request, *args, **kwargs):
        auth_card = self.get_object()
        graphic_r = get_redis_connection('graphic')
        allg = graphic_r.hgetall(str(auth_card.id) + '_' + kwargs.get('user_pk'))
        param = allg.get(kwargs.get('gyear') + '_' + kwargs.get('gmonth') + '_d')
        if param:
            param = simplejson.loads(param)
        rparam = param.get(kwargs.get('gday')) if param else []
        return HttpResponse(simplejson.dumps(rparam))
Code Example #47
File: tags.py Project: LifeMoroz/faq
def add_tags(prefix, tagged_id, tags):
    # here we simply add a whole batch of tags at once
    r = get_redis_connection()
    for t in tags:
        if t.strip() == '':
            continue
        r.execute_command('ZINCRBY', key_all_tags, 1, t)
        r.sadd(key(prefix, tagged_id), t)
        r.sadd(tag_key(prefix, t), tagged_id)
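
A matching removal would mirror each of those writes; a hedged sketch consistent with the keys used above (the function is hypothetical):

def remove_tag(prefix, tagged_id, tag):
    # hypothetical inverse of add_tags()
    r = get_redis_connection()
    r.execute_command('ZINCRBY', key_all_tags, -1, tag)  # drop global score
    r.srem(key(prefix, tagged_id), tag)
    r.srem(tag_key(prefix, tag), tagged_id)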
Code Example #48
File: utils.py Project: allenling/dbss
def updatefavlist(user_id, card_id, action, target):
    favlist_r = get_redis_connection('favlist')
    fa_list = favlist_r.hget(str(user_id), card_id)
    if fa_list is None or fa_list[0] != action:
        if target is not None:
            action = action + '_' + target
        favlist_r.hset(str(user_id), card_id, action)
    else:
        new_action = fa_list + '_' + target
        favlist_r.hset(str(user_id), card_id, new_action)
Code Example #49
File: tasks.py Project: Ebaneck/rapidpro
def squash_contactgroupcounts():
    """
    Squashes our ContactGroupCounts into single rows per ContactGroup
    """
    r = get_redis_connection()

    key = 'squash_contactgroupcounts'
    if not r.get(key):
        with r.lock(key, timeout=900):
            ContactGroupCount.squash_counts()
Code Example #50
File: views.py Project: allenling/dbss
    def get(self, request, *args, **kwargs):
        auth_card = self.get_object()
        graphic_r = get_redis_connection('graphic')
        allg = graphic_r.hgetall(str(auth_card.id) + '_' + kwargs.get('user_pk'))
        tmp = {}
        year_p = re.compile(r'^\d{4}$')
        for i in allg:
            if year_p.match(i):
                tmp.update({i: allg[i]})
        return HttpResponse(simplejson.dumps(tmp))
Code Example #51
File: views.py Project: yijingping/django-invcode
def index(request):
    redis = get_redis_connection('redis')
    first_generate(redis)
    incr_generate(redis)
    code = get_invcode(redis)
    res = {
        'ret': 0,
        'code': code
    }
    return HttpResponse(simplejson.dumps(res), mimetype="application/json; charset=utf-8")
Code Example #52
File: utils.py Project: allenling/dbss
def updatefavlist(user_id, card_id, action, target):
    favlist_r = get_redis_connection('favlist')
    fa_list = favlist_r.hget(str(user_id), card_id)
    if fa_list is None or fa_list[0] != action:
        if target is not None:
            action = action + '_' + target
        favlist_r.hset(str(user_id), card_id, action)
    else:
        new_action = fa_list + '_' + target
        favlist_r.hset(str(user_id), card_id, new_action)
Code Example #53
def calculate_flow_stats_task(flow_id):
    r = get_redis_connection()

    flow = Flow.objects.get(pk=flow_id)
    runs_started_cached = r.get(flow.get_stats_cache_key(FlowStatsCache.runs_started_count))
    runs_started_cached = 0 if runs_started_cached is None else int(runs_started_cached)
    runs_started = flow.runs.filter(contact__is_test=False).count()

    if runs_started != runs_started_cached:
        Flow.objects.get(pk=flow_id).do_calculate_flow_stats()
Code Example #54
File: tasks.py Project: harykeyrun/rapidpro
def check_messages_task():
    """
    Checks to see if any of our aggregators have errored messages that need to be retried.
    Also takes care of flipping Contacts from Failed to Normal and back based on their status.
    """
    from django.utils import timezone
    from .models import INCOMING, OUTGOING, PENDING, QUEUED, ERRORED, FAILED, WIRED, SENT, DELIVERED
    from temba.contacts.models import NORMAL
    from temba.orgs.models import Org
    from temba.channels.tasks import send_msg_task

    r = get_redis_connection()

    # only do this if we aren't already running
    key = 'check_messages_task'
    if not r.get(key):
        with r.lock(key, timeout=900):
            now = timezone.now()
            five_minutes_ago = now - timedelta(minutes=5)

            # get any contacts that are currently normal that had a failed message in the past five minutes
            for contact in Contact.objects.filter(msgs__created_on__gte=five_minutes_ago, msgs__direction=OUTGOING,
                                                  msgs__status=FAILED, status=NORMAL):
                # if the last message from this contact is failed, then fail this contact
                if contact.msgs.all().order_by('-created_on').first().status == FAILED:
                    contact.fail()

            # get any contacts that are currently failed that had a normal message in the past five minutes
            for contact in Contact.objects.filter(msgs__created_on__gte=five_minutes_ago, msgs__direction=OUTGOING,
                                                  msgs__status__in=[WIRED, SENT, DELIVERED], status=FAILED):
                # if the last message from this contact is ok, then mark them as normal
                if contact.msgs.all().order_by('-created_on').first().status in [WIRED, SENT, DELIVERED]:
                    contact.unfail()

            # for any org that sent messages in the past five minutes, check for pending messages
            for org in Org.objects.filter(msgs__created_on__gte=five_minutes_ago).distinct():
                org.trigger_send()

            # fire a few send msg tasks in case we dropped one somewhere during a restart
            # (these will be no-ops if there is nothing to do)
            send_msg_task.delay()
            send_msg_task.delay()

            handle_event_task.delay()
            handle_event_task.delay()

            # also check any incoming messages that are still pending somehow, reschedule them to be handled
            unhandled_messages = Msg.objects.filter(direction=INCOMING, status=PENDING, created_on__lte=five_minutes_ago)
            unhandled_messages = unhandled_messages.exclude(channel__org=None).exclude(contact__is_test=True)
            unhandled_count = unhandled_messages.count()

            if unhandled_count:
                print "** Found %d unhandled messages" % unhandled_count
                for msg in unhandled_messages:
                    msg.handle()
Code Example #55
File: tasks.py Project: weddingjuma/rapidpro-1
def collect_message_metrics_task():
    """
    Collects message metrics and sends them to our analytics.
    """
    from .models import INCOMING, OUTGOING, PENDING, QUEUED, ERRORED, INITIALIZING
    from temba.utils import analytics

    r = get_redis_connection()

    # only do this if we aren't already running
    key = 'collect_message_metrics'
    if not r.get(key):
        with r.lock(key, timeout=900):
            # current # of queued messages (excluding Android)
            count = Msg.objects.filter(direction=OUTGOING, status=QUEUED).exclude(channel=None).\
                exclude(topup=None).exclude(channel__channel_type='A').exclude(next_attempt__gte=timezone.now()).count()
            analytics.gauge('temba.current_outgoing_queued', count)

            # current # of initializing messages (excluding Android)
            count = Msg.objects.filter(
                direction=OUTGOING,
                status=INITIALIZING).exclude(channel=None).exclude(
                    topup=None).exclude(channel__channel_type='A').count()
            analytics.gauge('temba.current_outgoing_initializing', count)

            # current # of pending messages (excluding Android)
            count = Msg.objects.filter(
                direction=OUTGOING,
                status=PENDING).exclude(channel=None).exclude(
                    topup=None).exclude(channel__channel_type='A').count()
            analytics.gauge('temba.current_outgoing_pending', count)

            # current # of errored messages (excluding Android)
            count = Msg.objects.filter(
                direction=OUTGOING,
                status=ERRORED).exclude(channel=None).exclude(
                    topup=None).exclude(channel__channel_type='A').count()
            analytics.gauge('temba.current_outgoing_errored', count)

            # current # of android outgoing messages waiting to be sent
            count = Msg.objects.filter(
                direction=OUTGOING,
                status__in=[PENDING, QUEUED],
                channel__channel_type='A').exclude(channel=None).exclude(
                    topup=None).count()
            analytics.gauge('temba.current_outgoing_android', count)

            # current # of pending incoming messages that haven't yet been handled
            count = Msg.objects.filter(
                direction=INCOMING,
                status=PENDING).exclude(channel=None).count()
            analytics.gauge('temba.current_incoming_pending', count)

            # record when we last ran; this acts as a canary for whether our tasks are falling behind or stopped
            cache.set('last_cron', timezone.now())
Code Example #56
File: tasks.py Project: xuanhan863/rapidpro
def collect_message_metrics_task():
    """
    Collects message metrics and sends them to our analytics.
    """
    from .models import INCOMING, OUTGOING, DELIVERED, SENT, WIRED, FAILED, PENDING, QUEUED, ERRORED, INITIALIZING, HANDLED
    import analytics

    r = get_redis_connection()

    # only do this if we aren't already running
    key = 'collect_message_metrics'
    if not r.get(key):
        with r.lock(key, timeout=900):
            # we use our hostname as our source so we can filter these for different brands
            context = dict(source=settings.HOSTNAME)

            # total # of delivered messages
            count = Msg.objects.filter(direction=OUTGOING, status=DELIVERED).exclude(channel=None).exclude(topup=None).count()
            analytics.track('System', 'temba.total_outgoing_delivered', properties=dict(value=count), context=context)

            # total # of sent messages (this includes delivered and wired)
            count = Msg.objects.filter(direction=OUTGOING, status__in=[DELIVERED, SENT, WIRED]).exclude(channel=None).exclude(topup=None).count()
            analytics.track('System', 'temba.total_outgoing_sent', properties=dict(value=count), context=context)

            # total # of failed messages
            count = Msg.objects.filter(direction=OUTGOING, status=FAILED).exclude(channel=None).exclude(topup=None).count()
            analytics.track('System', 'temba.total_outgoing_failed', properties=dict(value=count), context=context)

            # current # of queued messages (excluding Android)
            count = Msg.objects.filter(direction=OUTGOING, status=QUEUED).exclude(channel=None).exclude(topup=None).exclude(channel__channel_type='A').count()
            analytics.track('System', 'temba.current_outgoing_queued', properties=dict(value=count), context=context)

            # current # of initializing messages (excluding Android)
            count = Msg.objects.filter(direction=OUTGOING, status=INITIALIZING).exclude(channel=None).exclude(topup=None).exclude(channel__channel_type='A').count()
            analytics.track('System', 'temba.current_outgoing_initializing', properties=dict(value=count), context=context)

            # current # of pending messages (excluding Android)
            count = Msg.objects.filter(direction=OUTGOING, status=PENDING).exclude(channel=None).exclude(topup=None).exclude(channel__channel_type='A').count()
            analytics.track('System', 'temba.current_outgoing_pending', properties=dict(value=count), context=context)

            # current # of errored messages (excluding Android)
            count = Msg.objects.filter(direction=OUTGOING, status=ERRORED).exclude(channel=None).exclude(topup=None).exclude(channel__channel_type='A').count()
            analytics.track('System', 'temba.current_outgoing_errored', properties=dict(value=count), context=context)

            # current # of android outgoing messages waiting to be sent
            count = Msg.objects.filter(direction=OUTGOING, status__in=[PENDING, QUEUED], channel__channel_type='A').exclude(channel=None).exclude(topup=None).count()
            analytics.track('System', 'temba.current_outgoing_android', properties=dict(value=count), context=context)

            # current # of pending incoming messages that haven't yet been handled
            count = Msg.objects.filter(direction=INCOMING, status=PENDING).exclude(channel=None).count()
            analytics.track('System', 'temba.current_incoming_pending', properties=dict(value=count), context=context)

            # record when we last ran; this acts as a canary for whether our tasks are falling behind or stopped
            cache.set('last_cron', timezone.now())