Code Example #1
def handle_fixture_location_update(sender, doc, diff, backend, **kwargs):
    if doc.get('doc_type') == 'XFormInstance' and doc.get(
            'domain') in M4CHANGE_DOMAINS:
        xform = XFormInstance.wrap(doc)
        if hasattr(xform, "xmlns") and xform.xmlns in ALL_M4CHANGE_FORMS:
            location_id = xform.form.get("location_id", None)
            if not location_id:
                return
            client = get_redis_client()
            redis_key = REDIS_FIXTURE_KEYS[xform.domain]
            redis_lock_key = REDIS_FIXTURE_LOCK_KEYS[xform.domain]
            lock = get_redis_lock(redis_lock_key,
                                  timeout=5,
                                  name=redis_lock_key)
            if lock.acquire(blocking=True):
                try:
                    location_ids_str = client.get(redis_key)
                    location_ids = []
                    if location_ids_str:
                        location_ids = json.loads(location_ids_str)
                    if location_id not in location_ids:
                        location_ids.append(location_id)
                    client.set(redis_key, json.dumps(location_ids))
                finally:
                    release_lock(lock, True)
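
Every example on this page follows the same basic shape: acquire a Redis lock, do the protected work inside try, and release the lock in finally so it is freed even if that work raises. As a point of comparison, here is a minimal sketch of example #1's read-modify-write of the location-ID list written against redis-py's built-in Lock (Redis.lock / Lock.acquire / Lock.release) instead of the commcare-hq helpers (get_redis_lock, release_lock) used above; it illustrates the pattern and is not commcare-hq code.

# Minimal sketch: same acquire / try / finally pattern as example #1,
# using redis-py's Lock directly instead of commcare-hq's lock helpers.
import json
import redis

client = redis.Redis()

def record_location_id(redis_key, redis_lock_key, location_id):
    lock = client.lock(redis_lock_key, timeout=5)
    if lock.acquire(blocking=True):
        try:
            location_ids_str = client.get(redis_key)
            location_ids = json.loads(location_ids_str) if location_ids_str else []
            if location_id not in location_ids:
                location_ids.append(location_id)
            client.set(redis_key, json.dumps(location_ids))
        finally:
            # Release even if the read-modify-write above raises.
            lock.release()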
Code Example #2
def rebuild_and_diff_cases(sql_case, couch_case, original_couch_case, diff,
                           dd_count):
    """Try rebuilding SQL case and save if rebuild resolves diffs

    :param sql_case: CommCareCaseSQL object.
    :param couch_case: JSON-ified version of CommCareCase.
    :param diff: function to produce diffs between couch and SQL case JSON.
    :param dd_count: metrics recording counter function.
    :returns: list of diffs returned by `diff(couch_case, rebuilt_case_json)`
    """
    lock = CommCareCaseSQL.get_obj_lock_by_id(sql_case.case_id)
    acquire_lock(lock, degrade_gracefully=False)
    try:
        if should_sort_sql_transactions(sql_case, couch_case):
            new_case = rebuild_case_with_couch_action_order(
                sql_case, couch_case)
            dd_count("commcare.couchsqlmigration.case.rebuild.sql.sort")
        else:
            new_case = rebuild_case(sql_case)
            dd_count("commcare.couchsqlmigration.case.rebuild.sql")
        sql_json = new_case.to_json()
        diffs = diff(couch_case, sql_json)
        if diffs:
            original_diffs = diff(original_couch_case, sql_json)
            if not original_diffs:
                log.info("original Couch case matches rebuilt SQL case: %s",
                         sql_case.case_id)
                diffs = original_diffs
        if not diffs:
            # save case only if rebuild resolves diffs
            CaseAccessorSQL.save_case(new_case)
            publish_case_saved(new_case)
    finally:
        release_lock(lock, degrade_gracefully=True)
    return sql_json, diffs
Code Example #3
File: processor.py Project: dimagi/commcare-hq
    def hard_rebuild_case(domain, case_id, detail, lock=True):
        if lock:
            # only record metric if locking since otherwise it has been
            # (most likely) recorded elsewhere
            case_load_counter("rebuild_case", domain)()
        case, lock_obj = FormProcessorSQL.get_case_with_lock(case_id, lock=lock)
        found = bool(case)
        if not found:
            case = CommCareCaseSQL(case_id=case_id, domain=domain)
            if lock:
                lock_obj = CommCareCaseSQL.get_obj_lock_by_id(case_id)
                acquire_lock(lock_obj, degrade_gracefully=False)

        try:
            assert case.domain == domain, (case.domain, domain)
            case, rebuild_transaction = FormProcessorSQL._rebuild_case_from_transactions(case, detail)
            if case.is_deleted and not case.is_saved():
                return None

            case.server_modified_on = rebuild_transaction.server_date
            CaseAccessorSQL.save_case(case)
            publish_case_saved(case)
            return case
        finally:
            release_lock(lock_obj, degrade_gracefully=True)
Code Example #4
File: tasks.py Project: tlwakwella/commcare-hq
def process_sms(queued_sms_pk):
    """
    queued_sms_pk - pk of a QueuedSMS entry
    """
    client = get_redis_client()
    utcnow = get_utcnow()
    # Prevent more than one task from processing this SMS, just in case
    # the message got enqueued twice.
    message_lock = get_lock(client, "sms-queue-processing-%s" % queued_sms_pk)

    if message_lock.acquire(blocking=False):
        try:
            msg = QueuedSMS.objects.get(pk=queued_sms_pk)
        except QueuedSMS.DoesNotExist:
            # The message was already processed and removed from the queue
            release_lock(message_lock, True)
            return

        if message_is_stale(msg, utcnow):
            msg.set_system_error(SMS.ERROR_MESSAGE_IS_STALE)
            remove_from_queue(msg)
            release_lock(message_lock, True)
            return

        if msg.direction == OUTGOING:
            if msg.domain:
                domain_object = Domain.get_by_name(msg.domain)
            else:
                domain_object = None
            if domain_object and handle_domain_specific_delays(msg, domain_object, utcnow):
                release_lock(message_lock, True)
                return

        requeue = False
        # Process inbound SMS from a single contact one at a time
        recipient_block = msg.direction == INCOMING
        if (isinstance(msg.processed, bool)
            and not msg.processed
            and not msg.error
            and msg.datetime_to_process < utcnow):
            if recipient_block:
                recipient_lock = get_lock(client, 
                    "sms-queue-recipient-phone-%s" % msg.phone_number)
                recipient_lock.acquire(blocking=True)

            if msg.direction == OUTGOING:
                requeue = handle_outgoing(msg)
            elif msg.direction == INCOMING:
                handle_incoming(msg)
            else:
                msg.set_system_error(SMS.ERROR_INVALID_DIRECTION)
                remove_from_queue(msg)

            if recipient_block:
                release_lock(recipient_lock, True)

        release_lock(message_lock, True)
        if requeue:
            process_sms.delay(queued_sms_pk)
Code Example #5
File: casedb_base.py Project: kkrampa/commcare-hq
    def __exit__(self, exc_type, exc_val, exc_tb):
        for lock in self.locks:
            if lock is not None:
                release_lock(lock, True)
        self.locks = []

        if self.lock_stack:
            self.locks = self.lock_stack.pop()
Code Example #6
    def __exit__(self, exc_type, exc_val, exc_tb):
        for lock in self.locks:
            if lock is not None:
                release_lock(lock, True)
        self.locks = []

        if self.lock_stack:
            self.locks = self.lock_stack.pop()
Code Example #7
 def enqueue(self, item):
     queue_name = self.get_queue_name()
     enqueuing_lock = self.get_enqueuing_lock(
         "%s-enqueuing-%s-%s" % (queue_name, item.id, item.key))
     if enqueuing_lock.acquire(blocking=False):
         try:
             self.enqueue_item(item)
         except:
             # We couldn't enqueue, so release the lock
             release_lock(enqueuing_lock, True)
Code Example #8
 def enqueue(self, item):
     queue_name = self.get_queue_name()
     enqueuing_lock = self.get_enqueuing_lock(
         "%s-enqueuing-%s-%s" % (queue_name, item.id, item.key))
     if enqueuing_lock.acquire(blocking=False):
         try:
             self.enqueue_item(item)
         except:
             # We couldn't enqueue, so release the lock
             release_lock(enqueuing_lock, True)
Code Example #9
 def enqueue(self, item, redis_client=None):
     client = redis_client or get_redis_client()
     queue_name = self.get_queue_name()
     enqueuing_lock = self.get_enqueuing_lock(client,
         "%s-enqueuing-%s-%s" % (queue_name, item.id, item.key))
     if enqueuing_lock.acquire(blocking=False):
         try:
             self.enqueue_item(item)
         except:
             # We couldn't enqueue, so release the lock
             release_lock(enqueuing_lock, True)
Code Example #10
File: generic_queue.py Project: ansarbek/commcare-hq
 def enqueue(self, item_id, process_datetime_str, redis_client=None):
     client = redis_client or get_redis_client()
     queue_name = self.get_queue_name()
     enqueuing_lock = self.get_enqueuing_lock(client,
         "%s-enqueuing-%s-%s" % (queue_name, item_id, process_datetime_str))
     if enqueuing_lock.acquire(blocking=False):
         try:
             self.enqueue_item(item_id)
         except:
             # We couldn't enqueue, so release the lock
             release_lock(enqueuing_lock, True)
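
Examples #7 through #10 use the lock differently: it is acquired non-blocking and released only when enqueue_item raises, so after a successful enqueue the lock stays held until whatever timeout get_enqueuing_lock configured, presumably so that repeated enqueue attempts for the same item within that window are skipped. Here is a minimal sketch of that deduplication pattern, again using redis-py's Lock rather than the helpers above; enqueue_item stands in for whatever actually queues the work.

# Minimal sketch: non-blocking acquire, release only on failure, so a
# successful enqueue keeps the lock as a short-lived dedup guard.
import redis

client = redis.Redis()

def enqueue_once(item_key, enqueue_item, timeout=300):
    lock = client.lock("enqueuing-%s" % item_key, timeout=timeout)
    if lock.acquire(blocking=False):
        try:
            enqueue_item(item_key)
        except Exception:
            # We couldn't enqueue, so release the lock and surface the error
            # (the originals above release and swallow it instead).
            lock.release()
            raise
    # If acquire failed, the same item was enqueued recently; do nothing.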
Code Example #11
def handle_outgoing(msg):
    """
    Should return a requeue flag, so if it returns True, the message will be
    requeued and processed again immediately, and if it returns False, it will
    not be queued again.
    """
    backend = msg.outbound_backend
    sms_rate_limit = backend.get_sms_rate_limit()
    use_rate_limit = sms_rate_limit is not None
    use_load_balancing = isinstance(backend, PhoneLoadBalancingMixin)
    max_simultaneous_connections = backend.get_max_simultaneous_connections()
    orig_phone_number = None

    if use_load_balancing:
        orig_phone_number = backend.get_next_phone_number(msg.phone_number)

    if use_rate_limit:
        if use_load_balancing:
            redis_key = 'sms-rate-limit-backend-%s-phone-%s' % (
                backend.pk, orig_phone_number)
        else:
            redis_key = 'sms-rate-limit-backend-%s' % backend.pk

        if not rate_limit(
                redis_key, actions_allowed=sms_rate_limit, how_often=60):
            # Requeue the message and try it again shortly
            return True

    if max_simultaneous_connections:
        connection_slot_lock = get_connection_slot_lock(
            msg.phone_number, backend, max_simultaneous_connections)
        if not connection_slot_lock.acquire(blocking=False):
            # Requeue the message and try it again shortly
            return True

    if passes_trial_check(msg):
        result = send_message_via_backend(msg,
                                          backend=backend,
                                          orig_phone_number=orig_phone_number)

    if max_simultaneous_connections:
        release_lock(connection_slot_lock, True)

    if msg.error:
        remove_from_queue(msg)
    else:
        # Only do the following if an unrecoverable error did not happen
        if result:
            handle_successful_processing_attempt(msg)
        else:
            handle_unsuccessful_processing_attempt(msg)

    return False
Code Example #12
File: tasks.py Project: dimagi/commcare-hq
def handle_outgoing(msg):
    """
    Should return a requeue flag, so if it returns True, the message will be
    requeued and processed again immediately, and if it returns False, it will
    not be queued again.
    """
    backend = msg.outbound_backend
    sms_rate_limit = backend.get_sms_rate_limit()
    use_rate_limit = sms_rate_limit is not None
    use_load_balancing = isinstance(backend, PhoneLoadBalancingMixin)
    max_simultaneous_connections = backend.get_max_simultaneous_connections()
    orig_phone_number = None

    if use_load_balancing:
        orig_phone_number = backend.get_next_phone_number(msg.phone_number)

    if use_rate_limit:
        if use_load_balancing:
            redis_key = 'sms-rate-limit-backend-%s-phone-%s' % (backend.pk, orig_phone_number)
        else:
            redis_key = 'sms-rate-limit-backend-%s' % backend.pk

        if not rate_limit(redis_key, actions_allowed=sms_rate_limit, how_often=60):
            # Requeue the message and try it again shortly
            return True

    if max_simultaneous_connections:
        connection_slot_lock = get_connection_slot_lock(msg.phone_number, backend, max_simultaneous_connections)
        if not connection_slot_lock.acquire(blocking=False):
            # Requeue the message and try it again shortly
            return True

    if passes_trial_check(msg):
        result = send_message_via_backend(
            msg,
            backend=backend,
            orig_phone_number=orig_phone_number
        )

    if max_simultaneous_connections:
        release_lock(connection_slot_lock, True)

    if msg.error:
        remove_from_queue(msg)
    else:
        # Only do the following if an unrecoverable error did not happen
        if result:
            handle_successful_processing_attempt(msg)
        else:
            handle_unsuccessful_processing_attempt(msg)

    return False
Code Example #13
File: mixin.py Project: ekush/commcare-hq
 def finish(self, save_stats=True, raise_exc=False):
     try:
         if (save_stats and self.stats_key and self.stats
                 and self.redis_client):
             dumpable = {}
             for k, v in self.stats.items():
                 dumpable[k] = [json_format_datetime(t) for t in v]
             self.redis_client.set(self.stats_key, json.dumps(dumpable))
         if self.lock:
             release_lock(self.lock, True)
     except:
         if raise_exc:
             raise
Code Example #14
File: mixin.py Project: johan--/commcare-hq
 def finish(self, save_stats=True, raise_exc=False):
     try:
         if (save_stats and self.stats_key and self.stats and
             self.redis_client):
             dumpable = {}
             for k, v in self.stats.items():
                 dumpable[k] = [json_format_datetime(t) for t in v]
             self.redis_client.set(self.stats_key, json.dumps(dumpable))
         if self.lock:
             release_lock(self.lock, True)
     except:
         if raise_exc:
             raise
Code Example #15
        def _inner(self, *args, **kwargs):
            if settings.UNIT_TESTING:  # Don't depend on redis
                return fn(*args, **kwargs)

            key = _get_unique_key(unique_key, fn, *args, **kwargs)
            lock = get_redis_lock(key, timeout=timeout, name=fn.__name__)
            if lock.acquire(blocking=False):
                try:
                    return fn(*args, **kwargs)
                finally:
                    release_lock(lock, True)
            else:
                msg = "Could not aquire lock '{}' for task '{}'.".format(
                    key, fn.__name__)
                self.retry(exc=CouldNotAqcuireLock(msg))
Code Example #16
        def _inner(self, *args, **kwargs):
            if settings.UNIT_TESTING:  # Don't depend on redis
                return fn(*args, **kwargs)

            key = _get_unique_key(unique_key, fn, *args, **kwargs)
            lock = get_redis_lock(key, timeout=timeout, name=fn.__name__)
            if lock.acquire(blocking=False):
                try:
                    return fn(*args, **kwargs)
                finally:
                    release_lock(lock, True)
            else:
                msg = "Could not aquire lock '{}' for task '{}'.".format(
                    key, fn.__name__)
                self.retry(exc=CouldNotAqcuireLock(msg))
Code Example #17
File: api.py Project: philipkaare/commcare-hq
def get_randomized_message(case, order):
    if order >= 0 and order <= 279:
        client = get_redis_client()
        lock = client.lock("fri-randomization-%s" % case._id, timeout=300)

        lock.acquire(blocking=True)
        if not already_randomized(case):
            randomize_messages(case)
        release_lock(lock, True)

        message = FRIRandomizedMessage.view(
            "fri/randomized_message", key=[case.domain, case._id, order], include_docs=True
        ).one()
        return message
    else:
        return None
Code Example #18
def test_get_case_with_lock(self, lock, wrap):
    case, case_lock = FormProcessorCouch.get_case_with_lock(self.case_id, lock, wrap)

    try:
        if lock:
            self.assertIsNotNone(case_lock)
        else:
            self.assertIsNone(case_lock)

        if wrap:
            self.assertEqual(len(case.actions), 2)
        else:
            self.assertEqual('actions' in case, True)

        self.assertIsInstance(case, CommCareCase if wrap else dict)
    finally:
        release_lock(case_lock, True)
Code Example #19
def create_and_lock_xform(instance,
                          attachments=None,
                          process=None,
                          domain=None,
                          _id=None):
    """
    Save a new xform to couchdb in a thread-safe manner
    Returns a LockManager containing the new XFormInstance and its lock,
    or raises an exception if anything goes wrong.

    attachments is a dictionary of the request.FILES that are not the xform;
    key is parameter name, value is django MemoryFile object stream

    """
    attachments = attachments or {}

    try:
        doc_id, lock = create_xform_from_xml(instance,
                                             process=process,
                                             _id=_id)
    except couchforms.XMLSyntaxError as e:
        doc = _log_hard_failure(instance, attachments, e)
        raise SubmissionError(doc)
    except DuplicateError:
        return _handle_id_conflict(instance,
                                   attachments,
                                   process=process,
                                   domain=domain)

    try:
        xform = XFormInstance.get(doc_id)
        for key, value in attachments.items():
            xform.put_attachment(value,
                                 name=key,
                                 content_type=value.content_type,
                                 content_length=value.size)
    except Exception as e:
        logging.exception("Problem with form %s" % doc_id)
        # "rollback" by changing the doc_type to XFormError
        xform = XFormError.get(doc_id)
        xform.problem = unicode(e)
        xform.save()
        release_lock(lock, degrade_gracefully=True)
        lock = None
    return LockManager(xform, lock)
Code Example #20
File: views.py Project: OpenFn/commcare-hq
 def func_wrapper(request, *args, **kwargs):
     key = location_lock_key(request.domain)
     client = get_redis_client()
     lock = client.lock(key, timeout=LOCK_LOCATIONS_TIMEOUT)
     if lock.acquire(blocking=False):
         try:
             return func(request, *args, **kwargs)
         finally:
             release_lock(lock, True)
     else:
         message = _("Some of the location edits are still in progress, "
                     "please wait until they finish and then try again")
         messages.warning(request, message)
         if request.method == 'DELETE':
             # handle delete_location view
             return json_response({'success': False, 'message': message})
         else:
             return HttpResponseRedirect(request.META['HTTP_REFERER'])
Code Example #21
File: views.py Project: kkrampa/commcare-hq
 def func_wrapper(request, *args, **kwargs):
     key = "import_locations_async-{domain}".format(domain=request.domain)
     client = get_redis_client()
     lock = client.lock(key, timeout=LOCK_LOCATIONS_TIMEOUT)
     if lock.acquire(blocking=False):
         try:
             return func(request, *args, **kwargs)
         finally:
             release_lock(lock, True)
     else:
         message = _("Some of the location edits are still in progress, "
                     "please wait until they finish and then try again")
         messages.warning(request, message)
         if request.method == 'DELETE':
             # handle delete_location view
             return json_response({'success': False, 'message': message})
         else:
             return HttpResponseRedirect(request.META['HTTP_REFERER'])
Code Example #22
def generate_fixtures_for_locations():

    client = get_redis_client()
    start_date, end_date = get_last_n_months(1)[0]
    db = FixtureReportResult.get_db()
    data_source = M4ChangeReportDataSource()

    for domain in M4CHANGE_DOMAINS:
        redis_key = REDIS_FIXTURE_KEYS[domain]
        redis_lock_key = REDIS_FIXTURE_LOCK_KEYS[domain]
        lock = client.lock(redis_lock_key, timeout=5)
        location_ids = []
        if lock.acquire(blocking=True):
            try:
                location_ids_str = client.get(redis_key)
                location_ids = json.loads(
                    location_ids_str if location_ids_str else "[]")
                client.set(redis_key, '[]')
            finally:
                release_lock(lock, True)
        for location_id in location_ids:

            data_source.configure(
                config={
                    "startdate": start_date,
                    "enddate": end_date,
                    "location_id": location_id,
                    "domain": domain
                })
            report_data = data_source.get_data()

            for report_slug in report_data:

                # Remove cached fixture docs
                db.delete_docs(
                    FixtureReportResult.all_by_composite_key(
                        domain, location_id, json_format_date(start_date),
                        json_format_date(end_date), report_slug))
                rows = dict(report_data[report_slug].get("data", []))
                name = report_data[report_slug].get("name")
                FixtureReportResult.save_result(domain, location_id,
                                                start_date.date(),
                                                end_date.date(), report_slug,
                                                rows, name)
Code Example #23
File: processor.py Project: dimagi/commcare-hq
    def hard_rebuild_case(domain, case_id, detail, save=True, lock=True):
        if lock:
            # only record metric if locking since otherwise it has been
            # (most likely) recorded elsewhere
            case_load_counter("rebuild_case", domain)()
        case, lock_obj = FormProcessorCouch.get_case_with_lock(case_id, lock=lock, wrap=True)
        found = bool(case)
        if not found:
            case = CommCareCase()
            case.case_id = case_id
            case.domain = domain
            if lock:
                lock_obj = CommCareCase.get_obj_lock_by_id(case_id)
                acquire_lock(lock_obj, degrade_gracefully=False)

        try:
            assert case.domain == domain, (case.domain, domain)
            forms = FormProcessorCouch.get_case_forms(case_id)
            form_load_counter("rebuild_case", domain)(len(forms))
            filtered_forms = [f for f in forms if f.is_normal]
            sorted_forms = sorted(filtered_forms, key=lambda f: f.received_on)

            actions = _get_actions_from_forms(domain, sorted_forms, case_id)

            if not found and case.domain is None:
                case.domain = domain

            rebuild_case_from_actions(case, actions)
            # todo: should this move to case.rebuild?
            if not case.xform_ids:
                if not found:
                    return None
                # there were no more forms. 'delete' the case
                case.doc_type = 'CommCareCase-Deleted'

            # add a "rebuild" action
            case.actions.append(_rebuild_action())
            if save:
                case.save()
            return case
        finally:
            release_lock(lock_obj, degrade_gracefully=True)
Code Example #24
def process_sms(message_id):
    """
    message_id - _id of an SMSLog entry
    """
    client = get_redis_client()
    utcnow = datetime.utcnow()
    # Prevent more than one task from processing this SMS, just in case
    # the message got enqueued twice.
    message_lock = get_lock(client, "sms-queue-processing-%s" % message_id)

    if message_lock.acquire(blocking=False):
        msg = SMSLog.get(message_id)

        if message_is_stale(msg, utcnow):
            msg.set_system_error(SMS.ERROR_MESSAGE_IS_STALE)
            release_lock(message_lock, True)
            return

        if msg.direction == OUTGOING:
            if msg.domain:
                domain_object = Domain.get_by_name(msg.domain, strict=True)
            else:
                domain_object = None
            if domain_object and handle_domain_specific_delays(msg, domain_object, utcnow):
                release_lock(message_lock, True)
                return

        requeue = False
        # Process inbound SMS from a single contact one at a time
        recipient_block = msg.direction == INCOMING
        if (isinstance(msg.processed, bool)
            and not msg.processed
            and not msg.error
            and msg.datetime_to_process < utcnow):
            if recipient_block:
                recipient_lock = get_lock(client, 
                    "sms-queue-recipient-phone-%s" % msg.phone_number)
                recipient_lock.acquire(blocking=True)

            if msg.direction == OUTGOING:
                requeue = handle_outgoing(msg)
            elif msg.direction == INCOMING:
                handle_incoming(msg)
            else:
                msg.set_system_error(SMS.ERROR_INVALID_DIRECTION)

            if recipient_block:
                release_lock(recipient_lock, True)

        release_lock(message_lock, True)
        if requeue:
            process_sms.delay(message_id)
Code Example #25
File: tasks.py Project: nnestle/commcare-hq
def process_sms(message_id):
    """
    message_id - _id of an SMSLog entry
    """
    client = get_redis_client()
    utcnow = datetime.utcnow()
    # Prevent more than one task from processing this SMS, just in case
    # the message got enqueued twice.
    message_lock = get_lock(client, "sms-queue-processing-%s" % message_id)

    if message_lock.acquire(blocking=False):
        msg = SMSLog.get(message_id)

        if message_is_stale(msg, utcnow):
            msg.set_system_error(SMS.ERROR_MESSAGE_IS_STALE)
            release_lock(message_lock, True)
            return

        if msg.direction == OUTGOING:
            if msg.domain:
                domain_object = Domain.get_by_name(msg.domain, strict=True)
            else:
                domain_object = None
            if domain_object and handle_domain_specific_delays(msg, domain_object, utcnow):
                release_lock(message_lock, True)
                return

        requeue = False
        # Process inbound SMS from a single contact one at a time
        recipient_block = msg.direction == INCOMING
        if (isinstance(msg.processed, bool)
            and not msg.processed
            and not msg.error
            and msg.datetime_to_process < utcnow):
            if recipient_block:
                recipient_lock = get_lock(client, 
                    "sms-queue-recipient-phone-%s" % msg.phone_number)
                recipient_lock.acquire(blocking=True)

            if msg.direction == OUTGOING:
                requeue = handle_outgoing(msg)
            elif msg.direction == INCOMING:
                handle_incoming(msg)
            else:
                msg.set_system_error(SMS.ERROR_INVALID_DIRECTION)

            if recipient_block:
                release_lock(recipient_lock, True)

        release_lock(message_lock, True)
        if requeue:
            process_sms.delay(message_id)
Code Example #26
File: tasks.py Project: ansarbek/commcare-hq
def generate_fixtures_for_locations():

    client = get_redis_client()
    start_date, end_date = get_last_n_months(1)[0]
    db = FixtureReportResult.get_db()
    data_source = M4ChangeReportDataSource()

    for domain in M4CHANGE_DOMAINS:
        redis_key = REDIS_FIXTURE_KEYS[domain]
        redis_lock_key = REDIS_FIXTURE_LOCK_KEYS[domain]
        lock = client.lock(redis_lock_key, timeout=5)
        location_ids = []
        if lock.acquire(blocking=True):
            try:
                location_ids_str = client.get(redis_key)
                location_ids = json.loads(location_ids_str if location_ids_str else "[]")
                client.set(redis_key, '[]')
            finally:
                release_lock(lock, True)
        for location_id in location_ids:

            data_source.configure(config={
                "startdate": start_date,
                "enddate": end_date,
                "location_id": location_id,
                "domain": domain
            })
            report_data = data_source.get_data()

            for report_slug in report_data:

                # Remove cached fixture docs
                db.delete_docs(
                    FixtureReportResult.all_by_composite_key(
                        domain, location_id, json_format_date(start_date),
                        json_format_date(end_date), report_slug)
                )
                rows = dict(report_data[report_slug].get("data", []))
                name = report_data[report_slug].get("name")
                FixtureReportResult.save_result(domain, location_id, start_date.date(), end_date.date(),
                                                report_slug, rows, name)
Code Example #27
File: util.py Project: SEL-Columbia/commcare-hq
def create_and_lock_xform(instance, attachments=None, process=None,
                          domain=None, _id=None):
    """
    Save a new xform to couchdb in a thread-safe manner
    Returns a LockManager containing the new XFormInstance and its lock,
    or raises an exception if anything goes wrong.

    attachments is a dictionary of the request.FILES that are not the xform;
    key is parameter name, value is django MemoryFile object stream

    """
    attachments = attachments or {}

    try:
        doc_id, lock = create_xform_from_xml(instance, process=process,
                                             _id=_id)
    except couchforms.XMLSyntaxError as e:
        doc = _log_hard_failure(instance, attachments, e)
        raise SubmissionError(doc)
    except DuplicateError:
        return _handle_id_conflict(instance, attachments, process=process,
                                   domain=domain)

    try:
        xform = XFormInstance.get(doc_id)
        for key, value in attachments.items():
            xform.put_attachment(
                value,
                name=key,
                content_type=value.content_type,
                content_length=value.size
            )
    except Exception as e:
        logging.exception("Problem with form %s" % doc_id)
        # "rollback" by changing the doc_type to XFormError
        xform = XFormError.get(doc_id)
        xform.problem = unicode(e)
        xform.save()
        release_lock(lock, degrade_gracefully=True)
        lock = None
    return LockManager(xform, lock)
Code Example #28
File: processor.py Project: kkrampa/commcare-hq
    def hard_rebuild_case(domain, case_id, detail, lock=True):
        case, lock_obj = FormProcessorSQL.get_case_with_lock(case_id, lock=lock)
        found = bool(case)
        if not found:
            case = CommCareCaseSQL(case_id=case_id, domain=domain)
            if lock:
                lock_obj = CommCareCaseSQL.get_obj_lock_by_id(case_id)
                acquire_lock(lock_obj, degrade_gracefully=False)

        try:
            assert case.domain == domain, (case.domain, domain)
            case, rebuild_transaction = FormProcessorSQL._rebuild_case_from_transactions(case, detail)
            if case.is_deleted and not case.is_saved():
                return None

            case.server_modified_on = rebuild_transaction.server_date
            CaseAccessorSQL.save_case(case)
            publish_case_saved(case)
            return case
        finally:
            release_lock(lock_obj, degrade_gracefully=True)
Code Example #29
File: decorators.py Project: homck007/commcare-hq
        def _inner(self, *args, **kwargs):
            if settings.UNIT_TESTING:  # Don't depend on redis
                return fn(*args, **kwargs)

            client = get_redis_client()
            key = _get_unique_key(unique_key, fn, *args, **kwargs)
            lock = client.lock(key, timeout=timeout)
            if lock.acquire(blocking=False):
                try:
                    # Actually call the function
                    ret_val = fn(*args, **kwargs)
                except Exception:
                    # Don't leave the lock around if the task fails
                    release_lock(lock, True)
                    raise

                release_lock(lock, True)
                return ret_val
            else:
                msg = "Could not aquire lock '{}' for task '{}'.".format(
                    key, fn.__name__)
                self.retry(exc=CouldNotAqcuireLock(msg))
Code Example #30
File: signals.py Project: kkrampa/commcare-hq
def handle_fixture_location_update(sender, doc, diff, backend, **kwargs):
    if doc.get('doc_type') == 'XFormInstance' and doc.get('domain') in M4CHANGE_DOMAINS:
        xform = XFormInstance.wrap(doc)
        if hasattr(xform, "xmlns") and xform.xmlns in ALL_M4CHANGE_FORMS:
            location_id = xform.form.get("location_id", None)
            if not location_id:
                return
            client = get_redis_client()
            redis_key = REDIS_FIXTURE_KEYS[xform.domain]
            redis_lock_key = REDIS_FIXTURE_LOCK_KEYS[xform.domain]
            lock = client.lock(redis_lock_key, timeout=5)
            if lock.acquire(blocking=True):
                try:
                    location_ids_str = client.get(redis_key)
                    location_ids = []
                    if location_ids_str:
                        location_ids = json.loads(location_ids_str)
                    if location_id not in location_ids:
                        location_ids.append(location_id)
                    client.set(redis_key, json.dumps(location_ids))
                finally:
                    release_lock(lock, True)
Code Example #31
        def _inner(self, *args, **kwargs):
            if settings.UNIT_TESTING:  # Don't depend on redis
                return fn(*args, **kwargs)

            client = get_redis_client()
            key = _get_unique_key(unique_key, fn, *args, **kwargs)
            lock = client.lock(key, timeout=timeout)
            if lock.acquire(blocking=False):
                try:
                    # Actually call the function
                    ret_val = fn(*args, **kwargs)
                except Exception:
                    # Don't leave the lock around if the task fails
                    release_lock(lock, True)
                    raise

                release_lock(lock, True)
                return ret_val
            else:
                msg = "Could not aquire lock '{}' for task '{}'.".format(
                    key, fn.__name__)
                self.retry(exc=CouldNotAqcuireLock(msg))
Code Example #32
File: tasks.py Project: zbidi/commcare-hq
def process_pillow_retry(error_doc_id):
    # Redis error logged in get_redis_client
    try:
        client = cache_core.get_redis_client()
    except cache_core.RedisClientError:
        return

    # Prevent more than one task from processing this error, just in case
    # it got enqueued twice.
    lock = client.lock(
        "pillow-retry-processing-%s" % error_doc_id,
        timeout=settings.PILLOW_RETRY_PROCESSING_LOCK_TIMEOUT*60
    )
    if lock.acquire(blocking=False):
        try:
            error_doc = PillowError.objects.get(id=error_doc_id)
        except PillowError.DoesNotExist:
            release_lock(lock, True)
            return

        pillow_name_or_class = error_doc.pillow
        try:
            pillow = get_pillow_by_name(pillow_name_or_class)
        except PillowNotFoundError:
            pillow = None

        if not pillow:
            notify_error((
                "Could not find pillowtop class '%s' while attempting a retry. "
                "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
                "If not, then this should be looked into."
            ) % pillow_name_or_class)
            try:
                error_doc.total_attempts = PillowError.multi_attempts_cutoff() + 1
                error_doc.save()
            finally:
                release_lock(lock, True)
                return

        change = error_doc.change_object
        try:
            change_metadata = change.metadata
            if change_metadata:
                document_store = get_document_store(
                    data_source_type=change_metadata.data_source_type,
                    data_source_name=change_metadata.data_source_name,
                    domain=change_metadata.domain
                )
                change.document_store = document_store
            pillow.process_change(change)
        except Exception:
            ex_type, ex_value, ex_tb = sys.exc_info()
            error_doc.add_attempt(ex_value, ex_tb)
            error_doc.queued = False
            error_doc.save()
        else:
            error_doc.delete()
        finally:
            release_lock(lock, True)
Code Example #33
File: tasks.py Project: yonglehou/commcare-hq
def process_pillow_retry(error_doc_id):
    # Redis error logged in get_redis_client
    try:
        client = cache_core.get_redis_client()
    except cache_core.RedisClientError:
        return

    # Prevent more than one task from processing this error, just in case
    # it got enqueued twice.
    lock = client.lock("pillow-retry-processing-%s" % error_doc_id,
                       timeout=settings.PILLOW_RETRY_PROCESSING_LOCK_TIMEOUT *
                       60)
    if lock.acquire(blocking=False):
        try:
            error_doc = PillowError.objects.get(id=error_doc_id)
        except PillowError.DoesNotExist:
            release_lock(lock, True)
            return

        pillow_name_or_class = error_doc.pillow
        try:
            pillow = get_pillow_by_name(pillow_name_or_class)
        except PillowNotFoundError:
            pillow = None

        if not pillow:
            notify_error((
                "Could not find pillowtop class '%s' while attempting a retry. "
                "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
                "If not, then this should be looked into.") %
                         pillow_name_or_class)
            try:
                error_doc.total_attempts = PillowError.multi_attempts_cutoff(
                ) + 1
                error_doc.save()
            finally:
                release_lock(lock, True)
                return

        change = error_doc.change_object

        try:
            try:
                from corehq.apps.userreports.pillow import ConfigurableReportKafkaPillow
                if isinstance(pillow, ConfigurableReportKafkaPillow):
                    raise Exception('this is temporarily not supported!')
            except ImportError:
                pass
            pillow.process_change(change)
        except Exception:
            ex_type, ex_value, ex_tb = sys.exc_info()
            error_doc.add_attempt(ex_value, ex_tb)
            error_doc.queued = False
            error_doc.save()
        else:
            error_doc.delete()
        finally:
            release_lock(lock, True)
Code Example #34
def locked_form(xform, interface):
    """Context manager that locks a form and checks/prepares for duplicates

    The lock is acquired on context manager enter and released on exit.

    Historically this locked both the new (possibly duplicate) form ID
    as well as any new ID generated in the process of handling a
    duplicate form ID. The submitted form ID is still locked, but the
    newly generated ID is not since it should be globally unique.
    """
    context = [xform]
    lock = interface.acquire_lock_for_xform(xform.form_id)
    try:
        if interface.is_duplicate(xform.form_id):
            new_form, dup_form = _handle_id_conflict(xform, xform.domain)
            if dup_form:
                assert dup_form.form_id != new_form.form_id, (new_form, dup_form)
                context = [new_form, dup_form]
            else:
                assert new_form is xform, (new_form, xform)
        yield context
    finally:
        release_lock(lock, degrade_gracefully=True)
Code Example #35
File: form.py Project: dimagi/commcare-hq
def locked_form(xform, interface):
    """Context manager that locks a form and checks/prepares for duplicates

    The lock is acquired on context manager enter and released on exit.

    Historically this locked both the new (possibly duplicate) form ID
    as well as any new ID generated in the process of handling a
    duplicate form ID. The submitted form ID is still locked, but the
    newly generated ID is not since it should be globally unique.
    """
    context = [xform]
    lock = interface.acquire_lock_for_xform(xform.form_id)
    try:
        if interface.is_duplicate(xform.form_id):
            new_form, dup_form = _handle_id_conflict(xform, xform.domain)
            if dup_form:
                assert dup_form.form_id != new_form.form_id, (new_form, dup_form)
                context = [new_form, dup_form]
            else:
                assert new_form is xform, (new_form, xform)
        yield context
    finally:
        release_lock(lock, degrade_gracefully=True)
Code Example #36
    @classmethod
    def tearDownClass(cls):
        cls.case.delete()
        super(TestFormProcessorCouch, cls).tearDownClass()


@generate_cases([
    (True, True),
    (True, False),
    (False, True),
    (False, False),
], TestFormProcessorCouch)
def test_get_case_with_lock(self, lock, wrap):
    case, case_lock = FormProcessorCouch.get_case_with_lock(
        self.case_id, lock, wrap)

    try:
        if lock:
            self.assertIsNotNone(case_lock)
        else:
            self.assertIsNone(case_lock)

        if wrap:
            self.assertEqual(len(case.actions), 2)
        else:
            self.assertEqual('actions' in case, True)

        self.assertIsInstance(case, CommCareCase if wrap else dict)
    finally:
        release_lock(case_lock, True)
Code Example #37
File: mixin.py Project: ekush/commcare-hq
    def _get_next_phone_number(self, redis_client):
        """
        Gets the least-used phone number from self.phone_numbers in the last
        n seconds, where n = self.get_load_balancing_interval().

        Returns an SMSLoadBalancingInfo object, which has the phone number to
        use. Since that phone number may end up not being used due to other
        conditions (such as rate limiting), you must call the .finish() method
        on this info object when you're done, sending save_stats=True if you
        ended up using the phone number, or False if not.
        """
        lock_key = "sms-load-balancing-lock-%s" % self._id
        lock = redis_client.lock(lock_key, timeout=30)
        lock.acquire()

        try:
            start_timestamp = (
                datetime.utcnow() -
                timedelta(seconds=self.get_load_balancing_interval()))

            stats_key = "sms-load-balancing-stats-%s" % self._id
            stats = redis_client.get(stats_key)

            # The stats entry looks like {phone_number: [list of timestamps]}
            # for each phone number, showing the list of timestamps that an
            # sms was sent using that phone number. Below, we validate the stats
            # entry and also clean it up to only include timestamps pertinent
            # to load balancing right now.
            try:
                assert stats is not None
                stats = json.loads(stats)
                assert isinstance(stats, dict)

                stats = {
                    k: v
                    for k, v in stats.items() if k in self.phone_numbers
                }
                new_stats = {}
                for k in stats:
                    v = stats[k]
                    assert isinstance(v, list)
                    new_v = []
                    for t in v:
                        try:
                            new_t = parse(t).replace(tzinfo=None)
                        except:
                            new_t = None
                        if isinstance(new_t,
                                      datetime) and new_t > start_timestamp:
                            new_v.append(new_t)
                    new_stats[k] = new_v
                stats = new_stats

                for k in self.phone_numbers:
                    if k not in stats:
                        stats[k] = []
            except:
                stats = {k: [] for k in self.phone_numbers}

            # Now that the stats entry is good, we choose the phone number that
            # has been used the least amount.
            phone_number = self.phone_numbers[0]
            num_sms_sent = len(stats[phone_number])
            for k in self.phone_numbers:
                if len(stats[k]) < num_sms_sent:
                    num_sms_sent = len(stats[k])
                    phone_number = k

            # Add the current timestamp for the chosen number
            stats[phone_number].append(datetime.utcnow())

            return SMSLoadBalancingInfo(phone_number, stats_key, stats,
                                        redis_client, lock)

        except:
            # If an exception occurs, we need to make sure the lock is released.
            # However, if no exception occurs, we don't release the lock since
            # it must be released by calling the .finish() method on the return
            # value.
            release_lock(lock, True)
            raise
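
The docstring in example #37 describes a deferred-release pattern: _get_next_phone_number acquires the load-balancing lock, stashes it in the returned SMSLoadBalancingInfo, and relies on the caller to invoke .finish() (the method shown in examples #13 and #14) to persist the usage stats and release the lock. A hedged sketch of that caller side follows; send_via_phone_number and the msg/backend objects are assumed names for illustration, not commcare-hq APIs.

# Sketch only: how a caller might consume the deferred-release info object.
info = backend._get_next_phone_number(redis_client)
sent = False
try:
    # Use the least-recently-used phone number chosen above.
    sent = send_via_phone_number(msg, info.phone_number)
finally:
    # finish() saves the stats when save_stats=True and releases the
    # load-balancing lock acquired in _get_next_phone_number.
    info.finish(save_stats=sent, raise_exc=False)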
Code Example #38
File: tasks.py Project: dimagi/commcare-hq
def process_sms(queued_sms_pk):
    """
    queued_sms_pk - pk of a QueuedSMS entry
    """
    utcnow = get_utcnow()
    # Prevent more than one task from processing this SMS, just in case
    # the message got enqueued twice.
    message_lock = get_lock("sms-queue-processing-%s" % queued_sms_pk)

    if message_lock.acquire(blocking=False):
        try:
            msg = QueuedSMS.objects.get(pk=queued_sms_pk)
        except QueuedSMS.DoesNotExist:
            # The message was already processed and removed from the queue
            release_lock(message_lock, True)
            return

        if message_is_stale(msg, utcnow):
            msg.set_system_error(SMS.ERROR_MESSAGE_IS_STALE)
            remove_from_queue(msg)
            release_lock(message_lock, True)
            return

        outbound_counter = None
        if msg.direction == OUTGOING:
            domain_object = Domain.get_by_name(msg.domain) if msg.domain else None

            if domain_object and handle_domain_specific_delays(msg, domain_object, utcnow):
                release_lock(message_lock, True)
                return

            outbound_counter = OutboundDailyCounter(domain_object)
            if not outbound_counter.can_send_outbound_sms(msg):
                release_lock(message_lock, True)
                return

        requeue = False
        # Process inbound SMS from a single contact one at a time
        recipient_block = msg.direction == INCOMING

        # We check datetime_to_process against utcnow plus a small amount
        # of time because timestamps can differ between machines which
        # can cause us to miss sending the message the first time and
        # result in an unnecessary delay.
        if (
            isinstance(msg.processed, bool) and
            not msg.processed and
            not msg.error and
            msg.datetime_to_process < (utcnow + timedelta(seconds=10))
        ):
            if recipient_block:
                recipient_lock = get_lock(
                    "sms-queue-recipient-phone-%s" % msg.phone_number)
                recipient_lock.acquire(blocking=True)

            if msg.direction == OUTGOING:
                if (
                    msg.domain and
                    msg.couch_recipient_doc_type and
                    msg.couch_recipient and
                    not is_contact_active(msg.domain, msg.couch_recipient_doc_type, msg.couch_recipient)
                ):
                    msg.set_system_error(SMS.ERROR_CONTACT_IS_INACTIVE)
                    remove_from_queue(msg)
                else:
                    requeue = handle_outgoing(msg)
            elif msg.direction == INCOMING:
                try:
                    handle_incoming(msg)
                except DelayProcessing:
                    process_sms.apply_async([queued_sms_pk], countdown=60)
                    if recipient_block:
                        release_lock(recipient_lock, True)
                    release_lock(message_lock, True)
            else:
                msg.set_system_error(SMS.ERROR_INVALID_DIRECTION)
                remove_from_queue(msg)

            if recipient_block:
                release_lock(recipient_lock, True)

        release_lock(message_lock, True)
        if requeue:
            if outbound_counter:
                outbound_counter.decrement()
            send_to_sms_queue(msg)
Code Example #39
File: tasks.py Project: saketkanth/commcare-hq
def process_pillow_retry(error_doc_id):
    # Redis error logged in get_redis_client
    try:
        client = cache_core.get_redis_client()
    except cache_core.RedisClientError:
        return

    # Prevent more than one task from processing this error, just in case
    # it got enqueued twice.
    lock = client.lock(
        "pillow-retry-processing-%s" % error_doc_id,
        timeout=settings.PILLOW_RETRY_PROCESSING_LOCK_TIMEOUT*60
    )
    if lock.acquire(blocking=False):
        try:
            error_doc = PillowError.objects.get(id=error_doc_id)
        except PillowError.DoesNotExist:
            release_lock(lock, True)
            return

        pillow_name_or_class = error_doc.pillow
        try:
            pillow = get_pillow_by_name(pillow_name_or_class)
        except PillowNotFoundError:
            if not settings.UNIT_TESTING:
                _assert = soft_assert(to='@'.join(['czue', 'dimagi.com']))
                _assert(False, 'Pillow retry {} is still using legacy class {}'.format(
                    error_doc.pk, pillow_name_or_class
                ))
            pillow = _try_legacy_import(pillow_name_or_class)

        if not pillow:
            notify_error((
                "Could not find pillowtop class '%s' while attempting a retry. "
                "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
                "If not, then this should be looked into."
            ) % pillow_name_or_class)
            try:
                error_doc.total_attempts = PillowError.multi_attempts_cutoff() + 1
                error_doc.save()
            finally:
                release_lock(lock, True)
                return

        change = error_doc.change_object
        if getattr(pillow, 'include_docs', False):
            try:
                change.set_document(pillow.get_couch_db().open_doc(change.id))
            except ResourceNotFound:
                change.deleted = True

        try:
            try:
                from corehq.apps.userreports.pillow import ConfigurableReportKafkaPillow
                if isinstance(pillow, ConfigurableReportKafkaPillow):
                    raise Exception('this is temporarily not supported!')
            except ImportError:
                pass
            pillow.process_change(change, is_retry_attempt=True)
        except Exception:
            ex_type, ex_value, ex_tb = sys.exc_info()
            error_doc.add_attempt(ex_value, ex_tb)
            error_doc.queued = False
            error_doc.save()
        else:
            error_doc.delete()
        finally:
            release_lock(lock, True)
Code Example #40
def process_sms(queued_sms_pk):
    """
    queued_sms_pk - pk of a QueuedSMS entry
    """
    utcnow = get_utcnow()
    # Prevent more than one task from processing this SMS, just in case
    # the message got enqueued twice.
    message_lock = get_lock("sms-queue-processing-%s" % queued_sms_pk)

    if message_lock.acquire(blocking=False):
        try:
            msg = QueuedSMS.objects.get(pk=queued_sms_pk)
        except QueuedSMS.DoesNotExist:
            # The message was already processed and removed from the queue
            release_lock(message_lock, True)
            return

        if message_is_stale(msg, utcnow):
            msg.set_system_error(SMS.ERROR_MESSAGE_IS_STALE)
            remove_from_queue(msg)
            release_lock(message_lock, True)
            return

        outbound_counter = None
        if msg.direction == OUTGOING:
            domain_object = Domain.get_by_name(
                msg.domain) if msg.domain else None

            if domain_object and handle_domain_specific_delays(
                    msg, domain_object, utcnow):
                release_lock(message_lock, True)
                return

            outbound_counter = OutboundDailyCounter(domain_object)
            if not outbound_counter.can_send_outbound_sms(msg):
                release_lock(message_lock, True)
                return

        requeue = False
        # Process inbound SMS from a single contact one at a time
        recipient_block = msg.direction == INCOMING

        # We check datetime_to_process against utcnow plus a small amount
        # of time because timestamps can differ between machines which
        # can cause us to miss sending the message the first time and
        # result in an unnecessary delay.
        if (isinstance(msg.processed, bool) and not msg.processed
                and not msg.error and msg.datetime_to_process <
            (utcnow + timedelta(seconds=10))):
            if recipient_block:
                recipient_lock = get_lock("sms-queue-recipient-phone-%s" %
                                          msg.phone_number)
                recipient_lock.acquire(blocking=True)

            if msg.direction == OUTGOING:
                if (msg.domain and msg.couch_recipient_doc_type
                        and msg.couch_recipient and not is_contact_active(
                            msg.domain, msg.couch_recipient_doc_type,
                            msg.couch_recipient)):
                    msg.set_system_error(SMS.ERROR_CONTACT_IS_INACTIVE)
                    remove_from_queue(msg)
                else:
                    requeue = handle_outgoing(msg)
            elif msg.direction == INCOMING:
                try:
                    handle_incoming(msg)
                except DelayProcessing:
                    process_sms.apply_async([queued_sms_pk], countdown=60)
                    if recipient_block:
                        release_lock(recipient_lock, True)
                    release_lock(message_lock, True)
            else:
                msg.set_system_error(SMS.ERROR_INVALID_DIRECTION)
                remove_from_queue(msg)

            if recipient_block:
                release_lock(recipient_lock, True)

        release_lock(message_lock, True)
        if requeue:
            if outbound_counter:
                outbound_counter.decrement()
            send_to_sms_queue(msg)
Code Example #41
            rebuild_case_from_actions(case, actions)
            # todo: should this move to case.rebuild?
            if not case.xform_ids:
                if not found:
                    return None
                # there were no more forms. 'delete' the case
                case.doc_type = 'CommCareCase-Deleted'

            # add a "rebuild" action
            case.actions.append(_rebuild_action())
            if save:
                case.save()
            return case
        finally:
            release_lock(lock_obj, degrade_gracefully=True)

    @staticmethod
    def get_case_forms(case_id):
        """
        Get all forms that have submitted against a case (including archived and deleted forms)
        wrapped by the appropriate form type.
        """
        form_ids = get_case_xform_ids(case_id)
        return [fetch_and_wrap_form(id) for id in form_ids]

    @staticmethod
    def get_case_with_lock(case_id,
                           lock=False,
                           strip_history=False,
                           wrap=False):
Code Example #42
File: xform.py Project: ekush/commcare-hq
 def __exit__(self, exc_type, exc_val, exc_tb):
     for lock in self.locks:
         if lock:
             release_lock(lock, True)
Code Example #43
def _wait_and_release_lock(lock, timeout, start_timestamp):
    while (datetime.utcnow() - start_timestamp) < timedelta(seconds=timeout):
        sleep(0.1)
    release_lock(lock, True)
Code Example #44
File: api.py Project: zbidi/commcare-hq
        "fri/randomized_message",
        startkey=[case.domain, case.case_id],
        endkey=[case.domain, case.case_id, {}],
        include_docs=True).first()
    return any_message is not None


def get_randomized_message(case, order):
    if order >= 0 and order <= 279:
        client = get_redis_client()
        lock = client.lock("fri-randomization-%s" % case.case_id, timeout=300)

        lock.acquire(blocking=True)
        if not already_randomized(case):
            randomize_messages(case)
        release_lock(lock, True)

        message = FRIRandomizedMessage.view(
            "fri/randomized_message",
            key=[case.domain, case.case_id, order],
            include_docs=True).one()
        return message
    else:
        return None
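
The block above is a lock-guarded lazy initialization: the per-case lock ensures only one worker randomizes the message schedule, while every other caller blocks and then sees the already-randomized state. Note that the source releases the lock outside any try/finally and relies on the 300-second timeout if randomize_messages raises. A generic sketch of the same check-then-initialize-under-lock pattern, with the release moved into a finally block; the helper names here are placeholders, not from the source:

def ensure_initialized(client, key, is_initialized, initialize):
    # One lock per key so only a single worker runs the initialization.
    lock = client.lock("init-%s" % key, timeout=300)
    lock.acquire(blocking=True)
    try:
        if not is_initialized(key):
            initialize(key)
    finally:
        # Releasing in finally avoids leaving the lock held until its
        # timeout expires when initialize() raises.
        release_lock(lock, True)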


def get_date(case, prop):
    value = case.get_case_property(prop)
    # A datetime is a date, but a date is not a datetime
    if isinstance(value, datetime):
        return value.date()
Code example #45
def process_sms(queued_sms_pk):
    """
    queued_sms_pk - pk of a QueuedSMS entry
    """
    client = get_redis_client()
    utcnow = get_utcnow()
    # Prevent more than one task from processing this SMS, just in case
    # the message got enqueued twice.
    message_lock = get_lock(client, "sms-queue-processing-%s" % queued_sms_pk)

    if message_lock.acquire(blocking=False):
        try:
            msg = QueuedSMS.objects.get(pk=queued_sms_pk)
        except QueuedSMS.DoesNotExist:
            # The message was already processed and removed from the queue
            release_lock(message_lock, True)
            return

        if message_is_stale(msg, utcnow):
            msg.set_system_error(SMS.ERROR_MESSAGE_IS_STALE)
            remove_from_queue(msg)
            release_lock(message_lock, True)
            return

        if msg.direction == OUTGOING:
            if msg.domain:
                domain_object = Domain.get_by_name(msg.domain)
            else:
                domain_object = None
            if domain_object and handle_domain_specific_delays(
                    msg, domain_object, utcnow):
                release_lock(message_lock, True)
                return

        requeue = False
        # Process inbound SMS from a single contact one at a time
        recipient_block = msg.direction == INCOMING
        if (isinstance(msg.processed, bool) and not msg.processed
                and not msg.error and msg.datetime_to_process < utcnow):
            if recipient_block:
                recipient_lock = get_lock(
                    client, "sms-queue-recipient-phone-%s" % msg.phone_number)
                recipient_lock.acquire(blocking=True)

            if msg.direction == OUTGOING:
                if (msg.domain and msg.couch_recipient_doc_type
                        and msg.couch_recipient and not is_contact_active(
                            msg.domain, msg.couch_recipient_doc_type,
                            msg.couch_recipient)):
                    msg.set_system_error(SMS.ERROR_CONTACT_IS_INACTIVE)
                    remove_from_queue(msg)
                else:
                    requeue = handle_outgoing(msg)
            elif msg.direction == INCOMING:
                handle_incoming(msg)
            else:
                msg.set_system_error(SMS.ERROR_INVALID_DIRECTION)
                remove_from_queue(msg)

            if recipient_block:
                release_lock(recipient_lock, True)

        release_lock(message_lock, True)
        if requeue:
            process_sms.delay(queued_sms_pk)
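The non-blocking acquire at the top of process_sms is what de-duplicates work: if the same queued_sms_pk gets enqueued twice, the second worker's acquire(blocking=False) fails immediately and that task returns without touching the message. A self-contained sketch of the same idiom using the redis-py Lock API; the function and handler names are illustrative, not from the source:

import redis

def process_once(client, task_id, handler):
    # One lock per task id; the timeout guarantees the lock cannot be
    # held forever if a worker dies while processing.
    lock = client.lock("task-processing-%s" % task_id, timeout=300)
    if not lock.acquire(blocking=False):
        # Another worker already owns this task, so skip it.
        return False
    try:
        handler(task_id)
    finally:
        lock.release()
    return True

# Example: process_once(redis.Redis(), 42, my_handler)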
Code example #47
File: tasks.py Project: nnestle/commcare-hq
def _wait_and_release_lock(lock, timeout, start_timestamp):
    while (datetime.utcnow() - start_timestamp) < timedelta(seconds=timeout):
        sleep(0.1)
    release_lock(lock, True)
Code example #48
File: mixin.py Project: johan--/commcare-hq
    def _get_next_phone_number(self, redis_client):
        """
        Gets the least-used phone number from self.phone_numbers in the last
        n seconds, where n = self.get_load_balancing_interval().

        Returns an SMSLoadBalancingInfo object, which has the phone number to
        use. Since that phone number may end up not being used due to other
        conditions (such as rate limiting), you must call the .finish() method
        on this info object when you're done, sending save_stats=True if you
        ended up using the phone number, or False if not.
        """
        lock_key = "sms-load-balancing-lock-%s" % self._id
        lock = redis_client.lock(lock_key, timeout=30)
        lock.acquire()

        try:
            start_timestamp = (datetime.utcnow() -
                timedelta(seconds=self.get_load_balancing_interval()))

            stats_key = "sms-load-balancing-stats-%s" % self._id
            stats = redis_client.get(stats_key)

            # The stats entry looks like {phone_number: [list of timestamps]}
            # for each phone number, showing the list of timestamps that an
            # sms was sent using that phone number. Below, we validate the stats
            # entry and also clean it up to only include timestamps pertinent
            # to load balancing right now.
            try:
                assert stats is not None
                stats = json.loads(stats)
                assert isinstance(stats, dict)

                stats = {k: v for k, v in stats.items() if k in self.phone_numbers}
                new_stats = {}
                for k in stats:
                    v = stats[k]
                    assert isinstance(v, list)
                    new_v = []
                    for t in v:
                        try:
                            new_t = parse(t).replace(tzinfo=None)
                        except Exception:
                            new_t = None
                        if isinstance(new_t, datetime) and new_t > start_timestamp:
                            new_v.append(new_t)
                    new_stats[k] = new_v
                stats = new_stats

                for k in self.phone_numbers:
                    if k not in stats:
                        stats[k] = []
            except Exception:
                stats = {k: [] for k in self.phone_numbers}

            # Now that the stats entry is good, we choose the phone number
            # that has been used the least.
            phone_number = self.phone_numbers[0]
            num_sms_sent = len(stats[phone_number])
            for k in self.phone_numbers:
                if len(stats[k]) < num_sms_sent:
                    num_sms_sent = len(stats[k])
                    phone_number = k

            # Add the current timestamp for the chosen number
            stats[phone_number].append(datetime.utcnow())

            return SMSLoadBalancingInfo(phone_number, stats_key, stats,
                redis_client, lock)

        except:
            # If an exception occurs, we need to make sure the lock is released.
            # However, if no exception occurs, we don't release the lock since
            # it must be released by calling the .finish() method on the return
            # value.
            release_lock(lock, True)
            raise
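The docstring above spells out an unusual contract: on success the lock is deliberately left held, and the caller must invoke .finish() on the returned SMSLoadBalancingInfo, passing save_stats=True only if the chosen number was actually used. A rough sketch of a caller honoring that contract; the send_sms_with helper, the backend argument, and the info.phone_number attribute name are assumptions, not taken from the source:

def send_with_load_balancing(backend, redis_client, message):
    # Pick the least-used number; .finish() both releases the lock taken in
    # _get_next_phone_number and persists the stats when save_stats=True.
    info = backend._get_next_phone_number(redis_client)
    sent = False
    try:
        sent = send_sms_with(info.phone_number, message)  # placeholder send
    finally:
        info.finish(save_stats=sent)
    return sent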
Code example #49
File: tasks.py Project: ansarbek/commcare-hq
def process_pillow_retry(error_doc_id):
    # Redis error logged in get_redis_client
    try:
        client = cache_core.get_redis_client()
    except cache_core.RedisClientError:
        return

    # Prevent more than one task from processing this error, just in case
    # it got enqueued twice.
    lock = client.lock(
        "pillow-retry-processing-%s" % error_doc_id,
        timeout=settings.PILLOW_RETRY_PROCESSING_LOCK_TIMEOUT*60
    )

    if lock.acquire(blocking=False):
        try:
            error_doc = PillowError.objects.get(id=error_doc_id)
        except PillowError.DoesNotExist:
            # Nothing to retry; release the lock before bailing out.
            release_lock(lock, True)
            return

        pillow_class = error_doc.pillow
        try:
            pillow = get_pillow_instance(pillow_class)
        except ValueError:
            # all fluff pillows have module path of 'fluff' so can't be imported directly
            _, pillow_class_name = pillow_class.rsplit('.', 1)
            try:
                pillow = get_pillow_by_name(pillow_class_name)
            except PillowNotFoundError:
                pillow = None

        if not pillow:
            notify_error((
                "Could not find pillowtop class '%s' while attempting a retry. "
                "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
                "If not, then this should be looked into."
            ) % pillow_class)
            try:
                error_doc.total_attempts = PillowError.multi_attempts_cutoff() + 1
                error_doc.save()
            finally:
                release_lock(lock, True)
                return

        change = error_doc.change_object
        if pillow.include_docs:
            try:
                change.set_document(pillow.get_couch_db().open_doc(change.id))
            except ResourceNotFound:
                change.deleted = True

        try:
            try:
                from corehq.apps.userreports.pillow import ConfigurableIndicatorPillow
                if isinstance(pillow, ConfigurableIndicatorPillow):
                    raise Exception('this is temporarily not supported!')
            except ImportError:
                pass
            pillow.process_change(change, is_retry_attempt=True)
        except Exception:
            ex_type, ex_value, ex_tb = sys.exc_info()
            error_doc.add_attempt(ex_value, ex_tb)
            error_doc.queued = False
            error_doc.save()
        else:
            error_doc.delete()
        finally:
            release_lock(lock, True)