Example #1
def simulate_reentrant_lock():
    lock1 = get_redis_lock(__name__, timeout=0.5, name="test")
    lock2 = get_redis_lock(__name__, timeout=0.5, name="test")
    assert isinstance(lock1.lock, ReentrantTestLock), lock1.lock
    assert isinstance(lock2.lock, ReentrantTestLock), lock2.lock
    with lock1, lock2:
        pass  # no deadlock, no errors
Example #2
def test_redislocks_nose_plugin():
    lock1 = get_redis_lock(__name__, timeout=0.2, name="test")
    assert isinstance(lock1.lock, TestLock), lock1.lock
    lock1.acquire()
    lock2 = get_redis_lock(__name__, timeout=0.2, name="test")
    with assert_raises(TimeoutError):
        lock2.acquire()
    with assert_raises(LockError,
                       msg="Cannot release a lock that's no longer owned"):
        lock1.release()
Example #3
def check_repeaters():
    start = datetime.utcnow()
    twentythree_hours_sec = 23 * 60 * 60
    twentythree_hours_later = start + timedelta(hours=23)

    # Long timeout to allow all waiting repeat records to be iterated
    check_repeater_lock = get_redis_lock(
        CHECK_REPEATERS_KEY,
        timeout=twentythree_hours_sec,
        name=CHECK_REPEATERS_KEY,
    )
    if not check_repeater_lock.acquire(blocking=False):
        metrics_counter("commcare.repeaters.check.locked_out")
        return

    try:
        with metrics_histogram_timer(
                "commcare.repeaters.check.processing",
                timing_buckets=_check_repeaters_buckets,
        ):
            for record in iterate_repeat_records(start):
                if not _soft_assert(
                        datetime.utcnow() < twentythree_hours_later,
                        "I've been iterating repeat records for 23 hours. I quit!"
                ):
                    break
                metrics_counter("commcare.repeaters.check.attempt_forward")
                record.attempt_forward_now()
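            # The else clause below belongs to the for loop: it runs only if
            # iteration completed without hitting the 23-hour break above.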
            else:
                iterating_time = datetime.utcnow() - start
                _soft_assert(
                    iterating_time < timedelta(hours=6),
                    f"It took {iterating_time} to iterate repeat records.")
    finally:
        check_repeater_lock.release()
Example #4
def test_unreleased_lock():
    msg = "unreleased dict_values([ReentrantTestLock(name='unreleased', level=1)])"
    with assert_raises(AssertionError, msg=msg):
        with reentrant_redis_locks():
            lock = get_redis_lock("unreleased", timeout=0.5, name="test")
            lock.acquire()
    lock.release()
Example #5
def handle_fixture_location_update(sender, doc, diff, backend, **kwargs):
    if doc.get('doc_type') == 'XFormInstance' and doc.get(
            'domain') in M4CHANGE_DOMAINS:
        xform = XFormInstance.wrap(doc)
        if hasattr(xform, "xmlns") and xform.xmlns in ALL_M4CHANGE_FORMS:
            location_id = xform.form.get("location_id", None)
            if not location_id:
                return
            client = get_redis_client()
            redis_key = REDIS_FIXTURE_KEYS[xform.domain]
            redis_lock_key = REDIS_FIXTURE_LOCK_KEYS[xform.domain]
            lock = get_redis_lock(redis_lock_key,
                                  timeout=5,
                                  name=redis_lock_key)
            if lock.acquire(blocking=True):
                try:
                    location_ids_str = client.get(redis_key)
                    location_ids = []
                    if location_ids_str:
                        location_ids = json.loads(location_ids_str)
                    if location_id not in location_ids:
                        location_ids.append(location_id)
                    client.set(redis_key, json.dumps(location_ids))
                finally:
                    release_lock(lock, True)
Example #6
def check_repeaters():
    start = datetime.utcnow()
    six_hours_sec = 6 * 60 * 60
    six_hours_later = start + timedelta(seconds=six_hours_sec)

    # Long timeout to allow all waiting repeat records to be iterated
    check_repeater_lock = get_redis_lock(
        CHECK_REPEATERS_KEY,
        timeout=six_hours_sec,
        name=CHECK_REPEATERS_KEY,
    )
    if not check_repeater_lock.acquire(blocking=False):
        datadog_counter("commcare.repeaters.check.locked_out")
        return

    try:
        with datadog_bucket_timer(
            "commcare.repeaters.check.processing",
            tags=[],
            timing_buckets=_check_repeaters_buckets,
        ):
            for record in iterate_repeat_records(start):
                if datetime.utcnow() > six_hours_later:
                    _soft_assert(False, "I've been iterating repeat records for six hours. I quit!")
                    break
                datadog_counter("commcare.repeaters.check.attempt_forward")
                record.attempt_forward_now()
    finally:
        check_repeater_lock.release()
Example #7
def check_repeaters():
    start = datetime.utcnow()
    six_hours_sec = 6 * 60 * 60
    six_hours_later = start + timedelta(seconds=six_hours_sec)

    # Long timeout to allow all waiting repeat records to be iterated
    check_repeater_lock = get_redis_lock(
        CHECK_REPEATERS_KEY,
        timeout=six_hours_sec,
        name=CHECK_REPEATERS_KEY,
    )
    if not check_repeater_lock.acquire(blocking=False):
        metrics_counter("commcare.repeaters.check.locked_out")
        return

    try:
        with datadog_bucket_timer(
                "commcare.repeaters.check.processing",
                tags=[],
                timing_buckets=_check_repeaters_buckets,
        ):
            for record in iterate_repeat_records(start):
                if datetime.utcnow() > six_hours_later:
                    _soft_assert(
                        False,
                        "I've been iterating repeat records for six hours. I quit!"
                    )
                    break
                metrics_counter("commcare.repeaters.check.attempt_forward")
                record.attempt_forward_now()
    finally:
        check_repeater_lock.release()
Example #8
def test_extra_lock_release():
    with reentrant_redis_locks():
        lock = get_redis_lock("unreleased", timeout=0.5, name="test")
        lock.acquire()
        lock.release()
        with assert_raises(RuntimeError):
            lock.release()
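
Taken together, Examples #1, #4, and #8 pin down the test lock's bookkeeping: each acquire of the same named lock bumps a recursion level, releasing past zero raises RuntimeError, and any lock still held when reentrant_redis_locks() exits fails the test with an AssertionError naming it. A minimal sketch of that level-tracking idea, assuming nothing about the real ReentrantTestLock beyond the behavior these tests exercise:

class ReentrantLockSketch:

    def __init__(self, name):
        self.name = name
        self.level = 0

    def acquire(self, blocking=True, timeout=None):
        # Reentrant: the same holder may acquire again; just track the depth.
        self.level += 1
        return True

    def release(self):
        if self.level <= 0:
            # The behavior test_extra_lock_release expects on over-release.
            raise RuntimeError("cannot release a lock that is not held")
        self.level -= 1

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, *exc_info):
        self.release()
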
Example #9
 def get_enqueuing_lock(self, key):
     lock_timeout = self.get_enqueuing_timeout() * 60
     return get_redis_lock(
         key,
         timeout=lock_timeout,
         name=self.get_queue_name(),
         track_unreleased=False,
     )
Example #10
def get_connection_slot_lock(phone_number, backend, max_simultaneous_connections):
    """
    There is one redis lock per connection slot, numbered from 0 to
    max_simultaneous_connections - 1.
    A slot is taken if the lock can't be acquired.
    """
    slot = get_connection_slot_from_phone_number(phone_number, max_simultaneous_connections)
    key = 'backend-%s-connection-slot-%s' % (backend.couch_id, slot)
    return get_redis_lock(key, timeout=60, name="connection_slot")
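
The docstring assumes a get_connection_slot_from_phone_number helper that maps a phone number deterministically onto one of the max_simultaneous_connections slots. A minimal sketch of such a helper, a stable hash reduced modulo the slot count (hypothetical; the real implementation may differ):

import hashlib

def get_connection_slot_from_phone_number(phone_number, max_simultaneous_connections):
    # md5 is stable across processes (unlike the built-in hash()), so the
    # same phone number always lands in the same slot.
    digest = hashlib.md5(phone_number.encode('utf-8')).hexdigest()
    return int(digest, 16) % max_simultaneous_connections
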
Example #11
 def get_enqueue_lock(session_id, current_action_due):
     key = "create-task-for-smsforms-session-%s-%s" % (
         session_id, current_action_due.strftime('%Y-%m-%d %H:%M:%S'))
     return get_redis_lock(
         key,
         timeout=60 * 60,
         name="smsforms_task",
         track_unreleased=False,
     )
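
An enqueue lock like this is typically taken non-blocking right before queuing the task, so the same session and due time are only ever enqueued once; track_unreleased=False because the lock is never released and simply expires. A hedged usage sketch (the task name is hypothetical):

enqueue_lock = get_enqueue_lock(session_id, current_action_due)
if enqueue_lock.acquire(blocking=False):
    # handle_due_session stands in for whatever Celery task this queue uses.
    handle_due_session.delay(session_id)
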
Example #12
 def get_enqueue_lock(self, cls, schedule_instance_id, next_event_due):
     key = "create-task-for-%s-%s-%s" % (
         cls.__name__, schedule_instance_id.hex,
         next_event_due.strftime('%Y-%m-%d %H:%M:%S'))
     return get_redis_lock(
         key,
         timeout=60 * 60,
         name="create_task_for_%s" % cls.__name__,
         track_unreleased=False,
     )
Example #13
 def get_enqueue_lock(self, queued_sms):
     key = "create-task-for-sms-%s-%s" % (
         queued_sms.pk,
         queued_sms.datetime_to_process.strftime('%Y-%m-%d %H:%M:%S'))
     return get_redis_lock(
         key,
         timeout=3 * 60 * 60,
         name="sms_task",
         track_unreleased=False,
     )
Example #14
 def get_enqueue_lock(self, session_id, current_action_due):
     key = "create-task-for-smsforms-session-%s-%s" % (
         session_id,
         current_action_due.strftime('%Y-%m-%d %H:%M:%S')
     )
     return get_redis_lock(
         key,
         timeout=60 * 60,
         name="smsforms_task",
         track_unreleased=False,
     )
Example #15
def acquire_redis_lock(record):
    # TODO: Drop this lock at least 48 hours after PR #23580 is deployed
    # By that time all repeat records will have next_check set in the future.

    # Including _rev in the key means that the record will be unlocked
    # for processing every time we execute a `save()` call.
    return get_redis_lock(
        'repeat_record_in_progress-{}_{}'.format(record._id, record._rev),
        timeout=48 * 60 * 60,
        name="repeat_record",
        track_unreleased=False,
    ).acquire()
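
Because _rev changes on every save(), the lock key changes with it, so a saved record immediately becomes lockable again under a fresh key. With hypothetical values:

'repeat_record_in_progress-{}_{}'.format('a1b2c3', '2-9f8e')
# -> 'repeat_record_in_progress-a1b2c3_2-9f8e'; after the next save() the
# _rev moves to '3-...' and the 48-hour lock on the old key becomes moot.
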
Example #16
        def _inner(self, *args, **kwargs):
            if settings.UNIT_TESTING:  # Don't depend on redis
                return fn(*args, **kwargs)

            key = _get_unique_key(unique_key, fn, *args, **kwargs)
            lock = get_redis_lock(key, timeout=timeout, name=fn.__name__)
            if lock.acquire(blocking=False):
                try:
                    return fn(*args, **kwargs)
                finally:
                    release_lock(lock, True)
            else:
                msg = "Could not aquire lock '{}' for task '{}'.".format(
                    key, fn.__name__)
                self.retry(exc=CouldNotAqcuireLock(msg))
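
The self.retry call marks this _inner as the inside of a decorator factory applied to a bound Celery task. A hypothetical sketch of the surrounding shape (the factory name and parameters are assumptions, not the project's API):

from functools import wraps

def run_serially(unique_key, timeout=60 * 60):
    def decorate(fn):
        @wraps(fn)
        def _inner(self, *args, **kwargs):
            ...  # the body shown above
        return _inner
    return decorate
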
Example #17
def generate_fixtures_for_locations():

    client = get_redis_client()
    start_date, end_date = get_last_n_months(1)[0]
    db = FixtureReportResult.get_db()
    data_source = M4ChangeReportDataSource()

    for domain in M4CHANGE_DOMAINS:
        redis_key = REDIS_FIXTURE_KEYS[domain]
        redis_lock_key = REDIS_FIXTURE_LOCK_KEYS[domain]
        lock = get_redis_lock(redis_lock_key, timeout=5, name=redis_lock_key)
        location_ids = []
        if lock.acquire(blocking=True):
            try:
                location_ids_str = client.get(redis_key)
                location_ids = json.loads(
                    location_ids_str if location_ids_str else "[]")
                client.set(redis_key, '[]')
            finally:
                release_lock(lock, True)
        for location_id in location_ids:

            data_source.configure(
                config={
                    "startdate": start_date,
                    "enddate": end_date,
                    "location_id": location_id,
                    "domain": domain
                })
            report_data = data_source.get_data()

            for report_slug in report_data:

                # Remove cached fixture docs
                db.delete_docs(
                    FixtureReportResult.all_by_composite_key(
                        domain, location_id, json_format_date(start_date),
                        json_format_date(end_date), report_slug))
                rows = dict(report_data[report_slug].get("data", []))
                name = report_data[report_slug].get("name")
                FixtureReportResult.save_result(domain, location_id,
                                                start_date.date(),
                                                end_date.date(), report_slug,
                                                rows, name)
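
Note how this pairs with handle_fixture_location_update in Example #5: that change handler appends location IDs to the JSON list stored at REDIS_FIXTURE_KEYS[domain] under the same REDIS_FIXTURE_LOCK_KEYS lock, and this task drains and resets the list before generating fixtures.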
Example #18
def check_repeaters_in_partition(partition):
    """
    The CHECK_REPEATERS_PARTITION_COUNT constant dictates the total number of partitions
    :param partition: index of partition to check
    """
    start = datetime.utcnow()
    twentythree_hours_sec = 23 * 60 * 60
    twentythree_hours_later = start + timedelta(hours=23)

    # Long timeout to allow all waiting repeat records to be iterated
    lock_key = f"{CHECK_REPEATERS_KEY}_{partition}_in_{CHECK_REPEATERS_PARTITION_COUNT}"
    check_repeater_lock = get_redis_lock(
        lock_key,
        timeout=twentythree_hours_sec,
        name=lock_key,
    )
    if not check_repeater_lock.acquire(blocking=False):
        metrics_counter("commcare.repeaters.check.locked_out",
                        tags={'partition': partition})
        return

    try:
        with metrics_histogram_timer(
                "commcare.repeaters.check.processing",
                timing_buckets=_check_repeaters_buckets,
        ):
            for record in _iterate_repeat_records_for_partition(
                    start, partition, CHECK_REPEATERS_PARTITION_COUNT):
                if not _soft_assert(
                        datetime.utcnow() < twentythree_hours_later,
                        "I've been iterating repeat records for 23 hours. I quit!"
                ):
                    break

                metrics_counter("commcare.repeaters.check.attempt_forward")
                record.attempt_forward_now(is_retry=True)
            else:
                iterating_time = datetime.utcnow() - start
                _soft_assert(
                    iterating_time < timedelta(hours=6),
                    f"It took {iterating_time} to iterate repeat records.")
    finally:
        check_repeater_lock.release()
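
This variant assumes _iterate_repeat_records_for_partition yields only records belonging to the given partition. One hypothetical way to assign records to partitions stably, sketched here without claiming it matches the real implementation:

import zlib

def _belongs_to_partition(record_id, partition, partition_count):
    # crc32 is stable across processes, unlike the built-in hash().
    return zlib.crc32(record_id.encode('utf-8')) % partition_count == partition
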
Example #19
 def func_wrapper(request, *args, **kwargs):
     key = "import_locations_async-{domain}".format(domain=request.domain)
     lock = get_redis_lock(
         key,
         timeout=LOCK_LOCATIONS_TIMEOUT,
         name="import_locations_async",
     )
     if lock.acquire(blocking=False):
         try:
             return func(request, *args, **kwargs)
         finally:
             release_lock(lock, True)
     else:
         message = _("Some of the location edits are still in progress, "
                     "please wait until they finish and then try again")
         messages.warning(request, message)
         if request.method == 'DELETE':
             # handle delete_location view
             return json_response({'success': False, 'message': message})
         else:
             return HttpResponseRedirect(request.META['HTTP_REFERER'])
Example #20
    def get_content_send_lock(self, recipient):
        if is_commcarecase(recipient):
            doc_type = 'CommCareCase'
            doc_id = recipient.case_id
        else:
            doc_type = recipient.doc_type
            doc_id = recipient.get_id

        key = "send-content-for-%s-%s-%s-%s-%s" % (
            self.__class__.__name__,
            self.schedule_instance_id.hex,
            self.next_event_due.strftime('%Y-%m-%d %H:%M:%S'),
            doc_type,
            doc_id,
        )
        return get_redis_lock(
            key,
            timeout=STALE_SCHEDULE_INSTANCE_INTERVAL * 60,
            name="send_content_for_%s" % type(self).__name__,
            track_unreleased=False,
        )
Example #21
def process_reporting_metadata_staging():
    from corehq.apps.users.models import (CouchUser,
                                          UserReportingMetadataStaging)
    lock_key = "PROCESS_REPORTING_METADATA_STAGING_TASK"
    process_reporting_metadata_lock = get_redis_lock(
        lock_key,
        timeout=60 * 60,  # one hour
        name=lock_key,
    )
    if not process_reporting_metadata_lock.acquire(blocking=False):
        metrics_counter("commcare.process_reporting_metadata.locked_out")
        return

    try:
        start = datetime.utcnow()

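        # Up to 20 batches of 5 rows each (100 records) per task run; rows
        # locked by other workers are skipped via skip_locked=True.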
        for i in range(20):
            with transaction.atomic():
                records = (
                    UserReportingMetadataStaging.objects.select_for_update(
                        skip_locked=True).order_by('pk'))[:5]
                for record in records:
                    user = CouchUser.get_by_user_id(record.user_id,
                                                    record.domain)
                    try:
                        record.process_record(user)
                    except ResourceConflict:
                        # https://sentry.io/organizations/dimagi/issues/1479516073/
                        user = CouchUser.get_by_user_id(
                            record.user_id, record.domain)
                        record.process_record(user)
                    record.delete()
    finally:
        process_reporting_metadata_lock.release()

    duration = datetime.utcnow() - start
    run_again = run_periodic_task_again(
        process_reporting_metadata_staging_schedule, start, duration)
    if run_again and UserReportingMetadataStaging.objects.exists():
        process_reporting_metadata_staging.delay()
Example #22
def check_repeaters():
    start = datetime.utcnow()
    six_hours_sec = 6 * 60 * 60
    six_hours_later = start + timedelta(seconds=six_hours_sec)

    # Long timeout to allow all waiting repeat records to be iterated
    check_repeater_lock = get_redis_lock(
        CHECK_REPEATERS_KEY,
        timeout=six_hours_sec,
        name=CHECK_REPEATERS_KEY,
    )
    if not check_repeater_lock.acquire(blocking=False):
        return

    try:
        for record in iterate_repeat_records(start):
            if datetime.utcnow() > six_hours_later:
                _soft_assert(False, "I've been iterating repeat records for six hours. I quit!")
                break
            if acquire_redis_lock(record):
                record.attempt_forward_now()
    finally:
        check_repeater_lock.release()
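
Here acquire_redis_lock is the per-record, _rev-keyed helper defined in Example #15 above; a record is only forwarded when its per-record lock can be taken, so two workers don't process the same record at once.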
Example #23
def get_lock(key):
    return get_redis_lock(
        key,
        timeout=settings.SMS_QUEUE_PROCESSING_LOCK_TIMEOUT * 60,
        name="_".join(key.split("-", 3)[:3]),
    )
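
The name expression keeps only the first three dash-separated segments of the key, joined with underscores, presumably so the lock's reported name stays stable across individual messages. For a hypothetical key:

key = "sms-queuing-12345-2020-01-01T00:00:00"
"_".join(key.split("-", 3)[:3])  # -> 'sms_queuing_12345'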