Example #1


@task(serializer='pickle', queue="email_queue",
      bind=True, default_retry_delay=15 * 60, max_retries=10, acks_late=True)
def mail_admins_async(self, subject, message, fail_silently=False, connection=None,
                      html_message=None):
    try:
        mail_admins(subject, message, fail_silently, connection, html_message)
    except Exception as e:
        notify_exception(
            None,
            message="Encountered error while sending email to admins",
            details={
                'subject': subject,
                'error': e,
            }
        )
        self.retry(exc=e)


def get_maintenance_alert_active():
    from corehq.apps.hqwebapp.models import MaintenanceAlert
    return 1 if MaintenanceAlert.get_latest_alert() else 0


datadog_gauge_task('commcare.maintenance_alerts.active', get_maintenance_alert_active,
                   run_every=crontab(minute=1))  # hourly, at one minute past the hour
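
Every example on this page registers a gauge the same way: datadog_gauge_task takes a metric name, a zero-argument callable, and a celery schedule. As a rough mental model only, here is a minimal sketch of such a helper, assuming it wraps a periodic Celery task and a dogstatsd-style client (this is not the actual CommCare HQ implementation):

from celery.schedules import crontab
from celery.task import periodic_task
from datadog import statsd  # assumed client; CommCare HQ wraps its own helpers


def datadog_gauge_task(name, fn, run_every):
    # Register a periodic task that reports fn() as a Datadog gauge.
    # name=name gives each gauge task a unique Celery task name.
    @periodic_task(run_every=run_every, name=name, queue='background_queue')
    def record_gauge():
        statsd.gauge(name, fn())  # fn() computes the current value, e.g. a 0/1 flag
    return record_gauge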
Example #2
def process_repeat_record(repeat_record):
    if repeat_record.cancelled:
        return

    repeater = repeat_record.repeater
    if not repeater:
        repeat_record.cancel()
        repeat_record.save()
        return

    try:
        if repeater.paused:
            # Postpone the repeat record by 1 day so that it isn't picked up in
            # every cycle, clogging the queue with records whose repeater is paused.
            repeat_record.postpone_by(timedelta(days=1))
            return
        if repeater.doc_type.endswith(DELETED_SUFFIX):
            if not repeat_record.doc_type.endswith(DELETED_SUFFIX):
                repeat_record.doc_type += DELETED_SUFFIX
                repeat_record.save()
        elif repeat_record.state == RECORD_PENDING_STATE or repeat_record.state == RECORD_FAILURE_STATE:
            repeat_record.fire()
    except Exception:
        logging.exception('Failed to process repeat record: {}'.format(repeat_record._id))
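
The postponement above goes through RepeatRecord.postpone_by. As an illustration of the mechanism, a hypothetical version of that method, assuming the model schedules work via a next_check timestamp (an assumption; this is not the actual model code):

from datetime import datetime

def postpone_by(self, duration):
    # push the next processing attempt out by `duration` (hypothetical body)
    self.next_check = datetime.utcnow() + duration
    self.save()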


repeaters_overdue = datadog_gauge_task(
    'commcare.repeaters.overdue',
    get_overdue_repeat_record_count,
    run_every=crontab()  # every minute
)
Example #3
@no_result_task(serializer='pickle', queue='background_queue', acks_late=True,
                default_retry_delay=5 * 60, max_retries=10, bind=True)
def publish_sms_change(self, sms):
    try:
        publish_sms_saved(sms)
    except Exception as e:
        self.retry(exc=e)


@no_result_task(serializer='pickle', queue='background_queue')
def sync_phone_numbers_for_domain(domain):
    for user_id in CouchUser.ids_by_domain(domain, is_active=True):
        _sync_user_phone_numbers(user_id)

    for user_id in CouchUser.ids_by_domain(domain, is_active=False):
        _sync_user_phone_numbers(user_id)

    accessors = CaseAccessors(domain)
    case_ids = accessors.get_case_ids_in_domain()
    for case in accessors.iter_cases(case_ids):
        _sync_case_phone_number(case)

    MigrationStatus.set_migration_completed('phone_sync_domain_%s' % domain)


def queued_sms():
    return QueuedSMS.objects.count()


datadog_gauge_task('commcare.sms.queued', queued_sms, run_every=crontab())  # every minute
Example #4
    num_chunks += 1
    return {
        'created_count': created_count,
        'match_count': match_count,
        'too_many_matches': too_many_matches,
        'errors': errors.as_dict(),
        'num_chunks': num_chunks,
    }


def _alert_on_result(result, domain):
    """ Check import result and send internal alerts based on result

    :param result: dict that should include key "created_count" pointing to an int
    """

    if result['created_count'] > 10000:
        message = "A case import just uploaded {num} new cases to HQ. {domain} might be scaling operations".format(
            num=result['created_count'],
            domain=domain
        )
        alert = AbnormalUsageAlert(source="case importer", domain=domain, message=message)
        send_abnormal_usage_alert.delay(alert)
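
For illustration, a hypothetical call that would take the alert path; the dict shape follows the return value shown at the top of this example:

result = {
    'created_count': 15000,  # above the 10,000 threshold, so an alert is sent
    'match_count': 200,
    'too_many_matches': 0,
    'errors': {},
    'num_chunks': 3,
}
_alert_on_result(result, 'example-domain')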


total_bytes = datadog_gauge_task(
    'commcare.case_importer.files.total_bytes',
    get_case_upload_files_total_bytes,
    run_every=crontab(minute=0)  # hourly, on the hour
)
Example #5
@task(serializer='pickle',
      queue="email_queue",
      bind=True,
      default_retry_delay=15 * 60,
      max_retries=10,
      acks_late=True)
def mail_admins_async(self,
                      subject,
                      message,
                      fail_silently=False,
                      connection=None,
                      html_message=None):
    try:
        mail_admins(subject, message, fail_silently, connection, html_message)
    except Exception as e:
        notify_exception(
            None,
            message="Encountered error while sending email to admins",
            details={
                'subject': subject,
                'error': e,
            })
        self.retry(exc=e)


def get_maintenance_alert_active():
    from corehq.apps.hqwebapp.models import MaintenanceAlert
    return 1 if MaintenanceAlert.get_latest_alert() else 0


datadog_gauge_task('commcare.maintenance_alerts.active',
                   get_maintenance_alert_active,
                   run_every=crontab(minute=1))
Example #6
        # Delete entries that exist but should not
        for phone_number in phone_entries:
            if phone_number not in numbers_that_should_exist:
                phone_entries[phone_number].delete()

        # Create entries that should exist but do not exist
        for phone_number in numbers_that_should_exist:
            if phone_number not in phone_entries:
                try:
                    couch_user.create_phone_entry(phone_number)
                except InvalidFormatException:
                    pass


@no_result_task(serializer='pickle',
                queue='background_queue',
                acks_late=True,
                default_retry_delay=5 * 60,
                max_retries=10,
                bind=True)
def publish_sms_change(self, sms):
    try:
        publish_sms_saved(sms)
    except Exception as e:
        self.retry(exc=e)


def queued_sms():
    return QueuedSMS.objects.count()


datadog_gauge_task('commcare.sms.queued', queued_sms, run_every=crontab())
Example #7
def process_repeat_record(repeat_record):
    try:
        # accessing .repeater raises ResourceNotFound if the repeater was deleted
        repeat_record.repeater
    except ResourceNotFound:
        repeat_record.cancel()
        repeat_record.save()
        return

    try:
        if repeat_record.repeater.doc_type.endswith(DELETED_SUFFIX):
            if not repeat_record.doc_type.endswith(DELETED_SUFFIX):
                repeat_record.doc_type += DELETED_SUFFIX
                repeat_record.save()
        elif repeat_record.state == RECORD_PENDING_STATE or repeat_record.state == RECORD_FAILURE_STATE:
            repeat_record.fire()
    except Exception:
        logging.exception('Failed to process repeat record: {}'.format(
            repeat_record._id))


def _get_repeat_record_lock_key(record):
    """
    Including the rev in the key means that the record will be unlocked for processing
    every time we execute a `save()` call.
    """
    return 'repeat_record_in_progress-{}_{}'.format(record._id, record._rev)
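
To see why embedding the rev matters, consider a hypothetical caller that guards processing with an atomic cache lock (cache.add is Django's set-if-absent; the locking scheme here is an illustration, not necessarily what CommCare HQ uses):

from django.core.cache import cache

def try_process(repeat_record):
    key = _get_repeat_record_lock_key(repeat_record)
    # cache.add only sets the key if it is absent, so a second worker
    # seeing the same _id/_rev pair skips the record
    if not cache.add(key, 'locked', timeout=60 * 60):
        return
    process_repeat_record(repeat_record)
    # any save() bumps _rev, producing a fresh key, so the record
    # becomes eligible for processing again without manual unlocking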


repeaters_overdue = datadog_gauge_task(
    'commcare.repeaters.overdue',
    get_overdue_repeat_record_count,
    run_every=crontab()  # every minute
)
Example #8
                caseblocks.append(RowAndCase(i, caseblock))
                match_count += 1
            except CaseBlockError:
                errors.add(ImportErrors.CaseGeneration, i + 1)

        # check if we've reached a reasonable chunksize
        # and if so submit
        if len(caseblocks) >= chunksize:
            _submit_caseblocks(domain, config.case_type, caseblocks)
            num_chunks += 1
            caseblocks = []

    # final purge of anything left in the queue
    if _submit_caseblocks(domain, config.case_type, caseblocks):
        match_count -= 1
    num_chunks += 1
    return {
        'created_count': created_count,
        'match_count': match_count,
        'too_many_matches': too_many_matches,
        'errors': errors.as_dict(),
        'num_chunks': num_chunks,
    }
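
Stripped of the importer specifics, the loop above is a standard batching pattern; a generic sketch with hypothetical names:

def submit_in_chunks(items, submit, chunksize=100):
    # accumulate items and flush whenever a full chunk is ready
    buffer, num_chunks = [], 0
    for item in items:
        buffer.append(item)
        if len(buffer) >= chunksize:
            submit(buffer)
            num_chunks += 1
            buffer = []
    if buffer:  # final flush of anything left over
        submit(buffer)
        num_chunks += 1
    return num_chunks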


total_bytes = datadog_gauge_task(
    'commcare.case_importer.files.total_bytes',
    get_case_upload_files_total_bytes,
    run_every=crontab(minute=0)
)