Example #1
def change_from_kafka_message(message):
    change_meta = change_meta_from_kafka_message(message.value)
    try:
        document_store = get_document_store(
            data_source_type=change_meta.data_source_type,
            data_source_name=change_meta.data_source_name,
            domain=change_meta.domain,
            load_source="change_feed",
        )
    except UnknownDocumentStore:
        document_store = None
        notify_error("Unknown document store: {}".format(
            change_meta.data_source_type))
    except UnexpectedBackend:
        document_store = None
        notify_error("Change from incorrect backend",
                     details=change_meta.to_json())
    return Change(
        id=change_meta.document_id,
        sequence_id=message.offset,
        document=None,
        deleted=change_meta.is_deletion,
        metadata=change_meta,
        document_store=document_store,
        topic=message.topic,
        partition=message.partition,
    )
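
All of the examples on this page share the same call shape: notify_error(message) or notify_error(message, details=...) with a dict of structured context. As a minimal stand-in for experimenting with these snippets, assuming only that shape (the real helper presumably forwards to the project's error-reporting backend), one might use:

import logging

logger = logging.getLogger(__name__)


def notify_error(message, details=None):
    # Minimal sketch, not the real implementation: record the message and
    # any structured details at ERROR level.
    logger.error("%s | details=%r", message, details or {})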
Example #2
def excel_config(request, domain):
    """
    Step one of three.

    This is the initial POST request, sent when the user uploads the Excel file.
    """
    if request.method != 'POST':
        return HttpResponseRedirect(base.ImportCases.get_url(domain=domain))

    if not request.FILES:
        return render_error(request, domain,
                            'Please choose an Excel file to import.')

    uploaded_file_handle = request.FILES['file']
    try:
        case_upload, context = _process_file_and_get_upload(
            uploaded_file_handle,
            request,
            domain,
            max_columns=MAX_CASE_IMPORTER_COLUMNS)
    except ImporterFileNotFound as e:
        notify_error(f"Import file not found after initial upload: {str(e)}")
        return render_error(request, domain, get_importer_error_message(e))
    except ImporterError as e:
        return render_error(request, domain, get_importer_error_message(e))
    except SpreadsheetFileExtError:
        return render_error(
            request, domain,
            _("Please upload file with extension .xls or .xlsx"))

    context.update(_case_importer_breadcrumb_context(_('Case Options'),
                                                     domain))
    return render(request, "case_importer/excel_config.html", context)
Example #3
def run_case_update_rules_for_domain(domain, now=None):
    now = now or datetime.utcnow()
    start_run = datetime.utcnow()
    all_rules = AutomaticUpdateRule.by_domain(domain)
    rules_by_case_type = AutomaticUpdateRule.organize_rules_by_case_type(all_rules)

    for case_type, rules in rules_by_case_type.items():
        boundary_date = AutomaticUpdateRule.get_boundary_date(rules, now)
        case_id_chunks = AutomaticUpdateRule.get_case_ids(domain, case_type, boundary_date)

        for case_ids in case_id_chunks:
            for case in CaseAccessors(domain).iter_cases(case_ids):
                time_elapsed = datetime.utcnow() - start_run
                if time_elapsed.seconds > HALT_AFTER:
                    notify_error(
                        "Halting rule run for domain %s as it's been running for more than a day." % domain
                    )
                    return

                for rule in rules:
                    stop_processing = rule.apply_rule(case, now)
                    if stop_processing:
                        break

        for rule in rules:
            rule.last_run = now
            rule.save()
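
One caveat worth noting in the halt check above: timedelta.seconds discards whole days, so if a run ever exceeded 24 hours the comparison would see a small value again. total_seconds() avoids that, as this self-contained check shows:

from datetime import timedelta

elapsed = timedelta(days=1, seconds=5)
assert elapsed.seconds == 5                # .seconds wraps: whole days are dropped
assert elapsed.total_seconds() == 86405.0  # total_seconds() counts the full span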
Example #4
def process_pillow_retry(error_doc, producer=None):
    producer = producer or kafka_producer
    pillow_name_or_class = error_doc.pillow
    try:
        pillow = _get_pillow(pillow_name_or_class)
    except PillowNotFoundError:
        pillow = None

    if not pillow:
        notify_error((
            "Could not find pillowtop class '%s' while attempting a retry. "
            "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
            "If not, then this should be looked into.") % pillow_name_or_class)
        try:
            error_doc.total_attempts = const.PILLOW_RETRY_MULTI_ATTEMPTS_CUTOFF + 1
            error_doc.save()
        finally:
            return

    try:
        if isinstance(pillow.get_change_feed(), CouchChangeFeed):
            _process_couch_change(pillow, error_doc)
        else:
            _process_kafka_change(producer, error_doc)
    except Exception:
        ex_type, ex_value, ex_tb = sys.exc_info()
        error_doc.add_attempt(ex_value, ex_tb)
        error_doc.save()
Example #5
def get_inbound_phone_entry(msg):
    if msg.backend_id:
        backend = SQLMobileBackend.load(msg.backend_id, is_couch_id=True)
        if toggles.INBOUND_SMS_LENIENCY.enabled(backend.domain):
            p = None
            if toggles.ONE_PHONE_NUMBER_MULTIPLE_CONTACTS.enabled(
                    backend.domain):
                running_session_info = XFormsSessionSynchronization.get_running_session_info_for_channel(
                    SMSChannel(backend_id=msg.backend_id,
                               phone_number=msg.phone_number))
                contact_id = running_session_info.contact_id
                if contact_id:
                    p = PhoneNumber.get_phone_number_for_owner(
                        contact_id, msg.phone_number)
                if p is not None:
                    return (p, True)
                elif running_session_info.session_id:
                    # This would be very unusual, as it would mean the supposedly running form session
                    # is linked to a phone number, contact pair that doesn't exist in the PhoneNumber table
                    notify_error(
                        "Contact from running session has no match in PhoneNumber table. "
                        "Only known way for this to happen is if you "
                        "unregister a phone number for a contact "
                        "while they are in an active session.",
                        details={'running_session_info': running_session_info})

            if not backend.is_global:
                p = PhoneNumber.get_two_way_number_with_domain_scope(
                    msg.phone_number, backend.domains_with_access)
                return (p, p is not None)

    return (PhoneNumber.get_reserved_number(msg.phone_number), False)
Example #6
def process_pillow_retry(error_doc_id):
    # Redis error logged in get_redis_client
    try:
        client = cache_core.get_redis_client()
    except cache_core.RedisClientError:
        return

    # Prevent more than one task from processing this error, just in case
    # it got enqueued twice.
    lock = client.lock(
        "pillow-retry-processing-%s" % error_doc_id,
        timeout=settings.PILLOW_RETRY_PROCESSING_LOCK_TIMEOUT*60
    )
    if lock.acquire(blocking=False):
        try:
            error_doc = PillowError.objects.get(id=error_doc_id)
        except PillowError.DoesNotExist:
            release_lock(lock, True)
            return

        pillow_name_or_class = error_doc.pillow
        try:
            pillow = get_pillow_by_name(pillow_name_or_class)
        except PillowNotFoundError:
            pillow = None

        if not pillow:
            notify_error((
                "Could not find pillowtop class '%s' while attempting a retry. "
                "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
                "If not, then this should be looked into."
            ) % pillow_name_or_class)
            try:
                error_doc.total_attempts = PillowError.multi_attempts_cutoff() + 1
                error_doc.save()
            finally:
                release_lock(lock, True)
                return

        change = error_doc.change_object
        try:
            change_metadata = change.metadata
            if change_metadata:
                document_store = get_document_store(
                    data_source_type=change_metadata.data_source_type,
                    data_source_name=change_metadata.data_source_name,
                    domain=change_metadata.domain
                )
                change.document_store = document_store
            pillow.process_change(change)
        except Exception:
            ex_type, ex_value, ex_tb = sys.exc_info()
            error_doc.add_attempt(ex_value, ex_tb)
            error_doc.queued = False
            error_doc.save()
        else:
            error_doc.delete()
        finally:
            release_lock(lock, True)
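
The release_lock helper is not shown on this page; a plausible sketch of what it does, assuming the second argument means "degrade gracefully if the lock is already gone" (e.g. its timeout expired), is:

def release_lock(lock, degrade_gracefully):
    # Hypothetical sketch: release a redis lock, optionally swallowing the
    # error raised when the lock was already released or timed out.
    try:
        lock.release()
    except Exception:
        if not degrade_gracefully:
            raise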
Example #7
def check_non_dimagi_superusers():
    non_dimagis_superuser = '******'.join((get_user_model().objects.filter(
        (Q(is_staff=True) | Q(is_superuser=True)) & ~Q(username__endswith='@dimagi.com')
    ).values_list('username', flat=True)))
    if non_dimagis_superuser:
        message = "{non_dimagis} have superuser privileges".format(non_dimagis=non_dimagis_superuser)
        _soft_assert_superusers(False, message)
        notify_error(message=message)
Example #8
def form_session_handler(v, text, msg):
    """
    The form session handler will use the inbound text to answer the next question
    in the open SQLXformsSession for the associated contact. If no session is open,
    the handler passes. If multiple sessions are open, they are all closed and an
    error message is displayed to the user.
    """
    with critical_section_for_smsforms_sessions(v.owner_id):
        if toggles.ONE_PHONE_NUMBER_MULTIPLE_CONTACTS.enabled(v.domain):
            channel = get_channel_for_contact(v.owner_id, v.phone_number)
            running_session_info = XFormsSessionSynchronization.get_running_session_info_for_channel(
                channel)
            if running_session_info.session_id:
                session = SQLXFormsSession.by_session_id(
                    running_session_info.session_id)
                if not session.session_is_open:
                    # This should never happen. But if it does we should set the channel free
                    # and act like there was no available session
                    notify_error(
                        "The supposedly running session was not open and was released. "
                        'No known way for this to happen, so worth investigating.'
                    )
                    XFormsSessionSynchronization.clear_stale_channel_claim(
                        channel)
                    session = None
            else:
                session = None
        else:
            multiple, session = get_single_open_session_or_close_multiple(
                v.domain, v.owner_id)
            if multiple:
                send_sms_to_verified_number(
                    v, get_message(MSG_MULTIPLE_SESSIONS, v))
                return True

        if session:
            session.phone_number = v.phone_number
            session.modified_time = datetime.utcnow()
            session.save()

            # Metadata to be applied to the inbound message
            inbound_metadata = MessageMetadata(
                workflow=session.workflow,
                reminder_id=session.reminder_id,
                xforms_session_couch_id=session._id,
            )
            add_msg_tags(msg, inbound_metadata)

            try:
                answer_next_question(v, text, msg, session)
            except Exception:
                # Catch any touchforms errors
                log_sms_exception(msg)
                send_sms_to_verified_number(
                    v, get_message(MSG_TOUCHFORMS_DOWN, v))
            return True
        else:
            return False
Example #9
def process_pillow_retry(error_doc_id):
    # Redis error logged in get_redis_client
    try:
        client = cache_core.get_redis_client()
    except cache_core.RedisClientError:
        return

    # Prevent more than one task from processing this error, just in case
    # it got enqueued twice.
    lock = client.lock("pillow-retry-processing-%s" % error_doc_id,
                       timeout=settings.PILLOW_RETRY_PROCESSING_LOCK_TIMEOUT *
                       60)
    if lock.acquire(blocking=False):
        try:
            error_doc = PillowError.objects.get(id=error_doc_id)
        except PillowError.DoesNotExist:
            release_lock(lock, True)
            return

        pillow_name_or_class = error_doc.pillow
        try:
            pillow = get_pillow_by_name(pillow_name_or_class)
        except PillowNotFoundError:
            pillow = None

        if not pillow:
            notify_error((
                "Could not find pillowtop class '%s' while attempting a retry. "
                "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
                "If not, then this should be looked into.") %
                         pillow_name_or_class)
            try:
                error_doc.total_attempts = PillowError.multi_attempts_cutoff() + 1
                error_doc.save()
            finally:
                release_lock(lock, True)
                return

        change = error_doc.change_object

        try:
            try:
                from corehq.apps.userreports.pillow import ConfigurableReportKafkaPillow
                if isinstance(pillow, ConfigurableReportKafkaPillow):
                    raise Exception('this is temporarily not supported!')
            except ImportError:
                pass
            pillow.process_change(change)
        except Exception:
            ex_type, ex_value, ex_tb = sys.exc_info()
            error_doc.add_attempt(ex_value, ex_tb)
            error_doc.queued = False
            error_doc.save()
        else:
            error_doc.delete()
        finally:
            release_lock(lock, True)
Example #10
def process_pillow_retry(error_doc):
    pillow_name_or_class = error_doc.pillow
    try:
        pillow = get_pillow_by_name(pillow_name_or_class)
    except PillowNotFoundError:
        pillow = None

    if not pillow:
        notify_error((
            "Could not find pillowtop class '%s' while attempting a retry. "
            "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
            "If not, then this should be looked into."
        ) % pillow_name_or_class)
        try:
            error_doc.total_attempts = const.PILLOW_RETRY_MULTI_ATTEMPTS_CUTOFF + 1
            error_doc.save()
        finally:
            return

    change = error_doc.change_object
    delete_all_for_doc = False
    try:
        change_metadata = change.metadata
        if change_metadata:
            document_store = get_document_store(
                data_source_type=change_metadata.data_source_type,
                data_source_name=change_metadata.data_source_name,
                domain=change_metadata.domain,
                load_source="pillow_retry",
            )
            change.document_store = document_store
        if isinstance(pillow.get_change_feed(), CouchChangeFeed):
            pillow.process_change(change)
        else:
            if change_metadata.data_source_type in ('couch', 'sql'):
                data_source_type = change_metadata.data_source_type
            else:
                # legacy metadata will have other values for non-sql
                # can remove this once all legacy errors have been processed
                data_source_type = 'sql'
            producer.send_change(
                get_topic_for_doc_type(
                    change_metadata.document_type,
                    data_source_type
                ),
                change_metadata
            )
            delete_all_for_doc = True
    except Exception:
        ex_type, ex_value, ex_tb = sys.exc_info()
        error_doc.add_attempt(ex_value, ex_tb)
        error_doc.save()
    else:
        if delete_all_for_doc:
            PillowError.objects.filter(doc_id=error_doc.doc_id).delete()
        else:
            error_doc.delete()
Example #11
    def iter_changes(self, since, forever):
        """
        Since can either be an integer (for single topic change feeds) or a dict
        of topics to integers (for multiple topic change feeds)
        """
        # a special value of since=None will start from the end of the change stream

        # in milliseconds, -1 means wait forever for changes
        timeout = -1 if forever else MIN_TIMEOUT

        start_from_latest = since is None

        reset = 'largest' if start_from_latest else 'smallest'
        consumer = self._get_consumer(timeout, auto_offset_reset=reset)
        if not start_from_latest:
            if isinstance(since, dict):
                if not since:
                    since = {topic: 0 for topic in self._topics}
                self._processed_topic_offsets = copy(since)
            else:
                # single topic
                single_topic = self._get_single_topic_or_fail()
                try:
                    offset = int(since)  # coerce sequence IDs to ints
                except ValueError:
                    notify_error(
                        "kafka pillow {} couldn't parse sequence ID {}. rewinding..."
                        .format(self._group_id, since))
                    # since kafka only keeps 7 days of data this isn't a big deal. Hopefully we will only see
                    # these once when each pillow moves over.
                    offset = 0
                self._processed_topic_offsets = {single_topic: offset}

            def _make_offset_tuple(topic):
                if topic in self._processed_topic_offsets:
                    return (topic, self._partition,
                            self._processed_topic_offsets[topic])
                else:
                    return (topic, self._partition)

            offsets = [_make_offset_tuple(topic) for topic in self._topics]
            if self.strict:
                self._validate_offsets(offsets)

            # this is how you tell the consumer to start from a certain point in the sequence
            consumer.set_topic_partitions(*offsets)

        try:
            for message in consumer:
                self._processed_topic_offsets[message.topic] = message.offset
                yield change_from_kafka_message(message)
        except ConsumerTimeout:
            assert not forever, 'Kafka pillow should not timeout when waiting forever!'
Example #12
    def iter_changes(self, since, forever):
        """
        Since can either be an integer (for single topic change feeds) or a dict
        of topics to integers (for multiple topic change feeds)
        """
        # a special value of since=None will start from the end of the change stream

        # in milliseconds, -1 means wait forever for changes
        timeout = -1 if forever else MIN_TIMEOUT

        start_from_latest = since is None

        reset = 'largest' if start_from_latest else 'smallest'
        consumer = self._get_consumer(timeout, auto_offset_reset=reset)
        if not start_from_latest:
            if isinstance(since, dict):
                if not since:
                    since = {topic: 0 for topic in self._topics}
                self._processed_topic_offsets = copy(since)
            else:
                # single topic
                single_topic = self._get_single_topic_or_fail()
                try:
                    offset = int(since)  # coerce sequence IDs to ints
                except ValueError:
                    notify_error("kafka pillow {} couldn't parse sequence ID {}. rewinding...".format(
                        self._group_id, since
                    ))
                    # since kafka only keeps 7 days of data this isn't a big deal. Hopefully we will only see
                    # these once when each pillow moves over.
                    offset = 0
                self._processed_topic_offsets = {single_topic: offset}

            def _make_offset_tuple(topic):
                if topic in self._processed_topic_offsets:
                    return (topic, self._partition, self._processed_topic_offsets[topic])
                else:
                    return (topic, self._partition)

            offsets = [_make_offset_tuple(topic) for topic in self._topics]
            if self.strict:
                self._validate_offsets(offsets)

            # this is how you tell the consumer to start from a certain point in the sequence
            consumer.set_topic_partitions(*offsets)

        try:
            for message in consumer:
                self._processed_topic_offsets[message.topic] = message.offset
                yield change_from_kafka_message(message)
        except ConsumerTimeout:
            assert not forever, 'Kafka pillow should not timeout when waiting forever!'
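
A hedged usage sketch for iter_changes, with the feed instance and the processing hook passed in as parameters (both hypothetical names): since=None tails only new messages, while a saved offset, or a dict of per-topic offsets, resumes from a checkpoint.

def tail_feed(feed, handle, last_offset=None):
    # Hypothetical usage wrapper: resume from a saved offset (None tails only
    # new messages) and record each offset so a restart can pick up here.
    for change in feed.iter_changes(since=last_offset, forever=True):
        handle(change)
        last_offset = change.sequence_id
    return last_offset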
Example #13
def process_pillow_retry(error_doc):
    pillow_name_or_class = error_doc.pillow
    try:
        pillow = get_pillow_by_name(pillow_name_or_class)
    except PillowNotFoundError:
        pillow = None

    if not pillow:
        notify_error((
            "Could not find pillowtop class '%s' while attempting a retry. "
            "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
            "If not, then this should be looked into.") % pillow_name_or_class)
        try:
            error_doc.total_attempts = const.PILLOW_RETRY_MULTI_ATTEMPTS_CUTOFF + 1
            error_doc.save()
        finally:
            return

    change = error_doc.change_object
    delete_all_for_doc = False
    try:
        change_metadata = change.metadata
        if change_metadata:
            document_store = get_document_store(
                data_source_type=change_metadata.data_source_type,
                data_source_name=change_metadata.data_source_name,
                domain=change_metadata.domain,
                load_source="pillow_retry",
            )
            change.document_store = document_store
        if isinstance(pillow.get_change_feed(), CouchChangeFeed):
            pillow.process_change(change)
        else:
            if change_metadata.data_source_type in ('couch', 'sql'):
                data_source_type = change_metadata.data_source_type
            else:
                # legacy metadata will have other values for non-sql
                # can remove this once all legacy errors have been processed
                data_source_type = 'sql'
            producer.send_change(
                get_topic_for_doc_type(change_metadata.document_type,
                                       data_source_type), change_metadata)
            delete_all_for_doc = True
    except Exception:
        ex_type, ex_value, ex_tb = sys.exc_info()
        error_doc.add_attempt(ex_value, ex_tb)
        error_doc.save()
    else:
        if delete_all_for_doc:
            PillowError.objects.filter(doc_id=error_doc.doc_id).delete()
        else:
            error_doc.delete()
Example #14
def get_session_by_session_id(id):
    """
    Utility method that first tries to get a session from SQL and, failing
    that, gets it from Couch and logs an error.
    """
    sql_session = SQLXFormsSession.by_session_id(id)
    if sql_session:
        return sql_session

    couch_session = XFormsSession.by_session_id(id)
    if couch_session:
        notify_error('session {} could not be found in sql.'.format(couch_session._id))
    return couch_session
Example #15
def check_pillows_for_rewind():
    for pillow in get_couch_pillow_instances():
        checkpoint = pillow.checkpoint
        has_rewound, historical_seq = check_for_rewind(checkpoint)
        if has_rewound:
            notify_error(
                message='Found seq number lower than previous for {}. '
                        'This could mean we are in a rewind state'.format(checkpoint.checkpoint_id),
                details={
                    'pillow checkpoint seq': checkpoint.get_current_sequence_id(),
                    'stored seq': historical_seq
                }
            )
Example #16
def check_pillows_for_rewind():
    for pillow in get_couch_pillow_instances():
        checkpoint = pillow.checkpoint
        has_rewound, historical_seq = check_for_rewind(checkpoint)
        if has_rewound:
            notify_error(
                message='Found seq number lower than previous for {}. '
                        'This could mean we are in a rewind state'.format(checkpoint.checkpoint_id),
                details={
                    'pillow checkpoint seq': checkpoint.get_current_sequence_id(),
                    'stored seq': historical_seq
                }
            )
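
check_for_rewind itself is not included on this page. A hedged sketch of the comparison it presumably makes, with get_last_stored_seq standing in as a hypothetical helper that returns the last recorded sequence for the checkpoint:

def check_for_rewind(checkpoint):
    # Hypothetical sketch: a couch _changes seq such as "1234-abcd..." should
    # only grow, so a current value below the stored one suggests a rewind.
    historical_seq = get_last_stored_seq(checkpoint.checkpoint_id)  # hypothetical helper
    current_seq = checkpoint.get_current_sequence_id()
    current = int(str(current_seq).split('-')[0])
    historical = int(str(historical_seq).split('-')[0])
    return current < historical, historical_seq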
Example #17
def run_case_update_rules_for_domain_and_db(domain, now, run_id, db=None):
    domain_obj = Domain.get_by_name(domain)
    max_allowed_updates = domain_obj.auto_case_update_limit or settings.MAX_RULE_UPDATES_IN_ONE_RUN
    start_run = datetime.utcnow()

    last_migration_check_time = None
    cases_checked = 0
    case_update_result = CaseRuleActionResult()

    all_rules = list(
        AutomaticUpdateRule.by_domain(
            domain, AutomaticUpdateRule.WORKFLOW_CASE_UPDATE))
    rules_by_case_type = AutomaticUpdateRule.organize_rules_by_case_type(
        all_rules)

    for case_type, rules in six.iteritems(rules_by_case_type):
        boundary_date = AutomaticUpdateRule.get_boundary_date(rules, now)
        case_ids = list(
            AutomaticUpdateRule.get_case_ids(domain,
                                             case_type,
                                             boundary_date,
                                             db=db))

        for case in CaseAccessors(domain).iter_cases(case_ids):
            migration_in_progress, last_migration_check_time = check_data_migration_in_progress(
                domain, last_migration_check_time)

            time_elapsed = datetime.utcnow() - start_run
            if (time_elapsed.seconds > HALT_AFTER
                    or case_update_result.total_updates >= max_allowed_updates
                    or migration_in_progress):
                DomainCaseRuleRun.done(run_id,
                                       DomainCaseRuleRun.STATUS_HALTED,
                                       cases_checked,
                                       case_update_result,
                                       db=db)
                notify_error("Halting rule run for domain %s." % domain)
                return

            case_update_result.add_result(run_rules_for_case(case, rules, now))
            cases_checked += 1

    run = DomainCaseRuleRun.done(run_id,
                                 DomainCaseRuleRun.STATUS_FINISHED,
                                 cases_checked,
                                 case_update_result,
                                 db=db)

    if run.status == DomainCaseRuleRun.STATUS_FINISHED:
        for rule in all_rules:
            AutomaticUpdateRule.objects.filter(pk=rule.pk).update(last_run=now)
Example #18
def _check_es_rev(es_alias, doc_id, couch_revs):
    """
    Specific docid and rev checker.

    es_alias: Elasticsearch alias
    doc_id: id to query in ES
    couch_rev: target couch_rev that you want to match
    """
    es_interface = ElasticsearchInterface(get_es_new())
    doc_id_query = {
        "filter": {
            "ids": {
                "values": [doc_id]
            }
        },
        "fields": ["_id", "_rev"]
    }

    try:
        res = es_interface.search(es_alias, body=doc_id_query)
        status = False
        message = "Not in sync"

        if 'hits' in res:
            if res['hits'].get('total', 0) == 0:
                status = False
                # if doc doesn't exist it's def. not in sync
                message = "Not in sync %s" % es_alias
            elif 'hits' in res['hits']:
                fields = res['hits']['hits'][0]['fields']
                if fields['_rev'] in couch_revs:
                    status = True
                    message = "%s OK" % es_alias
                else:
                    status = False
                    # less likely, but if it's there but the rev is off
                    message = "Not in sync - %s stale" % es_alias
        else:
            status = False
            message = "Not in sync - query failed"
            notify_error("%s: %s" % (message, str(res)))
    except Exception as ex:
        message = "ES Error: %s" % ex
        status = False
    return {
        es_alias: {
            "es_alias": es_alias,
            "status": status,
            "message": message
        }
    }
Example #19
def sync_couch_session_from_sql_session(sql_session):
    if sql_session.do_not_sync:
        return

    if not sql_session.couch_id:
        notify_error('Only existing sessions can be synced for now. sql session id is {}'.format(sql_session.pk))
        return

    couch_doc = XFormsSession.get(sql_session.couch_id)
    for attr in SESSION_PROPERTIES_TO_SYNC:
        setattr(couch_doc, attr, getattr(sql_session, attr))

    # don't call .save() since that will create a recursive loop of syncing
    XFormsSession.get_db().save_doc(couch_doc._doc)
Example #20
    def rebuild_tables_if_necessary(self):
        table_map = {t.get_table().name: t for t in self.tables}
        engine = self.get_sql_engine()
        with engine.begin() as connection:
            migration_context = get_migration_context(connection, table_map.keys())
            diffs = compare_metadata(migration_context, metadata)

        tables_to_rebuild = get_tables_to_rebuild(diffs, table_map.keys())
        for table_name in tables_to_rebuild:
            table = table_map[table_name]
            try:
                self.rebuild_table(table)
            except TableRebuildError as e:
                notify_error(str(e))
Example #21
def jserror(request):
    agent = request.META.get('HTTP_USER_AGENT', None)
    os = browser_name = browser_version = bot = TAG_UNKNOWN
    if agent:
        parsed_agent = httpagentparser.detect(agent)
        bot = parsed_agent.get('bot', False)
        if 'os' in parsed_agent:
            os = parsed_agent['os'].get('name', TAG_UNKNOWN)

        if 'browser' in parsed_agent:
            browser_version = parsed_agent['browser'].get(
                'version', TAG_UNKNOWN)
            browser_name = parsed_agent['browser'].get('name', TAG_UNKNOWN)

    url = request.POST.get('page', None)
    domain = None
    if url:
        path = urlparse(url).path
        if path:
            domain = get_domain_from_url(path)
    domain = domain or '_unknown'

    metrics_counter('commcare.jserror.count',
                    tags={
                        'os': os,
                        'browser_version': browser_version,
                        'browser_name': browser_name,
                        'url': sanitize_url(url),
                        'bot': bot,
                        'domain': domain,
                    })

    notify_error(message=f'[JS] {request.POST.get("message")}',
                 details={
                     'message': request.POST.get('message'),
                     'domain': domain,
                     'page': url,
                     'file': request.POST.get('file'),
                     'line': request.POST.get('line'),
                     'stack': request.POST.get('stack'),
                     'meta': {
                         'os': os,
                         'browser_version': browser_version,
                         'browser_name': browser_name,
                         'bot': bot,
                     }
                 })

    return HttpResponse('')
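
sanitize_url is not shown here; since it feeds a metrics tag, it presumably collapses high-cardinality path segments. A hypothetical sketch:

import re


def sanitize_url(url):
    # Hypothetical sketch: replace UUID-like and numeric path segments so the
    # 'url' tag stays low-cardinality.
    if not url:
        return 'unknown'
    url = re.sub(r'[0-9a-fA-F]{8}-[0-9a-fA-F-]{27}', '<uuid>', url)
    return re.sub(r'/\d+(?=/|$)', '/<id>', url)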
Example #22
def iter_cases_and_run_rules(domain,
                             case_iterator,
                             rules,
                             now,
                             run_id,
                             case_type,
                             db=None,
                             progress_helper=None):
    from corehq.apps.data_interfaces.models import (
        CaseRuleActionResult,
        DomainCaseRuleRun,
    )
    HALT_AFTER = 23 * 60 * 60

    domain_obj = Domain.get_by_name(domain)
    max_allowed_updates = domain_obj.auto_case_update_limit or settings.MAX_RULE_UPDATES_IN_ONE_RUN
    start_run = datetime.utcnow()
    case_update_result = CaseRuleActionResult()

    cases_checked = 0
    last_migration_check_time = None

    for case in case_iterator:
        migration_in_progress, last_migration_check_time = _check_data_migration_in_progress(
            domain, last_migration_check_time)

        time_elapsed = datetime.utcnow() - start_run
        if (time_elapsed.seconds > HALT_AFTER
                or case_update_result.total_updates >= max_allowed_updates
                or migration_in_progress):
            notify_error("Halting rule run for domain %s and case type %s." %
                         (domain, case_type))

            return DomainCaseRuleRun.done(run_id,
                                          cases_checked,
                                          case_update_result,
                                          db=db,
                                          halted=True)

        case_update_result.add_result(run_rules_for_case(case, rules, now))
        if progress_helper is not None:
            progress_helper.increment_current_case_count()
        cases_checked += 1
    return DomainCaseRuleRun.done(run_id,
                                  cases_checked,
                                  case_update_result,
                                  db=db)
Example #23
def run_case_update_rules_for_domain(domain, now=None):
    domain_obj = Domain.get_by_name(domain)
    max_allowed_updates = domain_obj.auto_case_update_limit or settings.MAX_RULE_UPDATES_IN_ONE_RUN

    now = now or datetime.utcnow()
    start_run = datetime.utcnow()
    last_migration_check_time = None
    run_record = DomainCaseRuleRun.objects.create(
        domain=domain,
        started_on=start_run,
        status=DomainCaseRuleRun.STATUS_RUNNING,
    )
    cases_checked = 0
    case_update_result = CaseRuleActionResult()

    all_rules = AutomaticUpdateRule.by_domain(
        domain, AutomaticUpdateRule.WORKFLOW_CASE_UPDATE)
    rules_by_case_type = AutomaticUpdateRule.organize_rules_by_case_type(
        all_rules)

    for case_type, rules in six.iteritems(rules_by_case_type):
        boundary_date = AutomaticUpdateRule.get_boundary_date(rules, now)
        case_ids = list(
            AutomaticUpdateRule.get_case_ids(domain, case_type, boundary_date))

        for case in CaseAccessors(domain).iter_cases(case_ids):
            migration_in_progress, last_migration_check_time = check_data_migration_in_progress(
                domain, last_migration_check_time)

            time_elapsed = datetime.utcnow() - start_run
            if (time_elapsed.seconds > HALT_AFTER
                    or case_update_result.total_updates >= max_allowed_updates
                    or migration_in_progress):
                run_record.done(DomainCaseRuleRun.STATUS_HALTED, cases_checked,
                                case_update_result)
                notify_error("Halting rule run for domain %s." % domain)
                return

            case_update_result.add_result(run_rules_for_case(case, rules, now))
            cases_checked += 1

        for rule in rules:
            rule.last_run = now
            rule.save()

    run_record.done(DomainCaseRuleRun.STATUS_FINISHED, cases_checked,
                    case_update_result)
Example #24
    def rebuild_tables_if_necessary(self):
        tables_by_engine = defaultdict(dict)
        for adapter in self.table_adapters:
            tables_by_engine[adapter.engine_id][adapter.get_table().name] = adapter

        for engine_id, table_map in tables_by_engine.items():
            engine = connection_manager.get_engine(engine_id)
            with engine.begin() as connection:
                migration_context = get_migration_context(connection, table_map.keys())
                diffs = compare_metadata(migration_context, metadata)

            tables_to_rebuild = get_tables_to_rebuild(diffs, table_map.keys())
            for table_name in tables_to_rebuild:
                table = table_map[table_name]
                try:
                    self.rebuild_table(table)
                except TableRebuildError as e:
                    notify_error(str(e))
Example #25
def _check_es_rev(index, doc_id, couch_revs):
    """
    Specific docid and rev checker.

    index: Elasticsearch index
    doc_id: id to query in ES
    couch_rev: target couch_rev that you want to match
    """
    es = get_es_new()
    doc_id_query = {
        "filter": {
            "ids": {
                "values": [doc_id]
            }
        },
        "fields": ["_id", "_rev"]
    }

    try:
        res = es.search(index, body=doc_id_query)
        status = False
        message = "Not in sync"

        if 'hits' in res:
            if res['hits'].get('total', 0) == 0:
                status = False
                # if doc doesn't exist it's def. not in sync
                message = "Not in sync %s" % index
            elif 'hits' in res['hits']:
                fields = res['hits']['hits'][0]['fields']
                if fields['_rev'] in couch_revs:
                    status = True
                    message = "%s OK" % index
                else:
                    status = False
                    # less likely, but if it's there but the rev is off
                    message = "Not in sync - %s stale" % index
        else:
            status = False
            message = "Not in sync - query failed"
            notify_error("%s: %s" % (message, str(res)))
    except Exception as ex:
        message = "ES Error: %s" % ex
        status = False
Example #26
def change_from_kafka_message(message):
    change_meta = change_meta_from_kafka_message(message.value)
    try:
        document_store = get_document_store(
            data_source_type=change_meta.data_source_type,
            data_source_name=change_meta.data_source_name,
            domain=change_meta.domain)
    except UnknownDocumentStore:
        document_store = None
        notify_error("Unknown document store: {}".format(
            change_meta.data_source_type))
    return Change(
        id=change_meta.document_id,
        sequence_id=message.offset,
        document=None,
        deleted=change_meta.is_deletion,
        metadata=change_meta,
        document_store=document_store,
    )
Example #27
def change_from_kafka_message(message):
    change_meta = change_meta_from_kafka_message(message.value)
    try:
        document_store = get_document_store(
            data_source_type=change_meta.data_source_type,
            data_source_name=change_meta.data_source_name,
            domain=change_meta.domain
        )
    except UnknownDocumentStore:
        document_store = None
        notify_error("Unknown document store: {}".format(change_meta.data_source_type))
    return Change(
        id=change_meta.document_id,
        sequence_id=message.offset,
        document=None,
        deleted=change_meta.is_deletion,
        metadata=change_meta,
        document_store=document_store,
    )
Example #28
def _check_es_rev(index, doc_id, couch_revs):
    """
    Specific docid and rev checker.

    index: rawes index
    doc_id: id to query in ES
    couch_rev: target couch_rev that you want to match
    """
    es = get_es()
    doc_id_query = {
        "filter": {
            "ids": {"values": [doc_id]}
        },
        "fields": ["_id", "_rev"]
    }

    try:
        res = es[index].get('_search', data=doc_id_query)
        status = False
        message = "Not in sync"

        if 'hits' in res:
            if res['hits'].get('total', 0) == 0:
                status = False
                # if doc doesn't exist it's def. not in sync
                message = "Not in sync %s" % index
            elif 'hits' in res['hits']:
                fields = res['hits']['hits'][0]['fields']
                if fields['_rev'] in couch_revs:
                    status = True
                    message = "%s OK" % index
                else:
                    status = False
                    # less likely, but if it's there but the rev is off
                    message = "Not in sync - %s stale" % index
        else:
            status = False
            message = "Not in sync - query failed"
            notify_error("%s: %s" % (message, str(res)))
    except Exception as ex:
        message = "ES Error: %s" % ex
        status = False
Example #29
    def rebuild_tables_if_necessary(self):
        tables_by_engine = defaultdict(dict)
        for adapter in self.table_adapters:
            tables_by_engine[adapter.engine_id][
                adapter.get_table().name] = adapter

        for engine_id, table_map in tables_by_engine.items():
            engine = connection_manager.get_engine(engine_id)
            with engine.begin() as connection:
                migration_context = get_migration_context(
                    connection, table_map.keys())
                diffs = compare_metadata(migration_context, metadata)

            tables_to_rebuild = get_tables_to_rebuild(diffs, table_map.keys())
            for table_name in tables_to_rebuild:
                table = table_map[table_name]
                try:
                    self.rebuild_table(table)
                except TableRebuildError as e:
                    notify_error(str(e))
Example #30
def run_case_update_rules_for_domain_and_db(domain, now, run_id, db=None):
    domain_obj = Domain.get_by_name(domain)
    max_allowed_updates = domain_obj.auto_case_update_limit or settings.MAX_RULE_UPDATES_IN_ONE_RUN
    start_run = datetime.utcnow()

    last_migration_check_time = None
    cases_checked = 0
    case_update_result = CaseRuleActionResult()

    all_rules = list(AutomaticUpdateRule.by_domain(domain, AutomaticUpdateRule.WORKFLOW_CASE_UPDATE))
    rules_by_case_type = AutomaticUpdateRule.organize_rules_by_case_type(all_rules)

    for case_type, rules in six.iteritems(rules_by_case_type):
        boundary_date = AutomaticUpdateRule.get_boundary_date(rules, now)
        case_ids = list(AutomaticUpdateRule.get_case_ids(domain, case_type, boundary_date, db=db))

        for case in CaseAccessors(domain).iter_cases(case_ids):
            migration_in_progress, last_migration_check_time = check_data_migration_in_progress(domain,
                last_migration_check_time)

            time_elapsed = datetime.utcnow() - start_run
            if (
                time_elapsed.seconds > HALT_AFTER or
                case_update_result.total_updates >= max_allowed_updates or
                migration_in_progress
            ):
                DomainCaseRuleRun.done(run_id, DomainCaseRuleRun.STATUS_HALTED, cases_checked, case_update_result,
                    db=db)
                notify_error("Halting rule run for domain %s." % domain)
                return

            case_update_result.add_result(run_rules_for_case(case, rules, now))
            cases_checked += 1

    run = DomainCaseRuleRun.done(run_id, DomainCaseRuleRun.STATUS_FINISHED, cases_checked, case_update_result,
        db=db)

    if run.status == DomainCaseRuleRun.STATUS_FINISHED:
        for rule in all_rules:
            AutomaticUpdateRule.objects.filter(pk=rule.pk).update(last_run=now)
Example #31
def _post_data(data, user_id):
    if not data.get("domain"):
        raise ValueError("Expected domain")

    if not user_id:
        notify_exception(
            None,
            "Making smsforms request w/o user_id. Will result in non-sticky session.",
            details={
                'session-id': data.get('session-id'),
            })

    data = _get_formplayer_session_data(data)
    data_bytes = json.dumps(data).encode('utf-8')
    response = requests.post(
        url="{}/{}".format(get_formplayer_url(), data["action"]),
        data=data_bytes,
        headers={
            "Content-Type": "application/json",
            "content-length": str(len(data_bytes)),
            "X-MAC-DIGEST": get_hmac_digest(settings.FORMPLAYER_INTERNAL_AUTH_KEY, data_bytes),
            "X-FORMPLAYER-SESSION": user_id,
        },
    )
    if response.status_code == 404:
        raise Http404(response.reason)
    if 500 <= response.status_code < 600:
        http_error_msg = '{} Server Error: {} for url: {}'.format(
            response.status_code, response.reason, response.url)
        notify_error(http_error_msg,
                     details={
                         'response': response,
                         'body': response.text
                     })
        raise HTTPError(http_error_msg, response=response)
    return response.json()
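
get_hmac_digest pairs with the X-MAC-DIGEST header that the receiving service recomputes and compares. A plausible implementation, assumed rather than taken from the source, is a base64-encoded HMAC-SHA256 over the exact request body:

import base64
import hashlib
import hmac


def get_hmac_digest(shared_key, data_bytes):
    # Assumed sketch: MAC the exact bytes that go on the wire so the receiver
    # can verify both integrity and knowledge of the shared key.
    digest = hmac.new(shared_key.encode('utf-8'), data_bytes, hashlib.sha256).digest()
    return base64.b64encode(digest).decode('ascii')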
Example #32
    def create_tasks(self):
        survey_sessions_due_for_action = self.get_survey_sessions_due_for_action()
        all_open_session_ids = self.get_open_session_ids()
        for domain, connection_id, session_id, current_action_due, phone_number in survey_sessions_due_for_action:
            if skip_domain(domain):
                continue

            if toggles.ONE_PHONE_NUMBER_MULTIPLE_CONTACTS.enabled(domain):
                fake_session = SQLXFormsSession(
                    session_id=session_id,
                    connection_id=connection_id,
                    phone_number=phone_number,
                )
                if not XFormsSessionSynchronization.channel_is_available_for_session(
                        fake_session):
                    running_session_info = XFormsSessionSynchronization.get_running_session_info_for_channel(
                        fake_session.get_channel())
                    # First confirm the supposedly running session is even open
                    # and if it's not (should be exceedingly rare) release it and act like it wasn't there
                    if running_session_info.session_id \
                            and running_session_info.session_id not in all_open_session_ids:
                        notify_error(
                            "The supposedly running session was not open and was released. "
                            "No known way for this to happen, so worth investigating.",
                            details={
                                'running_session_info': running_session_info
                            })
                        XFormsSessionSynchronization.clear_stale_channel_claim(
                            fake_session.get_channel())
                    # This is the 99% case: there's a running session for the channel
                    # so leave this session/action in the queue for later and move on to the next one
                    else:
                        continue

            enqueue_lock = self.get_enqueue_lock(session_id,
                                                 current_action_due)
            if enqueue_lock.acquire(blocking=False):
                handle_due_survey_action.delay(domain, connection_id,
                                               session_id)
Example #33
def report_formplayer_error(request, domain):
    data = json.loads(request.body)
    error_type = data.get('type')

    with sentry_sdk.configure_scope() as scope:
        scope.set_tag("cloudcare_error_type", error_type)

    if error_type == 'webformsession_request_failure':
        metrics_counter('commcare.formplayer.webformsession_request_failure', tags={
            'request': data.get('request'),
            'statusText': data.get('statusText'),
            'state': data.get('state'),
            'status': data.get('status'),
            'domain': domain,
            'cloudcare_env': data.get('cloudcareEnv'),
        })
        message = data.get("readableErrorMessage") or "request failure in web form session"
        thread_topic = _message_to_sentry_thread_topic(message)
        notify_error(message=f'[Cloudcare] {thread_topic}', details=data)
    elif error_type == 'show_error_notification':
        message = data.get('message')
        thread_topic = _message_to_sentry_thread_topic(message)
        metrics_counter('commcare.formplayer.show_error_notification', tags={
            'message': _message_to_tag_value(message or 'no_message'),
            'domain': domain,
            'cloudcare_env': data.get('cloudcareEnv'),
        })
        notify_error(message=f'[Cloudcare] {thread_topic}', details=data)
    else:
        metrics_counter('commcare.formplayer.unknown_error_type', tags={
            'domain': domain,
            'cloudcare_env': data.get('cloudcareEnv'),
        })
        notify_error(message='[Cloudcare] unknown error type', details=data)
    return JsonResponse({'status': 'ok'})
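
_message_to_tag_value has to turn a free-text error message into something safe for a metrics tag (bounded cardinality, restricted characters). A hypothetical sketch:

import re


def _message_to_tag_value(message, max_length=75):
    # Hypothetical sketch: lowercase, replace runs of non-alphanumerics with
    # underscores, and truncate so the tag space stays bounded.
    tag = re.sub(r'[^a-z0-9]+', '_', message.lower()).strip('_')
    return tag[:max_length] or 'no_message'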
Example #34
def report_formplayer_error(request, domain):
    data = json.loads(request.body)
    error_type = data.get('type')
    if error_type == 'webformsession_request_failure':
        metrics_counter('commcare.formplayer.webformsession_request_failure',
                        tags={
                            'request': data.get('request'),
                            'statusText': data.get('statusText'),
                            'state': data.get('state'),
                            'status': data.get('status'),
                            'domain': domain,
                        })
        notify_error(message='Formplayer: request failure in web form session',
                     details=data)
    elif error_type == 'show_error_notification':
        message = data.get('message')
        metrics_counter('commcare.formplayer.show_error_notification',
                        tags={
                            'message': _message_to_tag_value(message or 'no_message'),
                            'domain': domain,
                        })
        notify_error(message=f'Formplayer: showed error to user: {message}',
                     details=data)
    else:
        metrics_counter('commcare.formplayer.unknown_error_type',
                        tags={
                            'domain': domain,
                        })
        notify_error(message='Formplayer: unknown error type', details=data)
    return JsonResponse({'status': 'ok'})
Example #35
def process_pillow_retry(error_doc_id):
    try:
        error_doc = PillowError.objects.get(id=error_doc_id)
    except PillowError.DoesNotExist:
        return

    pillow_name_or_class = error_doc.pillow
    try:
        pillow = get_pillow_by_name(pillow_name_or_class)
    except PillowNotFoundError:
        pillow = None

    if not pillow:
        notify_error((
            "Could not find pillowtop class '%s' while attempting a retry. "
            "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
            "If not, then this should be looked into.") % pillow_name_or_class)
        try:
            error_doc.total_attempts = PillowError.multi_attempts_cutoff() + 1
            error_doc.save()
        finally:
            return

    change = error_doc.change_object
    try:
        change_metadata = change.metadata
        if change_metadata:
            document_store = get_document_store(
                data_source_type=change_metadata.data_source_type,
                data_source_name=change_metadata.data_source_name,
                domain=change_metadata.domain)
            change.document_store = document_store
        pillow.process_change(change)
    except Exception:
        ex_type, ex_value, ex_tb = sys.exc_info()
        error_doc.add_attempt(ex_value, ex_tb)
        error_doc.save()
    else:
        error_doc.delete()
Example #36
def send_first_message(domain, recipient, phone_entry_or_number, session, responses, logged_subevent, workflow):
    # This try/except section is just here (temporarily) to support future refactors
    # If any of these notify, they should be replaced with a comment as to why the two are different
    # so that someone refactoring in the future will know that this or that param is necessary.
    try:
        if session.workflow != workflow:
            # see if we can eliminate the workflow arg
            notify_error('Exploratory: session.workflow != workflow', details={
                'session.workflow': session.workflow, 'workflow': workflow})
        if session.connection_id != recipient.get_id:
            # see if we can eliminate the recipient arg
            notify_error('Exploratory: session.connection_id != recipient.get_id', details={
                'session.connection_id': session.connection_id, 'recipient.get_id': recipient.get_id,
                'recipient': recipient
            })
        if session.related_subevent != logged_subevent:
            # see if we can eliminate the logged_subevent arg
            notify_error('Exploratory: session.related_subevent != logged_subevent', details={
                'session.connection_id': session.connection_id, 'logged_subevent': logged_subevent})
    except Exception:
        # The above running is not mission critical, so if it errors just leave a message in the log
        # for us to follow up on.
        # Absence of the message below and messages above ever notifying
        # will indicate that we can remove these args.
        notify_exception(None, "Error in section of code that's just supposed help inform future refactors")

    if toggles.ONE_PHONE_NUMBER_MULTIPLE_CONTACTS.enabled(domain):
        if not XFormsSessionSynchronization.claim_channel_for_session(session):
            send_first_message.apply_async(
                args=(domain, recipient, phone_entry_or_number, session, responses, logged_subevent, workflow),
                countdown=60
            )
            return

    metrics_counter('commcare.smsforms.session_started', 1, tags={'domain': domain, 'workflow': workflow})

    if len(responses) > 0:
        message = format_message_list(responses)
        metadata = MessageMetadata(
            workflow=workflow,
            xforms_session_couch_id=session.couch_id,
        )
        if isinstance(phone_entry_or_number, PhoneNumber):
            send_sms_to_verified_number(
                phone_entry_or_number,
                message,
                metadata,
                logged_subevent=logged_subevent
            )
        else:
            send_sms(
                domain,
                recipient,
                phone_entry_or_number,
                message,
                metadata
            )
    logged_subevent.completed()
Example #37
def run_case_update_rules_for_domain(domain, now=None):
    now = now or datetime.utcnow()
    start_run = datetime.utcnow()
    run_record = DomainCaseRuleRun.objects.create(
        domain=domain,
        started_on=start_run,
        status=DomainCaseRuleRun.STATUS_RUNNING,
    )
    cases_checked = 0
    case_update_result = CaseRuleActionResult()

    all_rules = AutomaticUpdateRule.by_domain(domain)
    rules_by_case_type = AutomaticUpdateRule.organize_rules_by_case_type(all_rules)

    for case_type, rules in rules_by_case_type.items():
        boundary_date = AutomaticUpdateRule.get_boundary_date(rules, now)
        case_id_chunks = AutomaticUpdateRule.get_case_ids(domain, case_type, boundary_date)

        for case_ids in case_id_chunks:
            for case in CaseAccessors(domain).iter_cases(case_ids):
                time_elapsed = datetime.utcnow() - start_run
                if (
                    time_elapsed.seconds > HALT_AFTER or
                    case_update_result.total_updates >= settings.MAX_RULE_UPDATES_IN_ONE_RUN
                ):
                    run_record.done(DomainCaseRuleRun.STATUS_HALTED, cases_checked, case_update_result)
                    notify_error("Halting rule run for domain %s." % domain)
                    return

                case_update_result.add_result(run_rules_for_case(case, rules, now))
                cases_checked += 1

        for rule in rules:
            rule.last_run = now
            rule.save()

    run_record.done(DomainCaseRuleRun.STATUS_FINISHED, cases_checked, case_update_result)
Example #38
    def iter_changes(self, since, forever):
        """
        Since can either be an integer (for single topic change feeds) or a dict
        of topics to integers (for multiple topic change feeds)
        """
        # a special value of since=None will start from the end of the change stream

        # in milliseconds, -1 means wait forever for changes
        timeout = -1 if forever else MIN_TIMEOUT

        reset = 'smallest' if since is not None else 'largest'
        consumer = self._get_consumer(timeout, auto_offset_reset=reset)
        if since is not None:
            if isinstance(since, dict):
                # multiple topics
                offsets = [(topic, self._partition, offset) for topic, offset in since.items()]
            else:
                # single topic
                topic = self._get_single_topic_or_fail()
                try:
                    offset = int(since)  # coerce sequence IDs to ints
                except ValueError:
                    notify_error("kafka pillow {} couldn't parse sequence ID {}. rewinding...".format(
                        self._group_id, since
                    ))
                    # since kafka only keeps 7 days of data this isn't a big deal. Hopefully we will only see
                    # these once when each pillow moves over.
                    offset = 0
                offsets = [(topic, self._partition, offset)]

            # this is how you tell the consumer to start from a certain point in the sequence
            consumer.set_topic_partitions(*offsets)
        try:
            for message in consumer:
                yield change_from_kafka_message(message)
        except ConsumerTimeout:
            assert not forever, 'Kafka pillow should not timeout when waiting forever!'
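
A hedged usage sketch of `iter_changes`, matching the two forms of `since` the docstring describes; the `feed` instance and the `process` handler are illustrative:

# Single-topic feed: resume from offset 1234 and stop once caught up.
for change in feed.iter_changes(since=1234, forever=False):
    print(change.id, change.sequence_id)

# Multi-topic feed: resume each topic from its own offset and block
# indefinitely waiting for new changes.
offsets = {'case': 5678, 'form': 91011}
for change in feed.iter_changes(since=offsets, forever=True):
    process(change)  # hypothetical downstream handler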
Example #39
def process_pillow_retry(error_doc_id):
    # Redis error logged in get_redis_client
    try:
        client = cache_core.get_redis_client()
    except cache_core.RedisClientError:
        return

    # Prevent more than one task from processing this error, just in case
    # it got enqueued twice.
    lock = client.lock(
        "pillow-retry-processing-%s" % error_doc_id,
        timeout=settings.PILLOW_RETRY_PROCESSING_LOCK_TIMEOUT * 60
    )

    if lock.acquire(blocking=False):
        try:
            error_doc = PillowError.objects.get(id=error_doc_id)
        except PillowError.DoesNotExist:
            # Release the lock before returning so a re-enqueued task is not
            # blocked until the lock times out.
            release_lock(lock, True)
            return

        pillow_class = error_doc.pillow
        try:
            pillow = get_pillow_instance(pillow_class)
        except ValueError:
            # all fluff pillows have module path of 'fluff' so can't be imported directly
            _, pillow_class_name = pillow_class.rsplit('.', 1)
            try:
                pillow = get_pillow_by_name(pillow_class_name)
            except PillowNotFoundError:
                pillow = None

        if not pillow:
            notify_error((
                "Could not find pillowtop class '%s' while attempting a retry. "
                "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
                "If not, then this should be looked into."
            ) % pillow_class)
            try:
                error_doc.total_attempts = PillowError.multi_attempts_cutoff() + 1
                error_doc.save()
            finally:
                release_lock(lock, True)
                return

        change = error_doc.change_object
        if pillow.include_docs:
            try:
                change.set_document(pillow.get_couch_db().open_doc(change.id))
            except ResourceNotFound:
                change.deleted = True

        try:
            try:
                from corehq.apps.userreports.pillow import ConfigurableIndicatorPillow
                if isinstance(pillow, ConfigurableIndicatorPillow):
                    raise Exception('this is temporarily not supported!')
            except ImportError:
                pass
            pillow.process_change(change, is_retry_attempt=True)
        except Exception:
            ex_type, ex_value, ex_tb = sys.exc_info()
            error_doc.add_attempt(ex_value, ex_tb)
            error_doc.queued = False
            error_doc.save()
        else:
            error_doc.delete()
        finally:
            release_lock(lock, True)
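
The lock in the example above exists only to deduplicate work when the same error is enqueued twice: the second worker skips instead of waiting. A minimal standalone sketch of that non-blocking acquire pattern with redis-py; the key name, timeout, and `do_processing` are illustrative:

import redis

client = redis.Redis()

def process_once(doc_id):
    lock = client.lock("pillow-retry-processing-%s" % doc_id, timeout=15 * 60)
    # Non-blocking acquire: if another worker holds the lock, it is
    # already doing (or has done) this work, so just bail out.
    if not lock.acquire(blocking=False):
        return
    try:
        do_processing(doc_id)  # hypothetical work function
    finally:
        # Release even on failure so a later retry is not blocked until
        # the timeout expires.
        lock.release()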
Example #40
def send_HTML_email(subject,
                    recipient,
                    html_content,
                    text_content=None,
                    cc=None,
                    email_from=settings.DEFAULT_FROM_EMAIL,
                    file_attachments=None,
                    bcc=None):

    recipient = list(recipient) if not isinstance(recipient, str) else [recipient]

    if not text_content:
        text_content = getattr(settings, 'NO_HTML_EMAIL_MESSAGE',
                               NO_HTML_EMAIL_MESSAGE)
        # this is a temporary spam-catcher, to be removed after fb#178059 is resolved
        if '*****@*****.**' in recipient:
            notify_error(
                "Found an email causing spammy emails to "
                "[email protected]. Here's the HTML content of email"
                "\n {}".format(html_content))

    from_header = {'From': email_from}  # From-header
    connection = get_connection()
    msg = EmailMultiAlternatives(subject,
                                 text_content,
                                 email_from,
                                 recipient,
                                 headers=from_header,
                                 connection=connection,
                                 cc=cc,
                                 bcc=bcc)
    for file in (file_attachments or []):
        if file:
            msg.attach(file["title"], file["file_obj"].getvalue(),
                       file["mimetype"])
    msg.attach_alternative(html_content, "text/html")
    try:
        msg.send()
    except SMTPSenderRefused as e:
        error_subject = _('ERROR: Could not send "%(subject)s"') % {
            'subject': subject,
        }

        if e.smtp_code == 552:
            error_text = _('Could not send email: file size is too large.')
        else:
            error_text = e.smtp_error
        error_text = '%s\n\n%s' % (
            error_text,
            _('Please contact %(support_email)s for assistance.') % {
                'support_email': settings.SUPPORT_EMAIL,
            },
        )

        error_msg = EmailMultiAlternatives(
            error_subject,
            error_text,
            email_from,
            recipient,
            headers=from_header,
            connection=connection,
            cc=cc,
            bcc=bcc,
        )
        error_msg.send()
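
A hedged usage sketch of the helper above; the addresses and attachment payload are made up, and the attachment dict uses the same keys (`title`, `file_obj`, `mimetype`) the loop above reads:

from io import BytesIO

report = BytesIO(b"id,count\n1,42\n")
send_HTML_email(
    subject="Weekly report",
    recipient="ops@example.com",  # a bare string is wrapped into a list
    html_content="<html><body><p>See attached.</p></body></html>",
    file_attachments=[{
        "title": "report.csv",
        "file_obj": report,
        "mimetype": "text/csv",
    }],
)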
Example #41
def send_HTML_email(subject, recipient, html_content, text_content=None,
                    cc=None, email_from=settings.DEFAULT_FROM_EMAIL,
                    file_attachments=None, bcc=None, ga_track=False, ga_tracking_info=None):

    recipient = list(recipient) if not isinstance(recipient, str) else [recipient]

    if not text_content:
        text_content = getattr(settings, 'NO_HTML_EMAIL_MESSAGE',
                               NO_HTML_EMAIL_MESSAGE)
        # this is a temporary spam-catcher, to be removed after fb#178059 is resolved
        if '*****@*****.**' in recipient:
            notify_error("Found an email causing spammy emails to "
                         "[email protected]. Here's the HTML content of email"
                         "\n {}".format(html_content)
            )

    if ga_track and settings.ANALYTICS_IDS.get('GOOGLE_ANALYTICS_API_ID'):
        ga_data = {
            'v': 1,
            'tid': settings.ANALYTICS_IDS.get('GOOGLE_ANALYTICS_API_ID'),
            'cid': uuid.uuid4().hex,
            'dt': subject,  # keep as str: urlencode would mangle a bytes repr on Python 3
            't': 'event',
            'ec': 'email'
        }
        extra_data = ga_tracking_info if ga_tracking_info else {}
        ga_data.update(extra_data)
        post_data = urlencode(ga_data)
        url = "https://www.google-analytics.com/collect?" + post_data
        new_content = '<img src="{url}&ea=open"/>\n</body>'.format(url=url)
        html_content, count = re.subn(r'(.*)</body>', r'\1'+new_content, html_content)
        assert count != 0, 'Attempted to add tracking to HTML Email with no closing body tag'

    from_header = {'From': email_from}  # From-header
    connection = get_connection()
    msg = EmailMultiAlternatives(subject, text_content, email_from,
                                 recipient, headers=from_header,
                                 connection=connection, cc=cc, bcc=bcc)
    for file in (file_attachments or []):
        if file:
            msg.attach(file["title"], file["file_obj"].getvalue(),
                       file["mimetype"])
    msg.attach_alternative(html_content, "text/html")
    try:
        msg.send()
    except SMTPSenderRefused as e:
        error_subject = _('ERROR: Could not send "%(subject)s"') % {
            'subject': subject,
        }

        if e.smtp_code == 552:
            error_text = _('Could not send email: file size is too large.')
        else:
            error_text = e.smtp_error
        error_text = '%s\n\n%s' % (
            error_text,
            _('Please contact %(support_email)s for assistance.') % {
                'support_email': settings.SUPPORT_EMAIL,
            },
        )

        error_msg = EmailMultiAlternatives(
            error_subject,
            error_text,
            email_from,
            recipient,
            headers=from_header,
            connection=connection,
            cc=cc,
            bcc=bcc,
        )
        error_msg.send()

    if ga_track and settings.ANALYTICS_IDS.get('GOOGLE_ANALYTICS_API_ID'):
        try:
            try:
                requests.get(url + "&ea=send")
            except SSLError:
                # if we get an ssl error try without verification
                requests.get(url + "&ea=send", verify=False)
        except Exception as e:
            # never fail hard on analytics
            logging.exception(u'Unable to send google analytics request for tracked email: {}'.format(e))
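
Example #41 tracks opens by injecting a 1x1 pixel just before the closing `</body>` tag and tracks sends by firing the same Measurement Protocol URL with `ea=send`. A small runnable illustration of the pixel injection, with a made-up `tid` and `cid`:

import re

url = ("https://www.google-analytics.com/collect?"
       "v=1&tid=UA-12345-6&cid=deadbeef&dt=Weekly+report&t=event&ec=email")
html_content = "<html><body><p>Hello</p></body></html>"

# Rewrite the closing tag so the tracking pixel is the last body element.
new_content = '<img src="{url}&ea=open"/>\n</body>'.format(url=url)
html_content, count = re.subn(r'(.*)</body>', r'\1' + new_content, html_content)
assert count != 0
print(html_content)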
Example #42
def process_pillow_retry(error_doc_id):
    # Redis error logged in get_redis_client
    try:
        client = cache_core.get_redis_client()
    except cache_core.RedisClientError:
        return

    # Prevent more than one task from processing this error, just in case
    # it got enqueued twice.
    lock = client.lock(
        "pillow-retry-processing-%s" % error_doc_id,
        timeout=settings.PILLOW_RETRY_PROCESSING_LOCK_TIMEOUT * 60
    )
    if lock.acquire(blocking=False):
        try:
            error_doc = PillowError.objects.get(id=error_doc_id)
        except PillowError.DoesNotExist:
            release_lock(lock, True)
            return

        pillow_name_or_class = error_doc.pillow
        try:
            pillow = get_pillow_by_name(pillow_name_or_class)
        except PillowNotFoundError:
            if not settings.UNIT_TESTING:
                _assert = soft_assert(to='@'.join(['czue', 'dimagi.com']))
                _assert(False, 'Pillow retry {} is still using legacy class {}'.format(
                    error_doc.pk, pillow_name_or_class
                ))
            pillow = _try_legacy_import(pillow_name_or_class)

        if not pillow:
            notify_error((
                "Could not find pillowtop class '%s' while attempting a retry. "
                "If this pillow was recently deleted then this will be automatically cleaned up eventually. "
                "If not, then this should be looked into."
            ) % pillow_name_or_class)
            try:
                error_doc.total_attempts = PillowError.multi_attempts_cutoff() + 1
                error_doc.save()
            finally:
                release_lock(lock, True)
                return

        change = error_doc.change_object
        if getattr(pillow, 'include_docs', False):
            try:
                change.set_document(pillow.get_couch_db().open_doc(change.id))
            except ResourceNotFound:
                change.deleted = True

        try:
            try:
                from corehq.apps.userreports.pillow import ConfigurableReportKafkaPillow
                if isinstance(pillow, ConfigurableReportKafkaPillow):
                    raise Exception('this is temporarily not supported!')
            except ImportError:
                pass
            pillow.process_change(change, is_retry_attempt=True)
        except Exception:
            ex_type, ex_value, ex_tb = sys.exc_info()
            error_doc.add_attempt(ex_value, ex_tb)
            error_doc.queued = False
            error_doc.save()
        else:
            error_doc.delete()
        finally:
            release_lock(lock, True)
Example #43
def send_HTML_email(subject,
                    recipient,
                    html_content,
                    text_content=None,
                    cc=None,
                    email_from=settings.DEFAULT_FROM_EMAIL,
                    file_attachments=None,
                    bcc=None,
                    ga_track=False,
                    ga_tracking_info=None):

    recipient = list(recipient) if not isinstance(recipient, str) else [recipient]

    if not text_content:
        text_content = getattr(settings, 'NO_HTML_EMAIL_MESSAGE',
                               NO_HTML_EMAIL_MESSAGE)
        # this is a temporary spam-catcher, to be removed after fb#178059 is resolved
        if '*****@*****.**' in recipient:
            notify_error(
                "Found an email causing spammy emails to "
                "[email protected]. Here's the HTML content of email"
                "\n {}".format(html_content))

    if ga_track and settings.ANALYTICS_IDS.get('GOOGLE_ANALYTICS_API_ID'):
        ga_data = {
            'v': 1,
            'tid': settings.ANALYTICS_IDS.get('GOOGLE_ANALYTICS_API_ID'),
            'cid': uuid.uuid4().hex,
            'dt': subject,  # keep as str: urlencode would mangle a bytes repr on Python 3
            't': 'event',
            'ec': 'email'
        }
        extra_data = ga_tracking_info if ga_tracking_info else {}
        ga_data.update(extra_data)
        post_data = urlencode(ga_data)
        url = "https://www.google-analytics.com/collect?" + post_data
        new_content = '<img src="{url}&ea=open"/>\n</body>'.format(url=url)
        html_content, count = re.subn(r'(.*)</body>', r'\1' + new_content,
                                      html_content)
        assert count != 0, 'Attempted to add tracking to HTML Email with no closing body tag'

    from_header = {'From': email_from}  # From-header
    connection = get_connection()
    msg = EmailMultiAlternatives(subject,
                                 text_content,
                                 email_from,
                                 recipient,
                                 headers=from_header,
                                 connection=connection,
                                 cc=cc,
                                 bcc=bcc)
    for file in (file_attachments or []):
        if file:
            msg.attach(file["title"], file["file_obj"].getvalue(),
                       file["mimetype"])
    msg.attach_alternative(html_content, "text/html")
    try:
        msg.send()
    except SMTPSenderRefused as e:
        error_subject = _('ERROR: Could not send "%(subject)s"') % {
            'subject': subject,
        }

        if e.smtp_code == 552:
            error_text = _('Could not send email: file size is too large.')
        else:
            error_text = e.smtp_error
        error_text = '%s\n\n%s' % (
            error_text,
            _('Please contact %(support_email)s for assistance.') % {
                'support_email': settings.SUPPORT_EMAIL,
            },
        )

        error_msg = EmailMultiAlternatives(
            error_subject,
            error_text,
            email_from,
            recipient,
            headers=from_header,
            connection=connection,
            cc=cc,
            bcc=bcc,
        )
        error_msg.send()

    if ga_track and settings.ANALYTICS_IDS.get('GOOGLE_ANALYTICS_API_ID'):
        try:
            try:
                requests.get(url + "&ea=send")
            except SSLError:
                # if we get an ssl error try without verification
                requests.get(url + "&ea=send", verify=False)
        except Exception as e:
            # never fail hard on analytics
            logging.exception(
                u'Unable to send google analytics request for tracked email: {}'
                .format(e))