Example #1
    def hard_rebuild_case(domain, case_id, detail, lock=True):
        if lock:
            # only record metric if locking since otherwise it has been
            # (most likely) recorded elsewhere
            case_load_counter("rebuild_case", domain)()
        case, lock_obj = FormProcessorSQL.get_case_with_lock(case_id, lock=lock)
        found = bool(case)
        if not found:
            case = CommCareCaseSQL(case_id=case_id, domain=domain)
            if lock:
                lock_obj = CommCareCaseSQL.get_obj_lock_by_id(case_id)
                acquire_lock(lock_obj, degrade_gracefully=False)

        try:
            assert case.domain == domain, (case.domain, domain)
            case, rebuild_transaction = FormProcessorSQL._rebuild_case_from_transactions(case, detail)
            if case.is_deleted and not case.is_saved():
                return None

            case.server_modified_on = rebuild_transaction.server_date
            CaseAccessorSQL.save_case(case)
            publish_case_saved(case)
            return case
        finally:
            release_lock(lock_obj, degrade_gracefully=True)
Example #2
def rebuild_and_diff_cases(sql_case, couch_case, original_couch_case, diff,
                           dd_count):
    """Try rebuilding SQL case and save if rebuild resolves diffs

    :param sql_case: CommCareCaseSQL object.
    :param couch_case: JSON-ified version of CommCareCase.
    :param diff: function to produce diffs between couch and SQL case JSON.
    :param dd_count: metrics recording counter function.
    :returns: list of diffs returned by `diff(couch_case, rebuilt_case_json)`
    """
    lock = CommCareCaseSQL.get_obj_lock_by_id(sql_case.case_id)
    acquire_lock(lock, degrade_gracefully=False)
    try:
        if should_sort_sql_transactions(sql_case, couch_case):
            new_case = rebuild_case_with_couch_action_order(
                sql_case, couch_case)
            dd_count("commcare.couchsqlmigration.case.rebuild.sql.sort")
        else:
            new_case = rebuild_case(sql_case)
            dd_count("commcare.couchsqlmigration.case.rebuild.sql")
        sql_json = new_case.to_json()
        diffs = diff(couch_case, sql_json)
        if diffs:
            original_diffs = diff(original_couch_case, sql_json)
            if not original_diffs:
                log.info("original Couch case matches rebuilt SQL case: %s",
                         sql_case.case_id)
                diffs = original_diffs
        if not diffs:
            # save case only if rebuild resolves diffs
            CaseAccessorSQL.save_case(new_case)
            publish_case_saved(new_case)
    finally:
        release_lock(lock, degrade_gracefully=True)
    return sql_json, diffs
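A hedged sketch of how this helper might be invoked for a single migrated case; the diff function and metrics counter below are illustrative stand-ins, not the real migration helpers:

def check_one_case(sql_case, couch_case_json, original_couch_case_json):
    # stand-in diff: report top-level keys whose values differ (illustration only)
    def diff_fn(couch_json, sql_json):
        return [key for key in couch_json if couch_json.get(key) != sql_json.get(key)]

    # stand-in metrics counter that simply ignores the metric name
    counter = lambda metric: None

    sql_json, diffs = rebuild_and_diff_cases(
        sql_case, couch_case_json, original_couch_case_json, diff_fn, counter)
    return diffs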
Example #3
def recalculate_stagnant_cases():
    domain = 'icds-cas'
    config_ids = [
        'static-icds-cas-static-ccs_record_cases_monthly_v2',
        'static-icds-cas-static-ccs_record_cases_monthly_tableau_v2',
        'static-icds-cas-static-child_cases_monthly_v2',
    ]

    stagnant_cases = set()

    for config_id in config_ids:
        config, is_static = get_datasource_config(config_id, domain)
        adapter = get_indicator_adapter(config)
        case_ids = _find_stagnant_cases(adapter)
        celery_task_logger.info(
            "Found {} stagnant cases in config {}".format(len(case_ids), config_id)
        )
        stagnant_cases = stagnant_cases.union(set(case_ids))
        celery_task_logger.info(
            "Total number of stagant cases is now {}".format(len(stagnant_cases))
        )

    case_accessor = CaseAccessors(domain)
    num_stagnant_cases = len(stagnant_cases)
    current_case_num = 0
    for case_ids in chunked(stagnant_cases, 1000):
        current_case_num += len(case_ids)
        cases = case_accessor.get_cases(list(case_ids))
        for case in cases:
            publish_case_saved(case, send_post_save_signal=False)
        celery_task_logger.info(
            "Resaved {} / {} cases".format(current_case_num, num_stagnant_cases)
        )
Example #4
    def hard_delete_case_and_forms(cls, domain, case, xforms):
        form_ids = [xform.form_id for xform in xforms]
        FormAccessorSQL.hard_delete_forms(domain, form_ids)
        CaseAccessorSQL.hard_delete_cases(domain, [case.case_id])
        for form in xforms:
            form.state |= XFormInstanceSQL.DELETED
            publish_form_saved(form)
        case.deleted = True
        publish_case_saved(case)
Example #5
    def publish_changes_to_kafka(processed_forms, cases, stock_result):
        publish_form_saved(processed_forms.submitted)
        cases = cases or []
        for case in cases:
            publish_case_saved(case)

        if stock_result:
            for ledger in stock_result.models_to_save:
                publish_ledger_v2_saved(ledger)
Example #6
def resave_case(domain, case, send_post_save_signal=True):
    from corehq.form_processor.change_publishers import publish_case_saved
    if should_use_sql_backend(domain):
        publish_case_saved(case, send_post_save_signal)
    else:
        if send_post_save_signal:
            case.save()
        else:
            CommCareCase.get_db().save_doc(case._doc)  # don't just call save to avoid signals
Example #7
    def hard_rebuild_case(domain, case_id, detail):
        try:
            case = CaseAccessorSQL.get_case(case_id)
            assert case.domain == domain
            found = True
        except CaseNotFound:
            case = CommCareCaseSQL(case_id=case_id, domain=domain)
            found = False

        case = FormProcessorSQL._rebuild_case_from_transactions(case, detail)
        if case.is_deleted and not found:
            return None
        CaseAccessorSQL.save_case(case)
        publish_case_saved(case)
        return case
Example #8
    def _publish_changes(processed_forms, cases, stock_result):
        # todo: form deprecations?
        publish_form_saved(processed_forms.submitted)
        if processed_forms.submitted.is_duplicate:
            # for duplicate forms, also publish changes for the original form since the fact that
            # we're getting a duplicate indicates that we may not have fully processed/published it
            # the first time
            republish_all_changes_for_form(
                processed_forms.submitted.domain, processed_forms.submitted.orig_id)

        cases = cases or []
        for case in cases:
            publish_case_saved(case)

        if stock_result:
            for ledger in stock_result.models_to_save:
                publish_ledger_v2_saved(ledger)
Example #9
    def hard_rebuild_case(domain, case_id, detail, lock=True):
        case, lock_obj = FormProcessorSQL.get_case_with_lock(case_id, lock=lock)
        found = bool(case)
        if not found:
            case = CommCareCaseSQL(case_id=case_id, domain=domain)
            if lock:
                lock_obj = CommCareCaseSQL.get_obj_lock_by_id(case_id)
                acquire_lock(lock_obj, degrade_gracefully=False)

        try:
            assert case.domain == domain, (case.domain, domain)
            case, rebuild_transaction = FormProcessorSQL._rebuild_case_from_transactions(case, detail)
            if case.is_deleted and not case.is_saved():
                return None

            case.server_modified_on = rebuild_transaction.server_date
            CaseAccessorSQL.save_case(case)
            publish_case_saved(case)
            return case
        finally:
            release_lock(lock_obj, degrade_gracefully=True)
Example #10
    def _publish_changes(processed_forms, cases):
        # todo: form deprecations?
        publish_form_saved(processed_forms.submitted)
        cases = cases or []
        for case in cases:
            publish_case_saved(case)
Example #11
def resync_case_to_es(domain, case):
    if should_use_sql_backend(domain):
        publish_case_saved(case)
    else:
        CommCareCase.get_db().save_doc(case._doc)  # don't just call save to avoid signals
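Every example above ends with the same pattern: persist the case, then call publish_case_saved so the change feed picks up the update. A minimal sketch of that pattern for the SQL backend, assuming the CaseAccessorSQL import path (the publish_case_saved import is taken from Example #6):

from corehq.form_processor.backends.sql.dbaccessors import CaseAccessorSQL  # assumed path
from corehq.form_processor.change_publishers import publish_case_saved

def save_and_publish(case):
    # persist the case first, then emit the change-feed event so that
    # downstream consumers see the update
    CaseAccessorSQL.save_case(case)
    publish_case_saved(case)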