Example #1
 def close_or_update_investigation_cases(self, all_cases, retain_case_id,
                                         episode_case_id,
                                         investigation_interval, updates):
     all_case_ids = [
         investigation_case.case_id for investigation_case in all_cases
     ]
     # ToDo: refetch investigation_cases in case the len of set is different from list
     # remove duplicates in case ids to remove so that we don't retain and close
     # the same case by mistake
     all_case_ids = set(all_case_ids)
     case_ids_to_close = all_case_ids.copy()
     case_ids_to_close.remove(retain_case_id)
     for investigation_case in all_cases:
         self.writerow({
             "episode_case_id": episode_case_id,
             "investigation_interval": investigation_interval,
             "investigation_case_id": investigation_case.case_id,
             "modified_on": investigation_case.get_case_property(DATE_MODIFIED_FIELD),
             "updates": updates,
             "update/close": ('update' if investigation_case.case_id == retain_case_id
                              else 'closed')
         })
     if self.commit:
         updates = [(case_id, {
             'close_reason': "duplicate_reconciliation"
         }, True) for case_id in case_ids_to_close]
         bulk_update_cases(DOMAIN, updates, self.__module__)
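Every example on this page passes bulk_update_cases the same three arguments: a domain, a list of (case_id, {case_properties}, close_flag) tuples, and a device/module identifier for the submitted form. A minimal, framework-free sketch of how Example #1 assembles its close list (build_close_updates is a hypothetical helper, not from the source):

def build_close_updates(all_case_ids, retain_case_id,
                        reason="duplicate_reconciliation"):
    # De-duplicate via a set so a repeated id can't be both retained and closed.
    ids_to_close = set(all_case_ids) - {retain_case_id}
    # The third tuple element is the close flag: True closes the case.
    return [(case_id, {"close_reason": reason}, True)
            for case_id in sorted(ids_to_close)]

assert build_close_updates(["a", "b", "a"], "a") == [
    ("b", {"close_reason": "duplicate_reconciliation"}, True)]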
Example #2
    def close_cases(self, all_cases, occurrence_case_id, retain_case):
        # remove duplicates in case ids to remove so that we don't retain and close
        # the same case by mistake
        all_case_ids = {case.case_id for case in all_cases}
        retain_case_id = retain_case.case_id
        case_ids_to_close = all_case_ids.copy()
        case_ids_to_close.remove(retain_case_id)

        case_accessor = CaseAccessors(DOMAIN)
        closing_extension_case_ids = case_accessor.get_extension_case_ids(
            case_ids_to_close)

        self.writerow({
            "occurrence_case_id": occurrence_case_id,
            "retain_case_id": retain_case_id,
            "closed_case_ids": ','.join(map(str, case_ids_to_close)),
            "closed_extension_case_ids": ','.join(map(str, closing_extension_case_ids)),
            "person_case_version": self.person_case.get_case_property('case_version'),
            "person_case_dataset": self.person_case.get_case_property('dataset')
        })
        if self.commit:
            updates = [(case_id, {
                'referral_closed_reason': "duplicate_reconciliation",
                'referral_closed_date': datetime.datetime.now(
                    pytz.timezone(ENIKSHAY_TIMEZONE)).date()
            }, True) for case_id in case_ids_to_close]
            bulk_update_cases(DOMAIN, updates, self.__module__)
Example #3
    def handle(self, domain, **options):
        batch_size = 100
        updates = []
        errors = []

        case_ids = get_all_episode_ids(domain)
        cases = iter_all_active_person_episode_cases(domain, case_ids)

        for person, episode in with_progress_bar(cases,
                                                 len(case_ids),
                                                 oneline=False):
            try:
                update_json = self.updater(domain, person,
                                           episode).update_json()
            except Exception as e:
                errors.append(
                    [person.case_id, episode.case_id, episode.domain, e])
                continue

            if update_json:
                updates.append((episode.case_id, update_json, False))
            if len(updates) >= batch_size:
                if options['commit']:
                    bulk_update_cases(domain, updates, self.__module__)
                updates = []

        if len(updates) > 0:
            if options['commit']:
                bulk_update_cases(domain, updates, self.__module__)

        self.write_errors(errors)
Example #4
    def run_batch(self, case_ids):
        """Run all case updaters against the case_ids passed in
        """
        device_id = "%s.%s" % (__name__, type(self).__name__)
        update_count = 0
        noupdate_count = 0
        error_count = 0
        success_count = 0
        case_batches = 0
        ledger_batches = 0

        errors = []
        with Timer() as t:
            batch_size = 100
            case_updates = []
            ledger_updates = []
            for episode in self._get_open_episode_cases(case_ids):
                did_error = False
                update_json = {}
                for updater in self.updaters:
                    try:
                        _updater = updater(self.domain, episode)
                        case_update = _updater.update_json()
                        if hasattr(_updater, 'ledger_updates'):
                            ledger_updates.extend(_updater.ledger_updates())
                        update_json.update(get_updated_fields(episode.dynamic_case_properties(), case_update))
                    except Exception as e:
                        did_error = True
                        error = [episode.case_id, episode.domain, updater.__name__, e]
                        errors.append(error)
                        logger.error(error)
                if did_error:
                    error_count += 1
                else:
                    success_count += 1

                if update_json:
                    case_updates.append((episode.case_id, update_json, False))
                    update_count += 1
                else:
                    noupdate_count += 1
                if len(case_updates) >= batch_size:
                    bulk_update_cases(self.domain, case_updates, device_id)
                    case_updates = []
                    case_batches += 1
                if len(ledger_updates) >= batch_size:
                    bulk_update_ledger_cases(self.domain, ledger_updates)
                    ledger_updates = []
                    ledger_batches += 1

            if len(case_updates) > 0:
                bulk_update_cases(self.domain, case_updates, device_id)
            if len(ledger_updates) > 0:
                bulk_update_ledger_cases(self.domain, ledger_updates)

        return BatchStatus(update_count, noupdate_count, success_count, errors, case_batches, ledger_batches,
                           t.interval)
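Examples #3, #4, #17 and #18 all hand-roll the same accumulate-and-flush loop: append updates to a list, submit whenever it reaches batch_size, and submit whatever is left after the loop ends. A generic, runnable sketch of that pattern (flush is a caller-supplied callable standing in for bulk_update_cases):

def flush_every(items, batch_size, flush):
    # Call flush(batch) for each full batch, then once more for any leftovers.
    batch = []
    for item in items:
        batch.append(item)
        if len(batch) >= batch_size:
            flush(batch)
            batch = []
    if batch:
        flush(batch)

flushed = []
flush_every(range(7), 3, flushed.append)
assert flushed == [[0, 1, 2], [3, 4, 5], [6]]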
Example #5
 def update_vouchers(self, voucher_updates):
     print "updating voucher cases"
     for chunk in chunked(with_progress_bar(voucher_updates), 100):
         updates = [
             (update.case_id, update.properties, False)
             for update in chunk
         ]
         if self.commit:
             bulk_update_cases(self.domain, updates, self.__module__)
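Examples #5 and #7 batch their updates with chunked(..., 100) so that no single submitted form carries more than 100 case blocks. A stand-in for the chunked() utility, assuming only the behavior visible in these examples:

def chunked(iterable, size):
    # Yield lists of at most `size` consecutive items.
    buf = []
    for item in iterable:
        buf.append(item)
        if len(buf) == size:
            yield buf
            buf = []
    if buf:
        yield buf

assert [c for c in chunked(range(5), 2)] == [[0, 1], [2, 3], [4]]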
Example #6
def payment_confirmation(request, domain):
    try:
        updates = _get_case_updates(request, domain)
        updates = _validate_updates_exist(domain, updates)
    except ApiError as e:
        if not settings.UNIT_TESTING:
            notify_exception(request, "BETS sent the eNikshay API a bad request.")
        return json_response({"error": e.message}, status_code=e.status_code)

    bulk_update_cases(domain, [
        (update.case_id, update.properties, False) for update in updates
    ], __name__ + ".payment_confirmation")
    return json_response({'status': SUCCESS})
Example #7
def process_payment_confirmations(domain, payment_confirmations):
    for chunk in chunked(payment_confirmations, 100):
        try:
            bulk_update_cases(
                domain, [(update.case_id, update.properties, False)
                         for update in chunk],
                "custom.enikshay.integrations.bets.views.payment_confirmation")
        except Exception as e:
            notify_exception(
                request=None,
                message=PAYMENT_CONFIRMATION_FAILURE,
                details=json.dumps([update.to_json() for update in chunk]),
            )
Example #8
    def handle(self, domain, **options):
        self.domain = domain
        self.accessor = CaseAccessors(domain)
        commit = options['commit']
        self.id_generator = ReadableIdGenerator(domain, commit)

        filename = '{}-{}.csv'.format(
            self.__module__.split('.')[-1],
            datetime.datetime.now().strftime('%Y-%m-%d_%H.%M.%S'))
        print("Logging actions to {}".format(filename))
        with open(filename, 'w') as f:
            logfile = csv.DictWriter(f,
                                     self.logfile_fields,
                                     extrasaction='ignore')
            logfile.writeheader()

            print("Finding duplicates")
            bad_case_stubs = get_duplicated_case_stubs(self.domain,
                                                       CASE_TYPE_PERSON)
            bad_cases = self.accessor.iter_cases(stub['case_id']
                                                 for stub in bad_case_stubs)

            print("Processing duplicate cases")
            for person_case in with_progress_bar(bad_cases,
                                                 len(bad_case_stubs)):
                if person_case.get_case_property(
                        'enrolled_in_private') == 'true':
                    updates = list(
                        filter(None, self.get_private_updates(person_case)))
                else:
                    updates = list(
                        filter(None, self.get_public_updates(person_case)))

                person_info = self.get_person_case_info(person_case)
                for case, update in updates:
                    log = {
                        unidecode(k): unidecode(v)
                        for d in [person_info, update] for k, v in d.items()
                        if v
                    }
                    log['case_type'] = case.type
                    log['case_id'] = case.case_id
                    logfile.writerow(log)

                if commit:
                    update_tuples = [(case.case_id, update, False)
                                     for case, update in updates]
                    bulk_update_cases(self.domain, update_tuples,
                                      self.__module__)
Example #9
 def handle(self, domain, infile, logfile, *args, **options):
     self.domain = domain
     self.case_accessor = CaseAccessors(self.domain)
     with open(infile,
               'r', encoding='utf-8') as f, open(logfile,
                                                 'w',
                                                 encoding='utf-8') as log:
         reader = csv.reader(f)
         _, case_prop_name = next(reader)
         log.write('--------Successful Form Ids----------\n')
         failed_updates = []
         for rows in chunked(reader, 100):
             updates = [(case_id, {case_prop_name: prop}, False)
                        for case_id, prop in rows]
             try:
                 xform, cases = bulk_update_cases(self.domain, updates,
                                                  self.__module__)
                 log.write(xform.form_id + '\n')
             except Exception as e:
                 print('error')
                 print(str(e))
                 failed_updates.extend(u[0] for u in updates)
         log.write('--------Failed Cases--------------\n')
         for case_id in failed_updates:
             log.write(case_id + '\n')
         log.write('--------Logging Complete--------------\n')
         print('-------------COMPLETE--------------')
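Examples #9 through #15 wrap each batch submission in try/except and, when a submission fails, record every case id in that chunk for later retry. A distilled, runnable sketch of the pattern (submit and log are caller-supplied; submit stands in for bulk_update_cases):

def submit_batches(batches, submit, log):
    # batches: iterable of lists of (case_id, update_dict, close_flag) tuples.
    failed_case_ids = []
    for batch in batches:
        try:
            form_id = submit(batch)
            log.write(form_id + '\n')
        except Exception:
            # A failed submission loses the whole batch, so log every id in it.
            failed_case_ids.extend(case_id for case_id, _, _ in batch)
    return failed_case_ids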
Example #10
 def handle(self, shard, log_file, **options):
     self.domain = 'icds-cas'
     self.db = shard
     self.case_accessor = CaseAccessors(self.domain)
     failed_updates = []
     with open(log_file, "w", encoding='utf-8') as fh:
         fh.write('--------Successful Form Ids----------\n')
         chunk_num = 1
         for orphan_case_chunk in self._get_cases():
             print('Currently on chunk {}'.format(chunk_num))
             case_tuples = [(case_id, {}, True) for case_id in orphan_case_chunk]
             try:
                 xform, cases = bulk_update_cases(
                     self.domain, case_tuples, self.__module__)
                 fh.write(xform.form_id + '\n')
             except LocalSubmissionError as e:
                 print('submission error')
                 print(six.text_type(e))
                 failed_updates.extend(orphan_case_chunk)
             except Exception as e:
                 print('unexpected error')
                 print(six.text_type(e))
                 failed_updates.extend(orphan_case_chunk)
             chunk_num += 1
         fh.write('--------Failed Cases--------------\n')
         for case_id in failed_updates:
             fh.write(case_id + '\n')
         fh.write('--------Logging Complete--------------\n')
         print('-------------COMPLETE--------------')
Example #11
 def handle(self, domain, log_file, **options):
     total_cases = CaseES().domain(domain).case_type('household').is_closed().count()
     self.case_accessor = CaseAccessors(domain)
     failed_updates = []
     with open(log_file, "w", encoding='utf-8') as fh:
         fh.write('--------Successful Form Ids----------\n')
         for cases in chunked(with_progress_bar(self._get_cases_to_process(domain), total_cases), 100):
             related_cases = self._get_related_cases(cases)
             case_tuples = [(case_id, {}, True) for case_id in related_cases]
             try:
                 xform, cases = bulk_update_cases(
                     domain, case_tuples, self.__module__)
                 fh.write(xform.form_id + '\n')
             except LocalSubmissionError as e:
                 print('submission error')
                 print(six.text_type(e))
                 failed_updates.extend(related_cases)
             except Exception as e:
                 print('unexpected error')
                 print(six.text_type(e))
                 failed_updates.extend(related_cases)
         fh.write('--------Failed Cases--------------\n')
         for case_id in failed_updates:
              fh.write(case_id + '\n')
         print('-------------COMPLETE--------------')
Example #12
    def update_cases(self):
        sector = get_sector(self._person_case)
        case_updates = []
        for prop, value in six.iteritems(self.request_json):
            try:
                param = self.api_spec.get_param(prop, sector)
            except KeyError:
                raise NinetyNineDotsException(
                    "{} is not a valid parameter to update".format(prop))

            if not param.direction & DIRECTION_INBOUND:
                raise NinetyNineDotsException(
                    "{} is not a valid parameter to update".format(prop))

            case_type = param.get_by_sector('case_type', sector)
            case_id = self.case_types_to_cases[case_type].case_id

            if param.setter:
                update = to_function(param.setter)(param, value, sector)
            else:
                update = {param.get_by_sector('case_property', sector): value}
            case_updates.append((case_id, update, False))

        return bulk_update_cases(
            self.domain,
            case_updates,
            "{}.{}".format(self.__module__, self.__class__.__name__),
        )
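Example #12 returns the bulk_update_cases result directly, and Examples #9, #10 and #13 unpack it as (xform, cases): the submitted form plus the updated case objects. A stub illustrating that apparent contract (fake_bulk_update_cases is invented here for demonstration only):

class _StubForm(object):
    form_id = "stub-form-id"

def fake_bulk_update_cases(domain, case_updates, device_id):
    # Mirror the apparent contract: return (xform, cases).
    return _StubForm(), [case_id for case_id, _, _ in case_updates]

xform, cases = fake_bulk_update_cases(
    "my-domain", [("abc123", {"name": "x"}, False)], __name__)
assert xform.form_id == "stub-form-id" and cases == ["abc123"]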
Example #13
 def handle(self, shard, log_file, **options):
     self.domain = 'icds-cas'
     self.db = shard
     self.case_accessor = CaseAccessors(self.domain)
     failed_updates = []
     with open(log_file, "w") as fh:
         fh.write('--------Successful Form Ids----------\n')
         chunk_num = 1
         for orphan_case_chunk in self._get_cases():
             print('Currently on chunk {}'.format(chunk_num))
             case_tuples = [(case_id, {}, True) for case_id in orphan_case_chunk]
             try:
                 xform, cases = bulk_update_cases(
                     self.domain, case_tuples, self.__module__)
                 fh.write(xform.form_id + '\n')
             except LocalSubmissionError as e:
                 print('submission error')
                 print(six.text_type(e))
                 failed_updates.extend(orphan_case_chunk)
             except Exception as e:
                 print('unexpected error')
                 print(six.text_type(e))
                 failed_updates.extend(orphan_case_chunk)
             chunk_num += 1
         fh.write('--------Failed Cases--------------\n')
         for case_id in failed_updates:
             fh.write(case_id + '\n')
         fh.write('--------Logging Complete--------------\n')
         print('-------------COMPLETE--------------')
Example #14
    def handle(self, log_file, **options):
        self.domain = 'hki-nepal-suaahara-2'
        loc_mapping = {}
        locs = SQLLocation.objects.filter(domain=self.domain, level=4)
        for loc in locs:
            loc_mapping[loc.site_code] = loc.location_id

        failed_updates = []
        household_cases = CaseES().domain(self.domain).case_type('household').count()
        member_cases = CaseES().domain(self.domain).case_type('household_member').count()
        total_cases = household_cases + member_cases
        with open(log_file, "w", encoding='utf-8') as fh:
            fh.write('--------Successful Form Ids----------\n')
            for cases in chunked(with_progress_bar(self._get_cases_to_process(), total_cases), 100):
                cases_to_update = self._process_cases(cases, failed_updates, loc_mapping)
                try:
                    xform, cases = bulk_update_cases(
                        self.domain, cases_to_update, self.__module__)
                    fh.write(xform.form_id + '\n')
                except LocalSubmissionError as e:
                    print(six.text_type(e))
                    failed_updates.extend(case[0] for case in cases_to_update)
            fh.write('--------Failed Cases--------------\n')
            for case_id in failed_updates:
                fh.write(case_id + '\n')
Example #15
 def handle(self, domain, log_file, **options):
     total_cases = CaseES().domain(domain).case_type(
         'household').is_closed().count()
     self.case_accessor = CaseAccessors(domain)
     failed_updates = []
     with open(log_file, "w", encoding='utf-8') as fh:
         fh.write('--------Successful Form Ids----------\n')
         for cases in chunked(
                 with_progress_bar(self._get_cases_to_process(domain),
                                   total_cases), 100):
             related_cases = self._get_related_cases(cases)
             case_tuples = [(case_id, {}, True) for case_id in related_cases]
             try:
                 xform, cases = bulk_update_cases(
                     domain, case_tuples, self.__module__)
                 fh.write(xform.form_id + '\n')
             except LocalSubmissionError as e:
                 print('submission error')
                 print(six.text_type(e))
                 failed_updates.extend(related_cases)
             except Exception as e:
                 print('unexpected error')
                 print(six.text_type(e))
                 failed_updates.extend(related_cases)
         fh.write('--------Failed Cases--------------\n')
         for case_id in failed_updates:
             fh.write(case_id + '\n')
         print('-------------COMPLETE--------------')
Example #16
    def close_cases(self, all_cases, retain_case, associated_case_id,
                    reconciling_case_type):
        # remove duplicates in case ids to remove so that we don't retain and close
        # the same case by mistake
        all_case_ids = {case.case_id for case in all_cases}
        retain_case_id = retain_case.case_id
        case_ids_to_close = all_case_ids.copy()
        case_ids_to_close.remove(retain_case_id)

        case_accessor = CaseAccessors(DOMAIN)
        closing_extension_case_ids = case_accessor.get_extension_case_ids(
            case_ids_to_close)

        self.writerow({
            "case_type": reconciling_case_type,
            "associated_case_id": associated_case_id,
            "retain_case_id": retain_case_id,
            "closed_case_ids": ','.join(map(str, case_ids_to_close)),
            "closed_extension_case_ids": ','.join(map(str, closing_extension_case_ids)),
            "retained_case_date_opened": str(retain_case.opened_on),
            "retained_case_episode_type": retain_case.get_case_property("episode_type"),
            "retained_case_is_active": retain_case.get_case_property("is_active"),
            "closed_cases_details": {
                a_case.case_id: {
                    "last_modified_at(utc)": str(last_user_edit_at(a_case)),
                    "episode_type": a_case.get_case_property("episode_type"),
                    "is_active": a_case.get_case_property("is_active")
                }
                for a_case in all_cases if a_case.case_id != retain_case_id
            }
        })
        if self.commit:
            updates = [(case_id, {
                'close_reason': "duplicate_reconciliation"
            }, True) for case_id in case_ids_to_close]
            bulk_update_cases(DOMAIN, updates, self.__module__)
Example #17
 def handle(self, domain, case_type, *args, **options):
     perform_update = True
     query = (CaseES(es_instance_alias=ES_EXPORT_INSTANCE)
              .domain(domain)
              .case_type(case_type)
              .is_closed(False)
              .term('name.exact', ''))
     cases_count = query.count()
     print("Number of cases to be updated approximately: %s" % cases_count)
     if not input("Do you wish to update cases (y/n)") == 'y':
         perform_update = False
         if not input("Do you wish to just log updates (y/n)") == 'y':
             exit(0)
     case_ids = query.get_ids()
     print("Begin iterating %s cases" % len(case_ids))
     case_accessor = CaseAccessors(domain)
     case_updates = []
     filename = "case_updates_%s_%s_%s.csv" % (domain, case_type,
                                               datetime.utcnow())
     with open(filename, 'w') as f:
         writer = csv.DictWriter(f, ['case_id', 'new_value'])
         writer.writeheader()
         for case_id in with_progress_bar(case_ids):
             case = case_accessor.get_case(case_id)
             if case.name:
                 continue
             update_to_name = get_last_non_blank_value(case, 'name')
             if update_to_name:
                 writer.writerow({
                     'case_id': case_id,
                     'new_value': update_to_name
                 })
                 if perform_update:
                     case_updates.append((case_id, {
                         'name': update_to_name
                     }, False))
             # update batch when we have the threshold
             if len(case_updates) == CASE_UPDATE_BATCH:
                 bulk_update_cases(domain, case_updates, DEVICE_ID)
                 case_updates = []
         # submit left over case updates
         if case_updates:
             print("Performing last batch of updates")
             bulk_update_cases(domain, case_updates, DEVICE_ID)
         print("Finished. Update details in %s" % filename)
Example #18
 def run(self):
     # iterate over all open 'episode' cases and set 'adherence' properties
     update_count = 0
     noupdate_count = 0
     error_count = 0
     with Timer() as t:
         batch_size = 100
         updates = []
         for episode in self._get_open_episode_cases():
             adherence_update = EpisodeAdherenceUpdate(episode, self)
             voucher_update = EpisodeVoucherUpdate(self.domain, episode)
             test_update = EpisodeTestUpdate(self.domain, episode)
             episode_facility_id_migration = EpisodeFacilityIDMigration(self.domain, episode)
             try:
                 update_json = adherence_update.update_json()
                 update_json.update(voucher_update.update_json())
                 update_json.update(test_update.update_json())
                 update_json.update(episode_facility_id_migration.update_json())
                 if update_json:
                     updates.append((episode.case_id, update_json, False))
                     update_count += 1
                 else:
                     noupdate_count += 1
                 if len(updates) == batch_size:
                     bulk_update_cases(self.domain, updates)
                     updates = []
             except Exception as e:
                 error_count += 1
                 logger.error(
                     "Error calculating updates for episode case_id({}): {}".format(
                         episode.case_id,
                         e
                     )
                 )
         if len(updates) > 0:
             bulk_update_cases(self.domain, updates)
Example #19
 def handle(self, domain, infile, logfile, *args, **options):
     self.domain = domain
     self.case_accessor = CaseAccessors(self.domain)
     with open(infile, 'r', encoding='utf-8') as f, open(logfile, 'w', encoding='utf-8') as log:
         reader = csv.reader(f)
         _, case_prop_name = next(reader)
         log.write('--------Successful Form Ids----------\n')
         failed_updates = []
         for rows in chunked(reader, 100):
             updates = [(case_id, {case_prop_name: prop}, False) for case_id, prop in rows]
             try:
                 xform, cases = bulk_update_cases(
                     self.domain, updates, self.__module__)
                 log.write(xform.form_id + '\n')
             except Exception as e:
                 print('error')
                 print(six.text_type(e))
                 failed_updates.extend(u[0] for u in updates)
         log.write('--------Failed Cases--------------\n')
         for case_id in failed_updates:
             log.write(case_id + '\n')
         log.write('--------Logging Complete--------------\n')
         print('-------------COMPLETE--------------')
Example #20
        # remove duplicates in case ids to remove so that we don't retain and close
        # the same case by mistake
        all_case_ids = {case.case_id for case in all_cases}
        retain_case_id = retain_case.case_id
        case_ids_to_close = all_case_ids.copy()
        case_ids_to_close.remove(retain_case_id)

        case_accessor = CaseAccessors(DOMAIN)
        closing_extension_case_ids = case_accessor.get_extension_case_ids(case_ids_to_close)

        self.writerow({
            "occurrence_case_id": occurrence_case_id,
            "person_case_id": self.person_case_id,
            "drug_id": drug_id,
            "retain_case_id": retain_case_id,
            "retain_reason": retain_reason,
            "closed_case_ids": ','.join(map(str, case_ids_to_close)),
            "closed_extension_case_ids": ','.join(map(str, closing_extension_case_ids))
        })
        if self.commit:
            updates = [(case_id, {'close_reason': "duplicate_reconciliation"}, True)
                       for case_id in case_ids_to_close]
            bulk_update_cases(DOMAIN, updates, self.__module__)


def get_open_drug_resistance_cases_from_occurrence(occurrence_case_id):
    case_accessor = CaseAccessors(DOMAIN)
    all_cases = case_accessor.get_reverse_indexed_cases([occurrence_case_id])
    return [case for case in all_cases
            if not case.closed and case.type == CASE_TYPE_DRUG_RESISTANCE]
Example #21
    def handle(self, domain, **options):
        commit = options['commit']

        filename = "reassign_from_facility-{}.csv".format(
            datetime.datetime.now().strftime('%Y-%m-%d_%H.%M.%S'))
        columns = [
            'case_id', 'facility_assigned_to', 'owner_id',
            'last_owner_id_changed', 'last_facility_assigned_to_changed',
            'note'
        ]

        case_ids = get_all_episode_ids(domain)
        cases = iter_all_active_person_episode_cases(domain,
                                                     case_ids,
                                                     sector='private')
        bad_cases = []
        to_update = []
        for person, _ in with_progress_bar(cases, length=len(case_ids)):
            facility_assigned_to = person.get_case_property(
                'facility_assigned_to')
            owner_id = person.owner_id
            if facility_assigned_to == owner_id:
                continue
            if not facility_assigned_to and owner_id in [MJK, ALERT_INDIA]:
                # cases with a blank facility and owned by MJK or Alert-India are known about already
                continue

            owner_id_changes = sorted(
                get_all_changes_to_case_property(person, 'owner_id'),
                key=lambda c: c.modified_on,
                reverse=True)
            facility_id_changes = sorted(
                get_all_changes_to_case_property(person, 'facility_assigned_to'),
                key=lambda c: c.modified_on,
                reverse=True)

            case_dict = {
                'case_id': person.case_id,
                'facility_assigned_to': facility_assigned_to,
                'owner_id': owner_id,
            }
            try:
                case_dict['last_owner_id_changed'] = owner_id_changes[0].modified_on
                case_dict['last_facility_assigned_to_changed'] = facility_id_changes[0].modified_on
                if owner_id_changes[0].modified_on < facility_id_changes[0].modified_on:
                    case_dict['note'] = 'updated'
                    to_update.append((person.case_id, {"owner_id": facility_assigned_to}, False))
                else:
                    case_dict['note'] = 'not updated'
            except IndexError as e:
                case_dict['last_owner_id_changed'] = None
                case_dict['last_facility_assigned_to_changed'] = None
                case_dict['note'] = 'no changes found: {}'.format(
                    six.text_type(e))

            bad_cases.append(case_dict)

        if commit:
            print("Updating: ", len(to_update), " cases")
            for update in chunked(to_update, 100):
                bulk_update_cases(domain, update, self.__module__)
        else:
            print("Would have updated: ", len(to_update), " cases")

        with open(filename, 'w') as f:
            writer = csv.DictWriter(f, fieldnames=columns)
            writer.writeheader()
            for case in bad_cases:
                writer.writerow(case)