def handle(self, *args, **options):
    """Export repeat records to the command's worksheet (``self.ws``).

    Records come either from a file of record IDs (``records_file_path``)
    or from a (domain, repeater_id[, state]) lookup; one of the two input
    modes is required.
    """
    domain = options.get('domain')
    repeater_id = options.get('repeater_id')
    state = options.get('state')
    records_file_path = options.get('records_file_path')
    if records_file_path:
        self._load_record_ids_from_file(records_file_path)
        records = self.record_ids
        record_count = len(records)
    elif domain and repeater_id:
        records = iter_repeat_records_by_domain(domain, repeater_id=repeater_id, state=state)
        record_count = get_repeat_record_count(domain, repeater_id=repeater_id, state=state)
    else:
        raise CommandError("Insufficient Arguments")

    for record in with_progress_bar(records, length=record_count):
        if isinstance(record, str):
            # Entries loaded from the IDs file are plain strings; resolve
            # them to RepeatRecord documents before adding a row.
            record_id = record
            try:
                record = RepeatRecord.get(record_id)
            except ResourceNotFound:
                # The ID no longer resolves; log it in the sheet and move on.
                self.ws.append([record_id, '', 'Not Found'])
                continue
        self._add_row(record)

    file_name = self._save_file(repeater_id, state)
    # BUG FIX: the format string had no ``{filename}`` replacement field,
    # so the saved file's name was never shown to the user.
    print("Report saved in file:{filename}".format(filename=file_name))
def handle(self, *args, **options):
    """Export repeat records to the command's worksheet (``self.ws``).

    Input is either a file of record IDs (``records_file_path``) or a
    (domain, repeater_id[, state]) lookup; exactly one mode must be given.
    """
    domain = options.get('domain')
    repeater_id = options.get('repeater_id')
    state = options.get('state')
    records_file_path = options.get('records_file_path')
    if records_file_path:
        self._load_record_ids_from_file(records_file_path)
        records = self.record_ids
        record_count = len(records)
    elif domain and repeater_id:
        records = iter_repeat_records_by_domain(domain, repeater_id=repeater_id, state=state)
        record_count = get_repeat_record_count(domain, repeater_id=repeater_id, state=state)
    else:
        raise CommandError("Insufficient Arguments")

    for record in with_progress_bar(records, length=record_count):
        if isinstance(record, str):
            # IDs loaded from file are strings; resolve to documents first.
            record_id = record
            try:
                record = RepeatRecord.get(record_id)
            except ResourceNotFound:
                # Missing document: note it in the sheet and continue.
                self.ws.append([record_id, '', 'Not Found'])
                continue
        self._add_row(record)

    file_name = self._save_file(repeater_id, state)
    # BUG FIX: the format string was missing its ``{filename}`` placeholder,
    # so the message never included the saved file's name.
    print("Report saved in file:{filename}".format(filename=file_name))
def test_get_all_repeat_records_by_domain_since(self):
    """Filtering on state + ``since`` returns only the cancelled record
    whose last_checked falls after the cutoff."""
    def make_record(last_checked, **extra):
        return RepeatRecord(
            domain=self.domain,
            repeater_id=self.repeater_id,
            last_checked=last_checked,
            **extra
        )

    new_records = [
        make_record(datetime(2017, 5, 24), failure_reason='some error'),  # FAIL
        make_record(datetime(2017, 5, 10), cancelled=True),  # CANCELLED, before cutoff
        make_record(datetime(2017, 5, 24), cancelled=True),  # CANCELLED, after cutoff
    ]
    RepeatRecord.bulk_save(new_records)
    self.addCleanup(RepeatRecord.bulk_delete, new_records)

    records = list(iter_repeat_records_by_domain(
        self.domain, state=RECORD_CANCELLED_STATE, since=datetime(2017, 5, 20)))

    self.assertEqual(len(records), 1)
    record, = records
    self.assertEqual(record.to_json(), new_records[-1].to_json())
def most_recent_success(self):
    """Map each successfully-sent payload_id to its latest last_checked time."""
    latest_by_payload = {}
    successes = iter_repeat_records_by_domain(
        self.domain, repeater_id=self.repeater_id, state=RECORD_SUCCESS_STATE)
    for rec in successes:
        if not rec.last_checked:
            continue
        previous = latest_by_payload.get(rec.payload_id, datetime.datetime.min)
        latest_by_payload[rec.payload_id] = max(previous, rec.last_checked)
    return latest_by_payload
def most_recent_success(self):
    """Return {payload_id: latest last_checked} over successful records."""
    result = {}
    for rec in iter_repeat_records_by_domain(
            self.domain, repeater_id=self.repeater_id, state=RECORD_SUCCESS_STATE):
        if rec.last_checked:
            # Keep the newest timestamp seen for this payload.
            current = result.get(rec.payload_id, datetime.datetime.min)
            if rec.last_checked > current:
                result[rec.payload_id] = rec.last_checked
            else:
                result[rec.payload_id] = current
    return result
def reconcile_repeat_records(self, voucher_updates):
    """
    Mark updated records as "succeeded", all others as "cancelled"
    Delete duplicate records if any exist
    """
    # NOTE: print statements converted to the print() function for
    # consistency with the rest of this codebase.
    print("Reconciling repeat records")
    chemist_voucher_repeater_id = 'be435d3f407bfb1016cc89ebbf8146b1'
    lab_voucher_repeater_id = 'be435d3f407bfb1016cc89ebbfc42a47'
    already_seen = set()
    updates_by_voucher_id = {update.id: update for update in voucher_updates}

    headers = ['record_id', 'voucher_id', 'status']
    rows = []

    # Only touch the database when --commit was passed; otherwise a
    # MagicMock swallows the save/delete calls (dry run).
    get_db = (lambda: IterDB(RepeatRecord.get_db())) if self.commit else MagicMock
    with get_db() as iter_db:
        for repeater_id in [chemist_voucher_repeater_id, lab_voucher_repeater_id]:
            print("repeater {}".format(repeater_id))
            records = iter_repeat_records_by_domain(self.domain, repeater_id=repeater_id)
            record_count = get_repeat_record_count(self.domain, repeater_id=repeater_id)
            for record in with_progress_bar(records, record_count):
                if record.payload_id in already_seen:
                    status = "deleted"
                    iter_db.delete(record)
                elif record.payload_id in updates_by_voucher_id:
                    # add successful attempt
                    status = "succeeded"
                    attempt = RepeatRecordAttempt(
                        cancelled=False,
                        datetime=datetime.datetime.utcnow(),
                        failure_reason=None,
                        success_response="Paid offline via import_voucher_confirmations",
                        next_check=None,
                        succeeded=True,
                    )
                    record.add_attempt(attempt)
                    iter_db.save(record)
                else:
                    # mark record as cancelled
                    # BUG FIX: this branch previously never assigned ``status``,
                    # so the logged row reused the value from a prior iteration
                    # (or raised NameError if the first record hit this branch).
                    status = "cancelled"
                    record.add_attempt(RepeatRecordAttempt(
                        cancelled=True,
                        datetime=datetime.datetime.utcnow(),
                        failure_reason="Cancelled during import_voucher_confirmations",
                        success_response=None,
                        next_check=None,
                        succeeded=False,
                    ))
                    iter_db.save(record)
                already_seen.add(record.payload_id)
                rows.append([record._id, record.payload_id, status])

    self.write_csv('repeat_records', headers, rows)
def create_repeat_records_on_dest_repeater(self, source_repeater_id, dest_repeater_id, state):
    """For each repeat record on the source repeater (in the given state),
    create a matching record on the destination repeater, skipping episodes
    already registered with Nikshay and payloads already retriggered.

    Failures are logged via ``self.record_failure``; successes via
    ``self.add_row``. ``self.dry_run`` suppresses saving the new records.
    """
    dest_repeater = Repeater.get(dest_repeater_id)
    retriggered = set()
    # NOTE(review): ``domain`` is not defined in this function's scope —
    # presumably a module-level constant or an instance attribute; confirm
    # before relying on this command.
    records = iter_repeat_records_by_domain(domain, repeater_id=source_repeater_id, state=state)
    record_count = get_repeat_record_count(domain, repeater_id=source_repeater_id, state=state)
    accessor = CaseAccessors(domain)
    print("Iterating over records and adding new record for them")
    for record in with_progress_bar(records, length=record_count):
        if record.payload_id in retriggered:
            self.record_failure(record.get_id, record.payload_id,
                                error_message="Already triggered")
            continue
        try:
            episode = accessor.get_case(record.payload_id)
            episode_case_properties = episode.dynamic_case_properties()
            # Only retrigger confirmed-TB episodes that have not yet been
            # registered with Nikshay (public or private) in any way.
            if (episode_case_properties.get('nikshay_registered', 'false') == 'false'
                    and episode_case_properties.get(
                        'private_nikshay_registered', 'false') == 'false'
                    and not episode_case_properties.get('nikshay_id')
                    and episode_case_properties.get('episode_type') == 'confirmed_tb'
                    and is_valid_episode_submission(episode)):
                new_record = RepeatRecord(
                    domain=domain,
                    next_check=datetime.utcnow(),
                    repeater_id=dest_repeater_id,
                    repeater_type=dest_repeater.doc_type,
                    payload_id=record.payload_id,
                )
                if not self.dry_run:
                    new_record.save()
                retriggered.add(record.payload_id)
                self.add_row(record,
                             episode_case_properties.get('migration_created_case'),
                             new_record.get_id)
            else:
                self.record_failure(record.get_id, record.payload_id,
                                    error_message="Not to be re-triggered")
        except Exception as e:
            # BUG FIX: exception instances have no ``__name__`` attribute
            # (it lives on the class) and ``e.message`` is Python-2-only —
            # either one made this except clause itself raise. Use
            # type(e).__name__ and str(e) instead.
            self.record_failure(record.get_id, record.payload_id,
                                error_message="{error}: {message}".format(
                                    error=type(e).__name__, message=str(e)))
def handle(self, domain, repeater_id, *args, **options):
    """Interactively clean up cancelled repeat records for a repeater.

    Records whose payload has since succeeded are deleted; remaining
    records are grouped by payload_id for duplicate resolution. All
    changes are logged to a timestamped CSV file.
    """
    self.domain = domain
    self.repeater_id = repeater_id
    repeater = Repeater.get(repeater_id)
    print("Looking up repeat records for '{}'".format(
        repeater.friendly_name))

    redundant_records = []
    records_by_payload_id = defaultdict(list)
    records = iter_repeat_records_by_domain(domain, repeater_id=repeater_id,
                                            state=RECORD_CANCELLED_STATE)
    total_records = 0
    for record in records:
        total_records += 1
        # self.most_recent_success maps payload_id -> latest successful
        # last_checked timestamp.
        most_recent_success = self.most_recent_success.get(
            record.payload_id)
        if most_recent_success and record.last_checked < most_recent_success:
            # another record with this payload has succeeded after this record failed
            redundant_records.append(record)
        else:
            records_by_payload_id[record.payload_id].append(record)

    unique_payloads = len(records_by_payload_id)
    redundant_payloads = len(redundant_records)
    print(
        "There are {total} total cancelled records, {redundant} with payloads which "
        "have since succeeded, and {unique} unsent unique payload ids.".
        format(total=total_records,
               redundant=redundant_payloads,
               unique=unique_payloads))
    print("Delete {} duplicate records?".format(total_records - unique_payloads))
    # Require explicit confirmation before mutating anything.
    if not input("(y/n)") == 'y':
        print("Aborting")
        return

    redundant_log = self.delete_already_successful_records(
        redundant_records)
    duplicates_log = self.resolve_duplicates(records_by_payload_id)

    # Write an audit log of every record touched.
    filename = "cancelled_{}_records-{}.csv".format(
        repeater.__class__.__name__,
        datetime.datetime.utcnow().isoformat())
    print("Writing log of changes to {}".format(filename))
    with open(filename, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(('RepeatRecord ID', 'Payload ID', 'Failure Reason',
                         'Deleted?', 'Reason'))
        writer.writerows(redundant_log)
        writer.writerows(duplicates_log)
def get_all_rows(self):
    """Report rows: summary counts when rendered as an email, otherwise
    one row per repeat record matching the request filters."""
    repeater_id = self.request.GET.get('repeater', None)
    state = self.request.GET.get('record_state', None)

    if self.is_rendered_as_email:
        same_time_yesterday = datetime.today() - timedelta(days=1)

        def count(*args):
            # Counts for this domain/repeater; extra args are passed
            # through positionally (state, then since).
            return get_repeat_record_count(self.domain, repeater_id, *args)

        return [[
            count("SUCCESS"),
            count("SUCCESS", same_time_yesterday),
            count("CANCELLED"),
            count("CANCELLED", same_time_yesterday),
        ]]

    matching = iter_repeat_records_by_domain(
        self.domain, repeater_id=repeater_id, state=state)
    return [self._make_row(record) for record in matching]
def handle(self, domain, **options):
    # For all successful registration records
    # If any have an attempt that id "A patient with this beneficiary_id already exists"
    # Check the episode case. If this doesn't have "dots_99_registered" then set this property to "true"
    """Re-sync episodes that 99DOTS reported as already registered.

    Scans successful registration repeat records; any whose attempts hit
    "already exists" but whose episode is not yet marked registered are
    updated and re-sent, with results logged to a timestamped CSV.
    """
    self.commit = options['commit']
    repeater_id = 'dc73c3da43d42acd964d80b287926833'  # 99dots register
    accessor = CaseAccessors(domain)
    existing_message = "A patient with this beneficiary_id already exists"
    count = get_repeat_record_count(domain, repeater_id, state="SUCCESS")
    records = iter_repeat_records_by_domain(domain, repeater_id, state="SUCCESS")
    cases_to_update = set()
    print("Filtering successful cases")
    for repeat_record in with_progress_bar(records, length=count):
        # "" (falsy) stands in for attempts with no message, so ``any``
        # treats them as non-matches.
        if any((existing_message in attempt.message if attempt.message is not None else "")
               for attempt in repeat_record.attempts):
            try:
                episode = accessor.get_case(repeat_record.payload_id)
            except CaseNotFound:
                continue
            if episode.get_case_property('dots_99_registered') != 'true':
                cases_to_update.add(episode)

    timestamp = datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
    with open('{}_set_99dots_to_registered.csv'.format(timestamp), 'w') as f:
        writer = csv.writer(f)
        # BUG FIX: the header row listed 5 columns but each data row below
        # writes 6 values — add a header for the update_registered_status
        # column so the CSV columns line up.
        writer.writerow([
            'beneficiary_id', 'episode_id', 'Registered Status',
            'UpdatePatient Status', 'Adherence Status', 'TreatmentOutcome Status'
        ])
        print("Updating {} successful cases in 99DOTS".format(
            len(cases_to_update)))
        for case in with_progress_bar(cases_to_update):
            writer.writerow([
                get_person_case_from_episode(domain, case.case_id).case_id,
                case.case_id,
                self.update_registered_status(domain, case),
                self.update_patients(domain, case),
                self.send_adherence(domain, case),
                self.send_treatment_outcome(domain, case),
            ])
def handle(self, domain, repeater_id, *args, **options):
    """Interactively clean up cancelled repeat records for a repeater.

    Records whose payload has since succeeded are deleted; the rest are
    grouped by payload_id for duplicate resolution. Every change is
    logged to a timestamped CSV.
    """
    self.domain = domain
    self.repeater_id = repeater_id
    repeater = Repeater.get(repeater_id)
    print("Looking up repeat records for '{}'".format(repeater.friendly_name))

    redundant_records = []
    records_by_payload_id = defaultdict(list)
    records = iter_repeat_records_by_domain(domain, repeater_id=repeater_id,
                                            state=RECORD_CANCELLED_STATE)
    total_records = 0
    for record in records:
        total_records += 1
        # Latest successful send for this payload, if any (property on self).
        most_recent_success = self.most_recent_success.get(record.payload_id)
        if most_recent_success and record.last_checked < most_recent_success:
            # another record with this payload has succeeded after this record failed
            redundant_records.append(record)
        else:
            records_by_payload_id[record.payload_id].append(record)

    unique_payloads = len(records_by_payload_id)
    redundant_payloads = len(redundant_records)
    print ("There are {total} total cancelled records, {redundant} with payloads which "
           "have since succeeded, and {unique} unsent unique payload ids."
           .format(total=total_records, redundant=redundant_payloads,
                   unique=unique_payloads))
    print("Delete {} duplicate records?".format(total_records - unique_payloads))
    # Require explicit confirmation before mutating anything.
    if not input("(y/n)") == 'y':
        print("Aborting")
        return

    redundant_log = self.delete_already_successful_records(redundant_records)
    duplicates_log = self.resolve_duplicates(records_by_payload_id)

    # Audit log of every record touched.
    filename = "cancelled_{}_records-{}.csv".format(
        repeater.__class__.__name__,
        datetime.datetime.utcnow().isoformat())
    print("Writing log of changes to {}".format(filename))
    with open(filename, 'w', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(('RepeatRecord ID', 'Payload ID', 'Failure Reason',
                         'Deleted?', 'Reason'))
        writer.writerows(redundant_log)
        writer.writerows(duplicates_log)
def test_get_all_repeat_records_by_domain(self):
    """With no filters, every record in the domain is returned."""
    fetched = list(iter_repeat_records_by_domain(self.domain))
    self.assertEqual(len(fetched), len(self.records))
def handle(self, days, *args, **options):
    """Report time taken for Nikshay registration notifications over the
    last ``days`` days, writing a CSV and optionally emailing it.
    """
    email_to = options.get('email')
    # to iterate over repeat records we need time zone independent datetime
    # so find the difference between timezone needed and utc
    # For ex: IST is 5 hours 30 mins ahead of utc, so reduce that time in since
    # datetime to fetch repeat records from midnight IST on since datetime
    timezone = get_timezone_for_domain(DOMAIN)
    self.days = days
    self.till = datetime.datetime.now(tz=timezone)
    self.since = (
        datetime.datetime(self.till.year, self.till.month, self.till.day)
        - datetime.timedelta(days=days)
        - datetime.timedelta(hours=5, minutes=30))
    result_file_name = "nikshay_registration_notification_time_report_from_%s_till_%s.csv" % (
        self.since.strftime('%Y-%m-%d-%H:%M:%S'),
        self.till.strftime('%Y-%m-%d-%H:%M:%S'))
    with open(result_file_name, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=[
            "nikshay id", "form finished on", "form submitted on",
            "notification completed on", "form to submission",
            "submission to notification", "case id"
        ])
        writer.writeheader()
        case_accessor = CaseAccessors(DOMAIN)
        for repeat_record in iter_repeat_records_by_domain(
                DOMAIN, repeater_id=REGISTRATION_REPEATER_ID,
                state=SUCCESS_STATE, since=self.since):
            episode_case_id = repeat_record.payload_id
            episode_case = case_accessor.get_case(episode_case_id)
            assert repeat_record.succeeded
            # last_checked is stored naive-UTC; localize then convert to
            # the domain timezone for the report.
            time_of_notification = pytz.utc.localize(
                repeat_record.last_checked).astimezone(timezone)
            # assert that
            # the last notification was the success one and
            # the time for last notification is same as that for the repeat record
            last_notification_attempt = repeat_record.attempts[-1]
            assert last_notification_attempt.succeeded
            assert repeat_record.last_checked == last_notification_attempt.datetime
            # Find the form that set treatment_initiated to "yes_phi".
            property_changed_info = get_latest_property_change_to_value(
                episode_case, "treatment_initiated", "yes_phi")
            xform = property_changed_info.transaction.form
            form_received_on = pytz.utc.localize(
                xform.received_on).astimezone(timezone)
            property_modified_on = parse_datetime(
                property_changed_info.modified_on).astimezone(timezone)
            writer.writerow({
                'nikshay id': episode_case.get_case_property('nikshay_id'),
                'form finished on': property_modified_on.strftime('%Y-%m-%d-%H:%M:%S'),
                'form submitted on': form_received_on.strftime('%Y-%m-%d-%H:%M:%S'),
                'notification completed on': time_of_notification.strftime('%Y-%m-%d-%H:%M:%S'),
                'form to submission': (form_received_on - property_modified_on),
                'submission to notification': (time_of_notification - form_received_on),
                'case id': episode_case.case_id
            })
    if email_to:
        # Accept either a single address or an iterable of addresses.
        email_to = list(email_to) if not isinstance(
            email_to, six.string_types) else [email_to]
        csvfile = open(result_file_name)
        email = EmailMessage(
            subject="Nikshay Registration Notification Time Report",
            body=
            "Report for time taken for registration notifications for %s day(s)"
            % self.days,
            to=email_to,
            from_email=settings.DEFAULT_FROM_EMAIL)
        email.attach(filename=result_file_name, content=csvfile.read())
        csvfile.close()
        email.send()
def test_get_all_repeat_records_by_domain_with_repeater_id(self):
    """Filtering by repeater_id narrows the result to that repeater's records."""
    fetched = list(
        iter_repeat_records_by_domain(self.domain, repeater_id=self.repeater_id))
    self.assertEqual(len(fetched), 5)
def test_get_all_repeat_records_by_domain_wrong_domain(self):
    """An unknown domain yields no records."""
    fetched = list(iter_repeat_records_by_domain("wrong-domain"))
    self.assertEqual(len(fetched), 0)
def handle(self, domain, repeater_id, *args, **options):
    """Filter a repeater's cancelled repeat records by failure-reason
    regexes, then either retrigger them or mark them succeeded,
    logging every outcome to a timestamped CSV.
    """
    sleep_time = options.get('sleep_time')
    include_regexps = options.get('include_regexps')
    exclude_regexps = options.get('exclude_regexps')
    verbose = options.get('verbose')
    action = options.get('action')
    success_message = options.get('success_message')
    response_status = options.get('response_status')
    repeater = Repeater.get(repeater_id)
    print("Looking up repeat records for '{}'".format(
        repeater.friendly_name))

    def meets_filter(record):
        # True when the record's failure_reason passes both the exclude
        # and include regex filters.
        if exclude_regexps:  # Match none of the exclude expressions
            if record.failure_reason:
                if any(
                        re.search(exclude_regex, record.failure_reason)
                        for exclude_regex in exclude_regexps):
                    return False
        if include_regexps:  # Match any of the include expressions
            if not record.failure_reason:
                return False
            return any(
                re.search(include_regex, record.failure_reason)
                for include_regex in include_regexps)
        return True  # No filter applied

    records = list(
        filter(
            meets_filter,
            iter_repeat_records_by_domain(domain,
                                          repeater_id=repeater_id,
                                          state=RECORD_CANCELLED_STATE)))
    if verbose:
        for record in records:
            print(record.payload_id, record.failure_reason)

    total_records = len(records)
    print("Found {} matching records. {} them?".format(
        total_records, action))
    # Require explicit confirmation before mutating anything.
    if not input("(y/n)") == 'y':
        print("Aborting")
        return

    filename = "{}_{}_records-{}.csv".format(
        action, repeater.__class__.__name__,
        datetime.datetime.utcnow().strftime('%Y-%m-%d_%H.%M.%S'))
    with open(filename, 'w', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(('record_id', 'payload_id', 'state', 'message'))
        for i, record in enumerate(records):
            try:
                if action == 'retrigger':
                    if record.next_check is None:
                        record.next_check = datetime.datetime.utcnow()
                    record.fire(force_send=True)
                elif action == 'succeed':
                    self._succeed_record(record, success_message,
                                         response_status)
            except Exception as e:
                print("{}/{}: {} {}".format(i + 1, total_records,
                                            'EXCEPTION', repr(e)))
                writer.writerow(
                    (record._id, record.payload_id, record.state, repr(e)))
            else:
                print("{}/{}: {}, {}".format(i + 1, total_records,
                                             record.state,
                                             record.attempts[-1].message))
                writer.writerow(
                    (record._id, record.payload_id, record.state,
                     record.attempts[-1].message))
            # Optional throttle between records to spare the remote service.
            if sleep_time:
                time.sleep(float(sleep_time))
    print("Wrote log of changes to {}".format(filename))
def handle(self, domain, repeater_id, filename, **options):
    """Dump voucher repeat-record payloads to ``filename`` as CSV.

    Duplicate voucher IDs are collected into ``duplicates_<filename>``
    and payload-extraction errors into ``errors_<filename>``.
    """
    records = iter_repeat_records_by_domain(domain, repeater_id=repeater_id)
    record_count = get_repeat_record_count(domain, repeater_id=repeater_id)
    row_names = [
        'VoucherID',
        'EventOccurDate',
        'EventID',
        'BeneficiaryUUID',
        'BeneficiaryType',
        'Location',
        'Amount',
        'DTOLocation',
        'InvestigationType',
        'PersonId',
        'AgencyId',
        'EnikshayApprover',
        'EnikshayRole',
        'EnikshayApprovalDate',
        'Succeeded',  # Some records did succeed when we sent them.
        # Include this so they don't re-pay people.
    ]
    seen_voucher_ids = set()
    duplicate_voucher_ids = set()
    errors = []
    with open(filename, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(row_names)
        for record in with_progress_bar(records, length=record_count):
            try:
                payload = json.loads(record.get_payload())['voucher_details'][0]
                voucher_id = record.payload_id
                payload['Succeeded'] = record.succeeded
            except Exception as e:
                errors.append([record.payload_id, six.text_type(e)])
                continue
            if voucher_id in seen_voucher_ids:
                duplicate_voucher_ids.add(voucher_id)
            else:
                seen_voucher_ids.add(voucher_id)
            row = [
                payload.get(name) if payload.get(name) is not None else ""
                for name in row_names
            ]
            writer.writerow(row)

    print("{} duplicates found".format(len(duplicate_voucher_ids)))
    if duplicate_voucher_ids:
        with open('duplicates_{}'.format(filename), 'w') as f:
            writer = csv.writer(f)
            for duplicate_id in duplicate_voucher_ids:
                writer.writerow([duplicate_id])

    print("{} errors".format(len(errors)))
    if errors:
        with open('errors_{}'.format(filename), 'w') as f:
            writer = csv.writer(f)
            writer.writerow(['episode_id', 'error'])
            for error in errors:
                # BUG FIX: previously wrote the entire ``errors`` list on
                # every iteration (``writerow(errors)``) instead of the
                # single error row.
                writer.writerow(error)
def handle(self, domain, repeater_id, filename, **options):
    """Dump incentive repeat-record payloads to ``filename`` as CSV.

    Duplicate (EpisodeID, EventID) pairs are written to
    ``duplicates_<filename>`` and payload-extraction errors to
    ``errors_<filename>``.
    """
    records = iter_repeat_records_by_domain(domain, repeater_id=repeater_id)
    record_count = get_repeat_record_count(domain, repeater_id=repeater_id)
    row_names = [
        'EpisodeID',
        'EventOccurDate',
        'EventID',
        'BeneficiaryUUID',
        'BeneficiaryType',
        'Location',
        'DTOLocation',
        'PersonId',
        'AgencyId',
        'EnikshayApprover',
        'EnikshayRole',
        'EnikshayApprovalDate',
        'Succeeded',  # Some records did succeed when we sent them.
        # Include this so they don't re-pay people.
    ]
    errors = []
    seen_incentive_ids = set()
    duplicate_incentive_ids = set()
    with open(filename, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(row_names)
        for record in with_progress_bar(records, length=record_count):
            try:
                payload = json.loads(
                    record.get_payload())['incentive_details'][0]
            except Exception as e:
                errors.append(
                    [record.payload_id, record._id, six.text_type(e)])
                continue
            payload['Succeeded'] = record.succeeded
            # An incentive is identified by its (episode, event) pair.
            incentive_episode_pair = (
                payload.get('EpisodeID'),
                payload.get('EventID'),
            )
            if incentive_episode_pair in seen_incentive_ids:
                duplicate_incentive_ids.add(incentive_episode_pair)
            else:
                seen_incentive_ids.add(incentive_episode_pair)
            row = [payload.get(name) for name in row_names]
            writer.writerow(row)

    print("{} duplicates found".format(len(duplicate_incentive_ids)))
    if duplicate_incentive_ids:
        with open('duplicates_{}'.format(filename), 'w') as f:
            writer = csv.writer(f)
            writer.writerow(['episode_id', 'event_id'])
            for duplicate_id in duplicate_incentive_ids:
                # Each pair is a 2-tuple, written as two columns.
                writer.writerow(duplicate_id)

    print("{} errors".format(len(errors)))
    if errors:
        with open('errors_{}'.format(filename), 'w') as f:
            writer = csv.writer(f)
            writer.writerow(['episode_id', 'repeat_record_id', 'error'])
            for error in errors:
                writer.writerow(error)
def handle(self, domain, repeater_id, *args, **options):
    """Filter a repeater's cancelled records by failure-reason regexes and
    either retrigger them or mark them succeeded; log outcomes to CSV.
    """
    sleep_time = options.get('sleep_time')
    include_regexps = options.get('include_regexps')
    exclude_regexps = options.get('exclude_regexps')
    verbose = options.get('verbose')
    action = options.get('action')
    success_message = options.get('success_message')
    response_status = options.get('response_status')
    repeater = Repeater.get(repeater_id)
    print("Looking up repeat records for '{}'".format(repeater.friendly_name))

    def meets_filter(record):
        # A record passes when it matches no exclude regex and (if include
        # regexes are given) at least one include regex.
        if exclude_regexps:  # Match none of the exclude expressions
            if record.failure_reason:
                if any(re.search(exclude_regex, record.failure_reason)
                       for exclude_regex in exclude_regexps):
                    return False
        if include_regexps:  # Match any of the include expressions
            if not record.failure_reason:
                return False
            return any(re.search(include_regex, record.failure_reason)
                       for include_regex in include_regexps)
        return True  # No filter applied

    records = list(filter(
        meets_filter,
        iter_repeat_records_by_domain(domain, repeater_id=repeater_id,
                                      state=RECORD_CANCELLED_STATE)
    ))
    if verbose:
        for record in records:
            print(record.payload_id, record.failure_reason)

    total_records = len(records)
    print("Found {} matching records. {} them?".format(total_records, action))
    # Require explicit confirmation before mutating anything.
    if not input("(y/n)") == 'y':
        print("Aborting")
        return

    filename = "{}_{}_records-{}.csv".format(
        action, repeater.__class__.__name__,
        datetime.datetime.utcnow().strftime('%Y-%m-%d_%H.%M.%S'))
    with open(filename, 'w', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(('record_id', 'payload_id', 'state', 'message'))
        for i, record in enumerate(records):
            try:
                if action == 'retrigger':
                    if record.next_check is None:
                        record.next_check = datetime.datetime.utcnow()
                    record.fire(force_send=True)
                elif action == 'succeed':
                    self._succeed_record(record, success_message, response_status)
            except Exception as e:
                print("{}/{}: {} {}".format(i + 1, total_records,
                                            'EXCEPTION', repr(e)))
                writer.writerow((record._id, record.payload_id, record.state,
                                 repr(e)))
            else:
                print("{}/{}: {}, {}".format(i + 1, total_records,
                                             record.state,
                                             record.attempts[-1].message))
                writer.writerow((record._id, record.payload_id, record.state,
                                 record.attempts[-1].message))
            # Optional throttle between records to spare the remote service.
            if sleep_time:
                time.sleep(float(sleep_time))
    print("Wrote log of changes to {}".format(filename))