def get_sex(self, empi):
    """Return 1 if the patient's recorded gender is female, else 0.

    Looks the patient up by EMPI and inspects the 'Gender' field.
    Returns 0 when no gender information is present.
    """
    person = loader.get_patient_by_EMPI(empi)
    # Bug fix: the original tested for a 'Sex' key but then read
    # 'Gender', raising KeyError whenever 'Sex' was present without
    # 'Gender'.  Test the key that is actually read.
    if 'Gender' in person:
        sex = person['Gender']
        # The raw field value carries a trailing CRLF, hence the literal.
        return int(sex == 'Female\r\n')
    else:
        return 0
def get_sent_vector(self, empi):
    """Return a flattened array of per-note vectors for the patient.

    Collects up to self.max_notes notes of self.note_type dated
    strictly before the operation (closest to the operation first),
    pads with empty strings to a fixed length, converts each note via
    self.get_sent_vector_from_doc, and flattens the result.
    """
    patient = loader.get_patient_by_EMPI(empi)
    operation_date = extract_data.get_operation_date(patient)
    # Removed an unused call to get_diagnoses(empi) -- its result was
    # never read, so it only cost an extra record load.
    date_key = extract_data.get_date_key(self.note_type)
    notes = []
    if self.note_type in patient and date_key is not None:
        # Rank pre-operation notes by how close they are to the
        # operation date.
        time_idx_pairs = []
        for i, doc in enumerate(patient[self.note_type]):
            date = extract_data.parse_date(doc[date_key])
            if date is not None and date < operation_date:
                time_idx_pairs.append((operation_date - date, i))
        time_idx_pairs.sort()
        for _, idx in time_idx_pairs[:self.max_notes]:
            notes.append(patient[self.note_type][idx]['free_text'])
    # Pad so the notes vector length always equals max_notes.
    while len(notes) < self.max_notes:
        notes.append('')
    # Turn notes into doc vectors (list comprehension keeps this
    # correct on both Python 2 and 3, unlike a bare map()).
    vectors = [self.get_sent_vector_from_doc(note) for note in notes]
    return np.array(vectors).flatten()
def get_icd9_vector(self, empi):
    """Binary vector of ICD9 diagnosis categories seen before surgery.

    Sums the category vectors of every pre-operative ICD9 diagnosis
    that maps to a known category, then collapses the counts to 0/1
    presence flags.  Returns None when no such diagnosis exists.
    """
    patient = loader.get_patient_by_EMPI(empi)
    operation_date = extract_data.get_operation_date(patient)
    diagnoses = get_diagnoses(empi)
    history = None
    for date, code_type, code, _name in diagnoses:
        if date >= operation_date:
            continue
        if code_type != 'ICD9':
            # Non-ICD9 codes are silently ignored.
            continue
        stripped = code.replace('.', '')
        if stripped not in self.categories:
            continue
        vec = self.get_vector_from_category(self.categories[stripped])
        if history is None:
            history = vec
        else:
            history += vec
    # Normalize the array to ones and zeros rather than counts.
    if history is not None:
        history = np.array(map(int, history > 0))
    return history
def get_sent_vector(self, empi):
    """Flattened array of note vectors for the patient's pre-op notes.

    Takes the self.max_notes notes of self.note_type closest to (and
    strictly before) the operation date, pads the list with '' up to
    max_notes, maps each through self.get_sent_vector_from_doc and
    flattens the stacked result.
    """
    patient = loader.get_patient_by_EMPI(empi)
    operation_date = extract_data.get_operation_date(patient)
    # Dropped the original's unused get_diagnoses(empi) call; the
    # value was never used and the lookup is not free.
    date_key = extract_data.get_date_key(self.note_type)
    notes = []
    if self.note_type in patient and date_key is not None:
        # Sort candidate notes by distance from the operation date.
        candidates = []
        for idx, doc in enumerate(patient[self.note_type]):
            note_date = extract_data.parse_date(doc[date_key])
            if note_date is not None and note_date < operation_date:
                candidates.append((operation_date - note_date, idx))
        candidates.sort()
        for _, idx in candidates[:self.max_notes]:
            notes.append(patient[self.note_type][idx]['free_text'])
    # Guarantee a fixed-length notes vector of exactly max_notes.
    notes.extend([''] * (self.max_notes - len(notes)))
    # List comprehension instead of map() so the result is a real
    # list under both Python 2 and 3.
    vectors = [self.get_sent_vector_from_doc(note) for note in notes]
    return np.array(vectors).flatten()
def main(): empi = "FAKE_EMPI_385" # testing a single patient symptoms_regexes = getSymptomsRegexes() person = loader.get_patient_by_EMPI(empi) operation_date = build_graphs.get_operation_date(person) note_types = ['Car', 'Lno'] person_pos_history = {} person_neg_history = {} sec_per_day = 24 * 60 * 60 for note_type in note_types: print 'Examining ' + note_type + ' Notes for Patient ' + empi date_key = extract_data.get_date_key(note_type) if note_type in person.keys() and date_key != None: for i in range(len(person[note_type])): print '\tNote' + str(i) doc = person[note_type][i] date = extract_data.parse_date(doc[date_key]) if date != None: delta_days = (date - operation_date).total_seconds() / sec_per_day for sym in symptoms_regexes: normal, neg_pre, neg_suff = [bool(x.search(doc['free_text'])) for x in symptoms_regexes[sym]] if neg_pre or neg_suff: if sym in person_neg_history: person_neg_history[sym].append(delta_days) else: person_neg_history[sym] = [delta_days] print '\t\tNegative,' + sym + ',' + str(delta_days) elif normal: if sym in person_pos_history: person_pos_history[sym].append(delta_days) else: person_pos_history[sym] = [delta_days] print '\t\tPositive,' + sym + ',' + str(delta_days) return person_pos_history, person_neg_history
def get_icd9_vector(self, empi):
    """Presence vector over ICD9 categories for pre-operation diagnoses.

    Accumulates category vectors for every ICD9 code recorded before
    the operation date that maps to a known category, then squashes
    the counts to binary flags.  None is returned when nothing maps.
    """
    patient = loader.get_patient_by_EMPI(empi)
    operation_date = extract_data.get_operation_date(patient)
    accumulated = None
    for entry in get_diagnoses(empi):
        date, code_type, code = entry[0], entry[1], entry[2]
        pre_op_icd9 = date < operation_date and code_type == 'ICD9'
        if not pre_op_icd9:
            # Other coding systems are skipped.
            continue
        cleaned = code.replace('.', '')
        if cleaned in self.categories:
            category_vector = self.get_vector_from_category(self.categories[cleaned])
            if accumulated is None:
                accumulated = category_vector
            else:
                accumulated += category_vector
    # Reduce raw counts to 0/1 indicators.
    if accumulated is not None:
        accumulated = np.array(map(int, accumulated > 0))
    return accumulated
def get_labs_history(self, empi):
    """Flatten the per-window lab H/L history into binary features.

    For every lab group and every threshold in
    self.time_thresholds_months, emits two indicator features:
    '<lab>_H_<months>' and '<lab>_L_<months>'.
    """
    person = loader.get_patient_by_EMPI(empi)
    operation_date = build_graphs.get_operation_date(person)
    lab_history = structured_data_extractor.get_lab_history_before_date(
        empi, operation_date, self.time_thresholds_months)
    transformed = {}
    for lab, flags in lab_history.items():
        for months, flag in zip(self.time_thresholds_months, flags):
            suffix = str(months)
            transformed[lab + '_H_' + suffix] = int(flag == 'H')
            transformed[lab + '_L_' + suffix] = int(flag == 'L')
    return transformed
def get_labs_latest_low(self, empi):
    """Map each lab group id to 1 if its latest pre-op flag was 'L', else 0."""
    person = loader.get_patient_by_EMPI(empi)
    operation_date = build_graphs.get_operation_date(person)
    # Index 3 of get_labs_before_date is {group: (date, flag)} for the
    # most recent test per group.
    labs_latest = structured_data_extractor.get_labs_before_date(empi, operation_date)[3]
    return dict((lab, int(info[1] == 'L')) for lab, info in labs_latest.items())
def get_med_classes(self, empi):
    """Return RXNORM classes of medications dated on or before surgery.

    Medication entries whose date is missing/unparseable or that lack
    RXNORM_CLASSES are skipped.
    """
    patient = loader.get_patient_by_EMPI(empi)
    operation_date = extract_data.get_operation_date(patient)
    medications = []
    for med in patient['Med']:
        try:
            date = parse_m_d_y(med['Medication_Date'])
            # Bug fix: the original compared against the undefined
            # name 'procedure_date'; the resulting NameError was
            # swallowed by a bare except, so no medication was ever
            # collected.
            if date <= operation_date:
                medications.extend(med['RXNORM_CLASSES'])
        except Exception:
            # Best-effort: skip malformed records, but no longer a
            # bare except (which also swallowed KeyboardInterrupt and
            # masked the NameError above).
            pass
    return medications
def get_diagnoses(empi):
    """Return the patient's diagnosis timeline, sorted by date.

    Each entry is a tuple (diagnosis date, Code_Type, code,
    diagnosis name).  A single date may carry several diagnoses, and
    the same diagnosis is often re-reported on every visit.
    """
    patient = loader.get_patient_by_EMPI(empi)
    timeline = []
    if 'Dia' in patient:
        timeline = [(extract_data.parse_date(entry['Date']),
                     entry['Code_Type'],
                     entry['Code'],
                     entry['Diagnosis_Name'])
                    for entry in patient['Dia']]
    timeline.sort()
    return timeline
def get_latest_lab_values(self, empi):
    """Most recent pre-operation value for each lab, as float when possible.

    Values that cannot be parsed as numbers are kept verbatim; falsy
    (empty/None) values are dropped entirely.
    """
    person = loader.get_patient_by_EMPI(empi)
    operation_date = build_graphs.get_operation_date(person)
    latest_labs = structured_data_extractor.get_recent_lab_values(empi, operation_date)
    latest_lab_values = {}
    for lab, (_date, value) in latest_labs.items():
        if value:
            try:
                latest_lab_values[lab] = float(value)
            except (ValueError, TypeError):
                # Narrowed from a bare except: only conversion
                # failures fall back to the raw value.
                latest_lab_values[lab] = value
    return latest_lab_values
def get_recent_lab_values(empi, date):
    """Return {lab Group_Id: (date, Result)} for the most recent lab of
    each group recorded strictly before *date* for this patient."""
    p = loader.get_patient_by_EMPI(empi)
    lab_latest = {}
    if 'Lab' in p:
        for lab in p['Lab']:
            if not lab['Seq_Date_Time']:
                continue
            # Parse once and reuse; the original parsed the same
            # string twice per record.
            lab_date = extract_data.parse_date(lab['Seq_Date_Time'])
            if lab_date >= date:
                continue
            group = lab['Group_Id']
            # Keep only the most recent test value per group.
            if group not in lab_latest or lab_date > lab_latest[group][0]:
                lab_latest[group] = (lab_date, lab['Result'])
    return lab_latest
def get_concatenated_notes(self, empi):
    """Join the free text of all pre-operation notes of self.type.

    When self.look_back_months is set, notes older than that many
    months before the operation are excluded.  Notes are separated by
    blank lines.
    """
    person = loader.get_patient_by_EMPI(empi)
    operation_date = build_graphs.get_operation_date(person)
    date_key = extract_data.get_date_key(self.type)
    sec_per_month = 24 * 60 * 60 * (365.0 / 12)
    notes = []
    if self.type in person.keys() and date_key != None:
        for doc in person[self.type]:
            note_date = extract_data.parse_date(doc[date_key])
            if note_date is None or note_date >= operation_date:
                continue
            age_seconds = (operation_date - note_date).total_seconds()
            # Optional look-back window limits how old a note may be.
            if self.look_back_months and age_seconds > (self.look_back_months * sec_per_month):
                continue
            notes.append(doc['free_text'])
    return '\n\n'.join(notes)
def get_encounters(empi):
    """List the patient's encounters sorted by Admit Date.

    Each element is (admit date, inpatient/outpatient string,
    discharge date, LOS days as int, count of extra diagnoses).  The
    sort key is Admit Date because Discharge Date is not always
    recorded.
    """
    p = loader.get_patient_by_EMPI(empi)
    encounters = []
    if 'Enc' in p.keys():
        for enc in p['Enc']:
            # Count how many of the Diagnosis_1..Diagnosis_9 slots are set.
            extra_diagnoses = sum(1 for i in range(1, 10) if enc['Diagnosis_' + str(i)])
            if enc['Admit_Date']:
                los_days = int(enc['LOS_Days']) if enc['LOS_Days'] else 0
                encounters.append((extract_data.parse_date(enc['Admit_Date']),
                                   str(enc['Inpatient_Outpatient']),
                                   extract_data.parse_date(enc['Discharge_Date']),
                                   los_days,
                                   extra_diagnoses))
    encounters.sort(key=lambda e: e[0])  # just sort on Admit_Date
    return encounters
def get_supplemental_details(field_name, num_patients=907):
    """Print how many of the patients have a given top-level field.

    Takes in the name of a field and prints, per FAKE_EMPI_<i>
    patient, the size of that field.  The field must be top-level
    (i.e. 'Car', 'Lno', etc.).  This was used to test how many
    patients had the 'Supplemental' field.

    num_patients: how many FAKE_EMPI_<i> ids to probe; defaults to
    907, the historical dataset size (generalized from a hard-coded
    constant).
    """
    total = 0
    field_count = 0
    for i in range(num_patients):
        try:
            # try-body minimized to just the call that can fail.
            p = loader.get_patient_by_EMPI("FAKE_EMPI_" + str(i))
        except Exception:
            print(str(i) + " DOES NOT EXIST")
            continue
        if field_name in p.keys():
            if p[field_name] != None:
                print(str(i) + ": " + str(len(p[field_name])))
                field_count += 1
            else:
                print(str(i) + ": " + str(0))
        total += 1
    print("RESULTS: " + str(field_count) + "/" + str(total))
def get_latest_concatenated_notes(self, empi):
    """Array of the self.max_notes most recent pre-operation notes.

    Notes of self.type dated before the operation are ranked by how
    close they are to the operation date; the free text of the top
    self.max_notes is returned, padded with '' to a fixed length.
    """
    person = loader.get_patient_by_EMPI(empi)
    operation_date = build_graphs.get_operation_date(person)
    date_key = extract_data.get_date_key(self.type)
    notes = []
    if self.type in person.keys() and date_key != None:
        # Pair each eligible note with its distance to the operation.
        ranked = []
        for idx, doc in enumerate(person[self.type]):
            note_date = extract_data.parse_date(doc[date_key])
            if note_date is not None and note_date < operation_date:
                ranked.append((operation_date - note_date, idx))
        ranked.sort()
        for _, idx in ranked[:self.max_notes]:
            notes.append(person[self.type][idx]['free_text'])
    # Pad so the notes vector length always equals max_notes.
    notes.extend([''] * (self.max_notes - len(notes)))
    return np.array(notes)
def get_feature(self, empi):
    """
    description: performs the loops and conditionals to get at the desired
        documents and then returns the feature associated with the patient
        with the given EMPI
    input: empi string
    output: list or np.array of the feature
    """
    patient = loader.get_patient_by_EMPI(empi)
    operation_date = extract_data.get_operation_date(patient)
    values = []
    for doc_type in patient:
        # Bug fix: check for None first.  The original evaluated
        # 'doc_type in self.doc_types' before the None test, raising
        # TypeError whenever self.doc_types was None.
        if self.doc_types is None or doc_type in self.doc_types:
            docs = patient[doc_type]
            # Some document types are stored as a single record
            # rather than a list -- normalize.
            if not isinstance(docs, list):
                docs = [docs]
            for doc in docs:
                if self.select_doc(doc, operation_date, doc_type):
                    value = self.parse_value(doc, operation_date, doc_type)
                    if value not in [None, []]:
                        values += value if isinstance(value, list) else [value]
    return self.transform_values(values)
def get_labs_before_date(empi, date):
    """Given an empi and a date, will return the labs for that patient
    before that date.

    Returns four dictionaries keyed by lab group id: total counts,
    low counts, high counts, and the latest (date, low/high flag)
    tuple for each test.  Low/high mean the test value was below or
    above the norm respectively.
    """
    p = loader.get_patient_by_EMPI(empi)
    lab_counts = {}
    lab_lows = {}
    lab_highs = {}
    lab_latest = {}
    if 'Lab' in p.keys():
        for lab in p['Lab']:
            if not lab['Seq_Date_Time']:
                continue
            # Parse once and reuse (the original parsed this string
            # twice per record).
            lab_date = extract_data.parse_date(lab['Seq_Date_Time'])
            if lab_date >= date:
                continue
            group = lab['Group_Id']
            lab_counts[group] = lab_counts.get(group, 0) + 1
            # Keep the most recent (date, abnormal flag) pair per group.
            if group not in lab_latest or lab_date > lab_latest[group][0]:
                lab_latest[group] = (lab_date, lab['Abnormal_Flag'])
            flag = lab['Abnormal_Flag']
            if flag == 'L':
                lab_lows[group] = lab_lows.get(group, 0) + 1
            elif flag == 'H':
                lab_highs[group] = lab_highs.get(group, 0) + 1
    return lab_counts, lab_lows, lab_highs, lab_latest
def get_feature(self, empi):
    """
    description: performs the loops and conditionals to get at the desired
        documents and then returns the feature associated with the patient
        with the given EMPI
    input: empi string
    output: list or np.array of the feature
    """
    patient = loader.get_patient_by_EMPI(empi)
    operation_date = extract_data.get_operation_date(patient)
    values = []
    for doc_type in patient:
        # Bug fix: the None test must come first; the original's
        # 'doc_type in self.doc_types or self.doc_types == None'
        # evaluated the membership test first and raised TypeError
        # when self.doc_types was None.
        if self.doc_types is None or doc_type in self.doc_types:
            docs = patient[doc_type]
            # Normalize single-record document types to a list.
            if not isinstance(docs, list):
                docs = [docs]
            for doc in docs:
                if self.select_doc(doc, operation_date, doc_type):
                    value = self.parse_value(doc, operation_date, doc_type)
                    if value not in [None, []]:
                        values += value if isinstance(value, list) else [value]
    return self.transform_values(values)
def get_encounters_details(empi):
    """Used in testing the Enc field to understand what subfields exist
    and what values they take."""
    p = loader.get_patient_by_EMPI(empi)
    interesting_fields = ['Admit_Date', 'Inpatient_Outpatient',
                          'Discharge_Date', 'LOS_Days', 'DRG']
    for enc in p['Enc']:
        print('ENCOUNTER ' + enc['Encounter_number'] + ':')
        for field in interesting_fields:
            if enc[field]:
                print(field + ' = ' + str(enc[field]))
        # Count the populated Diagnosis_1..Diagnosis_9 slots.
        extra_diagnoses = sum(1 for i in range(1, 10) if enc['Diagnosis_' + str(i)])
        print('Extra Diagnoses = ' + str(extra_diagnoses))
        print('')
    # Tally inpatient vs. everything-else encounters.
    ins = sum(1 for enc in p['Enc'] if enc['Inpatient_Outpatient'] == 'Inpatient')
    outs = len(p['Enc']) - ins
    print(str(ins) + ' Inpatients')
    print(str(outs) + ' Outpatients')
def get_lab_history_before_date(empi, date, time_thresholds_months):
    """Given an empi and a date, will return a summarized history of the
    labs for that patient before the date.

    Specifically, will return a dictionary where the key is a lab group
    id and the value is a list of size len(time_thresholds_months) where
    each index represents whether the lab was mostly high ('H'), mostly
    low ('L') or mostly normal (None) within that threshold window
    before the date.  For example, 'BUN' => ['H', None, 'L'] indicates
    a transition from low (L) to high (H) leading up to the indicated
    date.
    """
    p = loader.get_patient_by_EMPI(empi)
    lab_history_counts = {}
    """lab_history_counts maps Group_Id -> 2-D array:
    first dimension  = time period (index into time_thresholds_months)
    second dimension = counts of 'H', 'L', and None
    example = [[15, 1, 2], ...] means in the first window, 'H' was
    most common (15 times)."""
    seconds_in_month = 365 * 24 * 60 * 60 / 12
    values = ['H', 'L', None]
    if 'Lab' in p.keys():
        for lab in p['Lab']:
            if lab['Seq_Date_Time'] and extract_data.parse_date(lab['Seq_Date_Time']) < date:
                lab_date = extract_data.parse_date(lab['Seq_Date_Time'])
                # Anything other than 'H'/'L' counts as a normal result.
                value = lab['Abnormal_Flag'] if lab['Abnormal_Flag'] in ['H', 'L'] else None
                value_index = values.index(value)
                # Walk to the smallest threshold window containing this lab.
                time_index = 0
                while time_index < len(time_thresholds_months) and (date - lab_date).total_seconds() > (time_thresholds_months[time_index] * seconds_in_month):
                    time_index += 1
                if time_index >= len(time_thresholds_months):
                    # Older than the largest window -- ignore this lab.
                    continue
                if lab['Group_Id'] not in lab_history_counts:
                    lab_history_counts[lab['Group_Id']] = np.zeros([len(time_thresholds_months), len(values)])
                lab_history_counts[lab['Group_Id']][time_index][value_index] += 1
    lab_history = {}
    # For each window report whichever of 'H'/'L'/None occurred most often.
    for lab_name in lab_history_counts:
        lab_history[lab_name] = [None] * len(time_thresholds_months)
        for i in range(len(time_thresholds_months)):
            lab_history[lab_name][i] = values[lab_history_counts[lab_name][i].argmax()]
    return lab_history
patient_empis, patient_efs = get_preprocessed_patients(sample_size=906) response_status = change_ef_values_to_categories(patient_efs) bigrams = [('Lno', [ "back pain", "daily nitroglycerin", "and palpitations", "sleep apnea", "admitted with", "has progressed", "married and", "father died" ]), ('Car', ["is normal"])] out = {} for (doc_type, patterns) in bigrams: for pattern in patterns: out[doc_type + pattern] = open( "bigram_data/" + doc_type + '_' + pattern.replace(' ', '_') + '_bigrams.txt', 'w') for (i, empi) in enumerate(patient_empis): print empi p = get_patient_by_EMPI(empi) for (doc_type, patterns) in bigrams: for doc in p[doc_type]: for pattern in patterns: if re.search(pattern, doc['free_text']): out[doc_type + pattern].write( "Patient: " + empi + " Outcome: " + ("Non-Response" if response_status[i] else "Response")) out[doc_type + pattern].write(doc['free_text']) for key in out.keys(): out[key].close()
import re
from loader import get_patient_by_EMPI
from extract_data import get_ef_value_notes
from shared_values import get_supplemental_list

# Regex capturing an EF percentage, e.g. "EF of 55%".
keywords = ['(?:ef|ejection fraction)\s*(?:of|is)?[:\s]*([0-9]*\.?[0-9]*)\s*%']
allpatients = get_supplemental_list()
# NOTE(review): itertools is used below but never imported in this
# chunk -- this raises NameError; add 'import itertools' (the sibling
# copy of this script has it).
# Process patients in batches of 20, one Turk-task CSV per batch.
for key, patients in itertools.groupby(enumerate(allpatients), lambda k: k[0] // 20):
    filename = "/home/ubuntu/www/turkTasks_" + str(key) + ".csv"
    print "Working on: " + filename
    rows = []
    for (_, patient) in patients:
        print patient
        patient_data = get_patient_by_EMPI(patient)
        efnotes = get_ef_value_notes(patient_data)
        for (_, ef_value, note) in efnotes:
            # The note id sits in the fourth '|' field of the second line.
            note_id = note.split('\n')[1].split('|')[3]
            # change new line to html br
            note = note.replace("\r\n", "<br>")
            # bold found matches
            for keyword in keywords:
                pattern = re.compile(keyword)
                matches = re.finditer(pattern, note)
                # offset tracks position shifts from insertions made to
                # 'note' (the insertion code continues past this view).
                offset = 0
                for match in matches:
                    start = match.start() + offset
                    end = match.end() + offset
def get_encounters_features(self, empi):
    """Build an encounter-based feature vector for the patient.

    Layout: 3 features per tracked encounter (inpatient flag, length
    of stay, extra-diagnosis count) for up to self.max_encounters
    encounters closest to the operation date, zero-padded to a fixed
    width, followed by 3 overall features (inpatient ratio, average
    LOS over inpatient stays, average extra diagnoses).  When
    self.only_general is set, only the 3 overall features are kept.
    """
    encounters = structured_data_extractor.get_encounters(empi)
    person = loader.get_patient_by_EMPI(empi)
    operation_date = build_graphs.get_operation_date(person)
    operation_index = 0
    for enc in encounters:
        if enc[0] < operation_date:
            operation_index += 1
        else:
            break
    # only look at encounters before the operation
    encounters = encounters[:operation_index]
    features = []
    # INDIVIDUAL ENCOUNTER FEATURES (3 x max_encounters)
    num_tracked_encounters = min(self.max_encounters, len(encounters))
    # tracked_encounters is sorted by increasing absolute time delta
    # with the operation date
    tracked_encounters = encounters[::-1][:num_tracked_encounters]
    inpatients = 0
    total_LOS = 0
    total_extra_diagnoses = 0
    for enc in tracked_encounters:
        # INDIVIDUAL FEATURE 1 - Inpatient vs. Outpatient
        if enc[1] == 'Inpatient':
            features.append(1)
            inpatients += 1
        else:
            features.append(0)
        # INDIVIDUAL FEATURE 2 - Length of Stay
        if enc[3] > 1:
            features.append(enc[3])
            total_LOS += enc[3]
        else:
            features.append(0)
        # INDIVIDUAL FEATURE 3 - Number of Extra Diagnoses
        features.append(enc[4])
        total_extra_diagnoses += enc[4]
    # fill in remaining vector space with zeros to make the vector
    # size exactly 3 x max_encounters
    if num_tracked_encounters < self.max_encounters:
        features.extend([0] * (3 * (self.max_encounters - num_tracked_encounters)))
    # OVERALL ENCOUNTERS FEATURES (3)
    # Bug fix: use float division below -- under Python 2 the
    # original integer divisions truncated these ratio/average
    # features to whole numbers (the inpatient ratio to 0 or 1).
    # OVERALL FEATURE 1 - Inpatient Ratio
    if len(tracked_encounters) > 0:
        features.append(float(inpatients) / len(tracked_encounters))
    else:
        features.append(0)
    # OVERALL FEATURE 2 - Average LOS
    if inpatients > 0:
        features.append(float(total_LOS) / inpatients)
    else:
        features.append(0)
    # OVERALL FEATURE 3 - Average Extra Diagnoses
    if len(tracked_encounters) > 0:
        features.append(float(total_extra_diagnoses) / len(tracked_encounters))
    else:
        features.append(0)
    if self.only_general:
        features = features[-3:]
    return np.array(features)
import itertools
import re
from loader import get_patient_by_EMPI
from extract_data import get_ef_value_notes
from shared_values import get_supplemental_list

# Regex capturing an EF percentage, e.g. "ejection fraction of 55%".
keywords = ['(?:ef|ejection fraction)\s*(?:of|is)?[:\s]*([0-9]*\.?[0-9]*)\s*%']
allpatients = get_supplemental_list()
# Process patients in batches of 20, one Turk-task CSV per batch.
for key, patients in itertools.groupby(enumerate(allpatients), lambda k: k[0]//20):
    filename = "/home/ubuntu/www/turkTasks_" + str(key) + ".csv"
    print "Working on: " + filename
    rows = []
    for (_, patient) in patients:
        print patient
        patient_data = get_patient_by_EMPI(patient)
        efnotes = get_ef_value_notes(patient_data)
        for (_, ef_value, note) in efnotes:
            # The note id sits in the fourth '|' field of the second line.
            note_id = note.split('\n')[1].split('|')[3]
            # change new line to html br
            note = note.replace("\r\n", "<br>")
            # bold found matches
            for keyword in keywords:
                pattern = re.compile(keyword)
                matches = re.finditer(pattern, note)
                # offset tracks position shifts caused by insertions
                # into 'note' (the insertion code continues past this view).
                offset = 0
                for match in matches:
                    start = match.start() + offset
                    end = match.end() + offset
import re from loader import get_patient_by_EMPI from model_tester import get_preprocessed_patients, change_ef_values_to_categories patient_empis, patient_efs = get_preprocessed_patients(sample_size=906) response_status = change_ef_values_to_categories(patient_efs) bigrams = [('Lno', ["back pain", "daily nitroglycerin", "and palpitations", "sleep apnea", "admitted with", "has progressed", "married and", "father died"]), ('Car', ["is normal"]) ] out = {} for (doc_type, patterns) in bigrams: for pattern in patterns: out[doc_type + pattern] = open("bigram_data/" + doc_type + '_' + pattern.replace(' ', '_') + '_bigrams.txt', 'w') for (i, empi) in enumerate(patient_empis): print empi p = get_patient_by_EMPI(empi) for (doc_type, patterns) in bigrams: for doc in p[doc_type]: for pattern in patterns: if re.search(pattern, doc['free_text']): out[doc_type + pattern].write("Patient: " + empi + " Outcome: " + ("Non-Response" if response_status[i] else "Response")) out[doc_type + pattern].write(doc['free_text']) for key in out.keys(): out[key].close()
def get_high_counts(self, empi):
    """Return, per lab group id, how many pre-operation results were high."""
    person = loader.get_patient_by_EMPI(empi)
    cutoff = build_graphs.get_operation_date(person)
    # Index 2 of get_labs_before_date is the high-count dictionary.
    return structured_data_extractor.get_labs_before_date(empi, cutoff)[2]
import numpy as np from loader import get_patient_by_EMPI from model_tester import get_preprocessed_patients from value_extractor_transformer import EFTransformer, LBBBTransformer, SinusRhythmTransformer, QRSTransformer, NYHATransformer, NICMTransformer print "Evaluating EF:" if True: X, Y = get_preprocessed_patients(sample_size=906) supp = [] results = [] for i in range(len(X)): p = get_patient_by_EMPI(X[i]) if p['Supplemental']: supp.append(p['NEW_EMPI']) calculated_ef = Y[i] ef_delta = int(p['Supplemental']['changle LVEF']) empi = p['NEW_EMPI'] result = (empi, calculated_ef, ef_delta) results.append(result) print result print supp #print results else: supp = [u'FAKE_EMPI_2', u'FAKE_EMPI_8', u'FAKE_EMPI_10', u'FAKE_EMPI_11', u'FAKE_EMPI_12', u'FAKE_EMPI_14', u'FAKE_EMPI_16', u'FAKE_EMPI_20', u'FAKE_EMPI_28', u'FAKE_EMPI_29', u'FAKE_EMPI_36', u'FAKE_EMPI_37', u'FAKE_EMPI_38', u'FAKE_EMPI_45', u'FAKE_EMPI_46', u'FAKE_EMPI_52', u'FAKE_EMPI_53', u'FAKE_EMPI_55', u'FAKE_EMPI_56', u'FAKE_EMPI_57', u'FAKE_EMPI_63', u'FAKE_EMPI_64', u'FAKE_EMPI_66', u'FAKE_EMPI_67', u'FAKE_EMPI_68', u'FAKE_EMPI_69', u'FAKE_EMPI_80', u'FAKE_EMPI_82', u'FAKE_EMPI_88', u'FAKE_EMPI_90', u'FAKE_EMPI_91', u'FAKE_EMPI_95', u'FAKE_EMPI_98', u'FAKE_EMPI_99', u'FAKE_EMPI_100', u'FAKE_EMPI_101', u'FAKE_EMPI_103', u'FAKE_EMPI_108', u'FAKE_EMPI_109', u'FAKE_EMPI_119', u'FAKE_EMPI_120', u'FAKE_EMPI_122', u'FAKE_EMPI_123', u'FAKE_EMPI_124', u'FAKE_EMPI_126', u'FAKE_EMPI_129', u'FAKE_EMPI_135', u'FAKE_EMPI_141', u'FAKE_EMPI_161', u'FAKE_EMPI_162', u'FAKE_EMPI_165', u'FAKE_EMPI_166', u'FAKE_EMPI_170', u'FAKE_EMPI_175', u'FAKE_EMPI_178', u'FAKE_EMPI_181', u'FAKE_EMPI_185', u'FAKE_EMPI_186', u'FAKE_EMPI_190', u'FAKE_EMPI_191', u'FAKE_EMPI_193', u'FAKE_EMPI_194', u'FAKE_EMPI_196', u'FAKE_EMPI_197', u'FAKE_EMPI_200', u'FAKE_EMPI_201', u'FAKE_EMPI_203', u'FAKE_EMPI_206', u'FAKE_EMPI_209', u'FAKE_EMPI_210', u'FAKE_EMPI_213', u'FAKE_EMPI_215', u'FAKE_EMPI_216', u'FAKE_EMPI_224', u'FAKE_EMPI_225', 
u'FAKE_EMPI_226', u'FAKE_EMPI_227', u'FAKE_EMPI_228', u'FAKE_EMPI_234', u'FAKE_EMPI_238', u'FAKE_EMPI_240', u'FAKE_EMPI_241', u'FAKE_EMPI_242', u'FAKE_EMPI_254', u'FAKE_EMPI_257', u'FAKE_EMPI_263', u'FAKE_EMPI_269', u'FAKE_EMPI_270', u'FAKE_EMPI_275', u'FAKE_EMPI_281', u'FAKE_EMPI_282', u'FAKE_EMPI_286', u'FAKE_EMPI_287', u'FAKE_EMPI_289', u'FAKE_EMPI_290', u'FAKE_EMPI_292', u'FAKE_EMPI_293', u'FAKE_EMPI_294', u'FAKE_EMPI_297', u'FAKE_EMPI_301', u'FAKE_EMPI_302', u'FAKE_EMPI_305', u'FAKE_EMPI_306', u'FAKE_EMPI_309', u'FAKE_EMPI_310', u'FAKE_EMPI_311', u'FAKE_EMPI_312', u'FAKE_EMPI_313', u'FAKE_EMPI_315', u'FAKE_EMPI_316', u'FAKE_EMPI_317', u'FAKE_EMPI_322', u'FAKE_EMPI_323', u'FAKE_EMPI_326', u'FAKE_EMPI_327', u'FAKE_EMPI_333', u'FAKE_EMPI_342', u'FAKE_EMPI_344', u'FAKE_EMPI_349', u'FAKE_EMPI_355', u'FAKE_EMPI_358', u'FAKE_EMPI_359', u'FAKE_EMPI_360', u'FAKE_EMPI_361', u'FAKE_EMPI_362', u'FAKE_EMPI_365', u'FAKE_EMPI_368', u'FAKE_EMPI_380', u'FAKE_EMPI_382', u'FAKE_EMPI_386', u'FAKE_EMPI_390', u'FAKE_EMPI_391', u'FAKE_EMPI_392', u'FAKE_EMPI_396', u'FAKE_EMPI_397', u'FAKE_EMPI_399', u'FAKE_EMPI_401', u'FAKE_EMPI_402', u'FAKE_EMPI_407', u'FAKE_EMPI_419', u'FAKE_EMPI_425', u'FAKE_EMPI_427', u'FAKE_EMPI_428', u'FAKE_EMPI_430', u'FAKE_EMPI_432', u'FAKE_EMPI_434', u'FAKE_EMPI_436', u'FAKE_EMPI_438', u'FAKE_EMPI_440', u'FAKE_EMPI_441', u'FAKE_EMPI_443', u'FAKE_EMPI_445', u'FAKE_EMPI_446', u'FAKE_EMPI_447', u'FAKE_EMPI_463', u'FAKE_EMPI_465', u'FAKE_EMPI_480', u'FAKE_EMPI_481', u'FAKE_EMPI_484', u'FAKE_EMPI_488', u'FAKE_EMPI_491', u'FAKE_EMPI_496', u'FAKE_EMPI_497', u'FAKE_EMPI_500', u'FAKE_EMPI_501', u'FAKE_EMPI_508', u'FAKE_EMPI_510', u'FAKE_EMPI_513', u'FAKE_EMPI_514', u'FAKE_EMPI_515', u'FAKE_EMPI_526', u'FAKE_EMPI_528', u'FAKE_EMPI_531', u'FAKE_EMPI_534', u'FAKE_EMPI_536', u'FAKE_EMPI_539', u'FAKE_EMPI_541', u'FAKE_EMPI_547', u'FAKE_EMPI_549', u'FAKE_EMPI_559', u'FAKE_EMPI_563', u'FAKE_EMPI_568', u'FAKE_EMPI_569', u'FAKE_EMPI_570', u'FAKE_EMPI_582', u'FAKE_EMPI_583', 
u'FAKE_EMPI_587', u'FAKE_EMPI_590', u'FAKE_EMPI_591', u'FAKE_EMPI_607', u'FAKE_EMPI_613', u'FAKE_EMPI_614', u'FAKE_EMPI_616', u'FAKE_EMPI_619', u'FAKE_EMPI_622', u'FAKE_EMPI_623', u'FAKE_EMPI_627', u'FAKE_EMPI_631', u'FAKE_EMPI_633', u'FAKE_EMPI_634', u'FAKE_EMPI_635', u'FAKE_EMPI_638', u'FAKE_EMPI_639', u'FAKE_EMPI_642', u'FAKE_EMPI_644', u'FAKE_EMPI_650', u'FAKE_EMPI_651', u'FAKE_EMPI_656', u'FAKE_EMPI_658', u'FAKE_EMPI_660', u'FAKE_EMPI_662', u'FAKE_EMPI_663', u'FAKE_EMPI_668', u'FAKE_EMPI_670', u'FAKE_EMPI_673', u'FAKE_EMPI_675', u'FAKE_EMPI_677', u'FAKE_EMPI_678', u'FAKE_EMPI_679', u'FAKE_EMPI_682', u'FAKE_EMPI_691', u'FAKE_EMPI_694', u'FAKE_EMPI_696', u'FAKE_EMPI_705', u'FAKE_EMPI_706', u'FAKE_EMPI_708', u'FAKE_EMPI_710', u'FAKE_EMPI_713', u'FAKE_EMPI_715', u'FAKE_EMPI_716', u'FAKE_EMPI_723', u'FAKE_EMPI_729', u'FAKE_EMPI_730', u'FAKE_EMPI_731', u'FAKE_EMPI_733', u'FAKE_EMPI_735', u'FAKE_EMPI_738', u'FAKE_EMPI_739', u'FAKE_EMPI_741', u'FAKE_EMPI_744', u'FAKE_EMPI_748', u'FAKE_EMPI_751', u'FAKE_EMPI_753', u'FAKE_EMPI_757', u'FAKE_EMPI_762', u'FAKE_EMPI_768', u'FAKE_EMPI_769', u'FAKE_EMPI_774', u'FAKE_EMPI_777', u'FAKE_EMPI_780', u'FAKE_EMPI_781', u'FAKE_EMPI_785', u'FAKE_EMPI_790', u'FAKE_EMPI_792', u'FAKE_EMPI_800', u'FAKE_EMPI_803', u'FAKE_EMPI_807', u'FAKE_EMPI_820', u'FAKE_EMPI_824', u'FAKE_EMPI_826', u'FAKE_EMPI_827', u'FAKE_EMPI_829', u'FAKE_EMPI_830', u'FAKE_EMPI_832', u'FAKE_EMPI_838', u'FAKE_EMPI_839', u'FAKE_EMPI_840', u'FAKE_EMPI_843', u'FAKE_EMPI_851', u'FAKE_EMPI_853', u'FAKE_EMPI_858', u'FAKE_EMPI_859', u'FAKE_EMPI_863', u'FAKE_EMPI_866', u'FAKE_EMPI_873', u'FAKE_EMPI_876', u'FAKE_EMPI_878', u'FAKE_EMPI_881', u'FAKE_EMPI_884', u'FAKE_EMPI_885', u'FAKE_EMPI_886', u'FAKE_EMPI_891', u'FAKE_EMPI_895', u'FAKE_EMPI_900', u'FAKE_EMPI_903', u'FAKE_EMPI_904'] results = [(u'FAKE_EMPI_2', 0.0, 11), (u'FAKE_EMPI_8', 10.0, 10), (u'FAKE_EMPI_10', 14.0, 11), (u'FAKE_EMPI_11', 13.0, 8), (u'FAKE_EMPI_12', 26.0, 1), (u'FAKE_EMPI_14', 32.0, 21), (u'FAKE_EMPI_16', 
14.0, -1), (u'FAKE_EMPI_20', 15.0, 23), (u'FAKE_EMPI_28', 37.0, 0), (u'FAKE_EMPI_29', 0.0, -3), (u'FAKE_EMPI_36', 22.0, 30), (u'FAKE_EMPI_37', -2.0, -2), (u'FAKE_EMPI_38', -24.0, 3), (u'FAKE_EMPI_45', 11.0, 21), (u'FAKE_EMPI_46', 0.0, 16), (u'FAKE_EMPI_52', 24.0, 18), (u'FAKE_EMPI_53', 14.0, -7), (u'FAKE_EMPI_55', 1.0, 4), (u'FAKE_EMPI_56', 18.0, 4), (u'FAKE_EMPI_57', 12.0, -2), (u'FAKE_EMPI_63', 26.0, 26), (u'FAKE_EMPI_64', 11.0, 10), (u'FAKE_EMPI_66', 4.0, 4), (u'FAKE_EMPI_67', 16.0, 16), (u'FAKE_EMPI_68', 20.0, 23), (u'FAKE_EMPI_69', 1.0, -5), (u'FAKE_EMPI_80', -3.0, 11), (u'FAKE_EMPI_82', 6.0, -4), (u'FAKE_EMPI_88', 15.0, 15), (u'FAKE_EMPI_90', 7.0, 7), (u'FAKE_EMPI_91', 21.0, 20), (u'FAKE_EMPI_95', 24.0, 24), (u'FAKE_EMPI_98', 4.0, 8), (u'FAKE_EMPI_99', 0.0, -5), (u'FAKE_EMPI_100', 18.0, 27), (u'FAKE_EMPI_101', 15.0, 8), (u'FAKE_EMPI_103', 2.0, 2), (u'FAKE_EMPI_108', 1.0, 1), (u'FAKE_EMPI_109', -2.0, -2), (u'FAKE_EMPI_119', -24.0, 6), (u'FAKE_EMPI_120', 15.0, 19), (u'FAKE_EMPI_122', 0.0, -2), (u'FAKE_EMPI_123', 37.0, 29), (u'FAKE_EMPI_124', 4.0, 6), (u'FAKE_EMPI_126', 7.0, 14), (u'FAKE_EMPI_129', 0.0, -4), (u'FAKE_EMPI_135', -28.0, -9), (u'FAKE_EMPI_141', 35.0, 31), (u'FAKE_EMPI_161', 10.0, 5), (u'FAKE_EMPI_162', 28.0, 16), (u'FAKE_EMPI_165', 11.0, 5), (u'FAKE_EMPI_166', -23.0, 5), (u'FAKE_EMPI_170', 11.0, 24), (u'FAKE_EMPI_175', 26.0, 13), (u'FAKE_EMPI_178', 15.0, 11), (u'FAKE_EMPI_181', -1.0, 12), (u'FAKE_EMPI_185', 0.0, -1), (u'FAKE_EMPI_186', 2.0, 4), (u'FAKE_EMPI_190', 47.0, 39), (u'FAKE_EMPI_191', 15.0, 15), (u'FAKE_EMPI_193', 2.0, 2), (u'FAKE_EMPI_194', -4.0, 4), (u'FAKE_EMPI_196', -2.0, -4), (u'FAKE_EMPI_197', -3.0, -1), (u'FAKE_EMPI_200', 5.0, 8), (u'FAKE_EMPI_201', 3.0, 4), (u'FAKE_EMPI_203', 1.0, 1), (u'FAKE_EMPI_206', 5.0, 12), (u'FAKE_EMPI_209', 1.0, 4), (u'FAKE_EMPI_210', 17.0, 17), (u'FAKE_EMPI_213', 7.0, 6), (u'FAKE_EMPI_215', -5.0, -8), (u'FAKE_EMPI_216', 65.0, 6), (u'FAKE_EMPI_224', 20.0, 9), (u'FAKE_EMPI_225', 8.0, 12), (u'FAKE_EMPI_226', 
4.0, 9), (u'FAKE_EMPI_227', -10.0, 16), (u'FAKE_EMPI_228', 12.0, 12), (u'FAKE_EMPI_234', 15.0, 9), (u'FAKE_EMPI_238', 13.0, 13), (u'FAKE_EMPI_240', 22.0, 7), (u'FAKE_EMPI_241', -18.0, 6), (u'FAKE_EMPI_242', -1.0, 3), (u'FAKE_EMPI_254', 9.0, 9), (u'FAKE_EMPI_257', 5.0, -1), (u'FAKE_EMPI_263', 4.0, 3), (u'FAKE_EMPI_269', -1.0, -1), (u'FAKE_EMPI_270', 6.0, 20), (u'FAKE_EMPI_275', 18.0, 0), (u'FAKE_EMPI_281', -3.0, -2), (u'FAKE_EMPI_282', -8.0, -6), (u'FAKE_EMPI_286', 13.0, 13), (u'FAKE_EMPI_287', 25.0, 10), (u'FAKE_EMPI_289', 22.0, 21), (u'FAKE_EMPI_290', -7.0, 8), (u'FAKE_EMPI_292', 5.0, 7), (u'FAKE_EMPI_293', 9.0, 10), (u'FAKE_EMPI_294', 6.0, 17), (u'FAKE_EMPI_297', 15.0, 24), (u'FAKE_EMPI_301', -3.0, -3), (u'FAKE_EMPI_302', 24.0, 19), (u'FAKE_EMPI_305', 3.0, 1), (u'FAKE_EMPI_306', 16.0, 11), (u'FAKE_EMPI_309', 14.0, 14), (u'FAKE_EMPI_310', 16.0, 11), (u'FAKE_EMPI_311', 3.0, -5), (u'FAKE_EMPI_312', 41.0, 3), (u'FAKE_EMPI_313', 22.0, 2), (u'FAKE_EMPI_315', 16.0, 6), (u'FAKE_EMPI_316', 21.0, 22), (u'FAKE_EMPI_317', -16.0, -6), (u'FAKE_EMPI_322', -14.0, 0), (u'FAKE_EMPI_323', 9.0, 8), (u'FAKE_EMPI_326', 11.0, 11), (u'FAKE_EMPI_327', 17.0, 17), (u'FAKE_EMPI_333', -20.0, -10), (u'FAKE_EMPI_342', 28.0, 25), (u'FAKE_EMPI_344', -5.0, -1), (u'FAKE_EMPI_349', 63.0, -4), (u'FAKE_EMPI_355', 1.0, 5), (u'FAKE_EMPI_358', -19.0, -9), (u'FAKE_EMPI_359', 27.0, 9), (u'FAKE_EMPI_360', -11.0, -11), (u'FAKE_EMPI_361', -14.0, 0), (u'FAKE_EMPI_362', 27.0, 25), (u'FAKE_EMPI_365', 15.0, 20), (u'FAKE_EMPI_368', -9.0, -9), (u'FAKE_EMPI_380', -8.0, 4), (u'FAKE_EMPI_382', -1.0, 1), (u'FAKE_EMPI_386', 4.0, -1), (u'FAKE_EMPI_390', 8.0, 8), (u'FAKE_EMPI_391', 5.0, -2), (u'FAKE_EMPI_392', 36.0, 29), (u'FAKE_EMPI_396', 24.0, 24), (u'FAKE_EMPI_397', 62.0, -1), (u'FAKE_EMPI_399', -4.0, 8), (u'FAKE_EMPI_401', 44.0, 29), (u'FAKE_EMPI_402', 0.0, 15), (u'FAKE_EMPI_407', -3.0, -4), (u'FAKE_EMPI_419', 24.0, 14), (u'FAKE_EMPI_425', -3.0, -3), (u'FAKE_EMPI_427', 1.0, 1), (u'FAKE_EMPI_428', 27.0, 16), 
(u'FAKE_EMPI_430', 1.0, 2), (u'FAKE_EMPI_432', 27.0, 24), (u'FAKE_EMPI_434', 4.0, 2), (u'FAKE_EMPI_436', 17.0, 8), (u'FAKE_EMPI_438', 21.0, 13), (u'FAKE_EMPI_440', 3.0, 8), (u'FAKE_EMPI_441', 15.0, 14), (u'FAKE_EMPI_443', 4.0, 4), (u'FAKE_EMPI_445', 10.0, 10), (u'FAKE_EMPI_446', 19.0, 19), (u'FAKE_EMPI_447', 3.0, 3), (u'FAKE_EMPI_463', 6.0, 6), (u'FAKE_EMPI_465', 10.0, 10), (u'FAKE_EMPI_480', 4.0, 6), (u'FAKE_EMPI_481', 24.0, 12), (u'FAKE_EMPI_484', -3.0, -4), (u'FAKE_EMPI_488', 22.0, 13), (u'FAKE_EMPI_491', -4.0, -4), (u'FAKE_EMPI_496', 24.0, 10), (u'FAKE_EMPI_497', -1.0, -1), (u'FAKE_EMPI_500', 12.0, 12), (u'FAKE_EMPI_501', 17.0, 12), (u'FAKE_EMPI_508', 0.0, -3), (u'FAKE_EMPI_510', 20.0, 3), (u'FAKE_EMPI_513', 16.0, 6), (u'FAKE_EMPI_514', 18.0, 8), (u'FAKE_EMPI_515', 4.0, 4), (u'FAKE_EMPI_526', 38.0, 26), (u'FAKE_EMPI_528', 28.0, 17), (u'FAKE_EMPI_531', 5.0, 0), (u'FAKE_EMPI_534', 25.0, 3), (u'FAKE_EMPI_536', -6.0, -6), (u'FAKE_EMPI_539', 15.0, 0), (u'FAKE_EMPI_541', 1.0, 4), (u'FAKE_EMPI_547', 14.0, 7), (u'FAKE_EMPI_549', 7.0, 3), (u'FAKE_EMPI_559', -5.0, -3), (u'FAKE_EMPI_563', 19.0, 9), (u'FAKE_EMPI_568', 14.0, 14), (u'FAKE_EMPI_569', 17.0, 17), (u'FAKE_EMPI_570', 26.0, 16), (u'FAKE_EMPI_582', -20.0, -9), (u'FAKE_EMPI_583', 2.0, 2), (u'FAKE_EMPI_587', 2.0, -1), (u'FAKE_EMPI_590', 5.0, 4), (u'FAKE_EMPI_591', 7.0, -7), (u'FAKE_EMPI_607', 3.0, -1), (u'FAKE_EMPI_613', 18.0, 4), (u'FAKE_EMPI_614', 23.0, 7), (u'FAKE_EMPI_616', 18.0, 12), (u'FAKE_EMPI_619', -2.0, -3), (u'FAKE_EMPI_622', 11.0, 10), (u'FAKE_EMPI_623', 0.0, 15), (u'FAKE_EMPI_627', 22.0, 1), (u'FAKE_EMPI_631', 5.0, 5), (u'FAKE_EMPI_633', 1.0, 0), (u'FAKE_EMPI_634', -2.0, -2), (u'FAKE_EMPI_635', -4.0, -4), (u'FAKE_EMPI_638', -1.0, 0), (u'FAKE_EMPI_639', -25.0, 23), (u'FAKE_EMPI_642', 9.0, 9), (u'FAKE_EMPI_644', 22.0, 12), (u'FAKE_EMPI_650', 41.0, -9), (u'FAKE_EMPI_651', 20.0, 14), (u'FAKE_EMPI_656', 6.0, 6), (u'FAKE_EMPI_658', 11.0, 12), (u'FAKE_EMPI_660', -21.0, -5), (u'FAKE_EMPI_662', 20.0, 10), 
(u'FAKE_EMPI_663', 4.0, 0), (u'FAKE_EMPI_668', 20.0, 13), (u'FAKE_EMPI_670', -5.0, -5), (u'FAKE_EMPI_673', 22.0, 30), (u'FAKE_EMPI_675', 31.0, 2), (u'FAKE_EMPI_677', 2.0, 2), (u'FAKE_EMPI_678', 23.0, 23), (u'FAKE_EMPI_679', 63.0, -6), (u'FAKE_EMPI_682', 1.0, -3), (u'FAKE_EMPI_691', 12.0, 6), (u'FAKE_EMPI_694', 4.0, 5), (u'FAKE_EMPI_696', 11.0, 19), (u'FAKE_EMPI_705', -2.0, -2), (u'FAKE_EMPI_706', 5.0, 5), (u'FAKE_EMPI_708', 5.0, -3), (u'FAKE_EMPI_710', 8.0, 1), (u'FAKE_EMPI_713', 22.0, 10), (u'FAKE_EMPI_715', 16.0, 21), (u'FAKE_EMPI_716', -16.0, -2), (u'FAKE_EMPI_723', 20.0, 3), (u'FAKE_EMPI_729', 7.0, 25), (u'FAKE_EMPI_730', 8.0, 10), (u'FAKE_EMPI_731', -14.0, -4), (u'FAKE_EMPI_733', 15.0, 0), (u'FAKE_EMPI_735', 25.0, 15), (u'FAKE_EMPI_738', 2.0, -1), (u'FAKE_EMPI_739', -1.0, 14), (u'FAKE_EMPI_741', 0.0, 1), (u'FAKE_EMPI_744', 1.0, 1), (u'FAKE_EMPI_748', -10.0, -10), (u'FAKE_EMPI_751', 7.0, 7), (u'FAKE_EMPI_753', -2.0, -1), (u'FAKE_EMPI_757', 7.0, -6), (u'FAKE_EMPI_762', 33.0, 21), (u'FAKE_EMPI_768', 0.0, 0), (u'FAKE_EMPI_769', 13.0, 7), (u'FAKE_EMPI_774', -15.0, 12), (u'FAKE_EMPI_777', 20.0, 17), (u'FAKE_EMPI_780', -3.0, -3), (u'FAKE_EMPI_781', 9.0, 9), (u'FAKE_EMPI_785', 19.0, 9), (u'FAKE_EMPI_790', 36.0, 36), (u'FAKE_EMPI_792', 11.0, -4), (u'FAKE_EMPI_800', 29.0, 21), (u'FAKE_EMPI_803', 3.0, 2), (u'FAKE_EMPI_807', 4.0, 4), (u'FAKE_EMPI_820', -1.0, 4), (u'FAKE_EMPI_824', 28.0, 9), (u'FAKE_EMPI_826', 16.0, 10), (u'FAKE_EMPI_827', 2.0, 2), (u'FAKE_EMPI_829', 17.0, -10), (u'FAKE_EMPI_830', -19.0, 6), (u'FAKE_EMPI_832', 13.0, 12), (u'FAKE_EMPI_838', 8.0, 7), (u'FAKE_EMPI_839', 18.0, -1), (u'FAKE_EMPI_840', 4.0, 2), (u'FAKE_EMPI_843', 6.0, 2), (u'FAKE_EMPI_851', 16.0, -2), (u'FAKE_EMPI_853', -3.0, -3), (u'FAKE_EMPI_858', -20.0, -5), (u'FAKE_EMPI_859', 27.0, 27), (u'FAKE_EMPI_863', 24.0, -2), (u'FAKE_EMPI_866', -9.0, -13), (u'FAKE_EMPI_873', 24.0, 23), (u'FAKE_EMPI_876', 3.0, 13), (u'FAKE_EMPI_878', 9.0, 9), (u'FAKE_EMPI_881', 9.0, 9), (u'FAKE_EMPI_884', 13.0, 3), 
(u'FAKE_EMPI_885', 16.0, 9), (u'FAKE_EMPI_886', 2.0, 1), (u'FAKE_EMPI_891', -3.0, 3), (u'FAKE_EMPI_895', 8.0, 16), (u'FAKE_EMPI_900', 12.0, -3), (u'FAKE_EMPI_903', 1.0, -7), (u'FAKE_EMPI_904', -8.0, -12)] def report_standard_metrics(X,Y):
date_to_diagnoses = get_date_to_diagnoses(empi) chronic_diagnoses = get_chronic_diagnoses(empi, 90) start_date = diagnoses[0][0] end_date = diagnoses[-1][0] print("~~~~~~~~~~~~~~~~") print("Start Date: " + str(start_date)) print("End Date: " + str(end_date)) print("Num. of Entries: " + str(len(diagnoses))) print("Num. of Visits: " + str(len(date_to_diagnoses))) # print("Chronic Diagnoses: " + str(chronic_diagnoses)) elif command == 'encounter': encounters = get_encounters(empi) for enc in encounters: print(enc) #get_encounters_details(empi) elif command == 'labs': """ lab_counts, lab_lows, lab_highs, lab_latest = get_labs_before_date(empi, extract_data.parse_date('11/16/2015')) for lab in lab_counts: print(lab) print('COUNT: ' + str(lab_counts[lab])) print('LOWS: ' + str(lab_lows[lab]) if lab in lab_lows else 'LOWS: 0') print('HIGHS: ' + str(lab_highs[lab]) if lab in lab_highs else 'HIGHS: 0') print('LATEST: ' + str(lab_latest[lab])) print('') """ operation_date = build_graphs.get_operation_date(loader.get_patient_by_EMPI(empi)) lab_values = get_recent_lab_values(empi, operation_date) for lab in lab_values: print(str(lab) + ": " + str(lab_values[lab]))