Example #1
import gspread
from oauth2client.service_account import ServiceAccountCredentials

def getMail():
    scope = [
        'https://www.googleapis.com/auth/spreadsheets',
        'https://www.googleapis.com/auth/drive'
    ]
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        'WEBAPP-07c5da9a07a4.json', scope)
    gc = gspread.authorize(credentials)
    sheet = gc.open("Results").sheet1
    email = sheet.col_values(2)
    #name = sheet.col_values(3)

    # Find the last populated row: stop at the first empty cell in column 3
    limit = 0
    for j in range(1, sheet.row_count):
        limit = limit + 1
        if sheet.cell(row=j, col=3).value == '':
            break
    limit = limit - 1

    # Collect one row per address and write the CSV once, after the loop
    data = []
    for i in range(1, limit):
        data.append([email[i]])
    write_csv(data)
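The oauth2client package used above has long been deprecated. If the project can move to a recent gspread release (3.6 or later; an assumption about the environment), the authorization boilerplate shrinks to a single call:

import gspread

# gspread reads the service-account JSON key itself, replacing the
# deprecated oauth2client flow (default scopes cover Sheets and Drive).
gc = gspread.service_account(filename='WEBAPP-07c5da9a07a4.json')
sheet = gc.open("Results").sheet1

Note also that sheet.cell(row=j, col=3) issues one API request per row; fetching the column once with sheet.col_values(3) and scanning it locally would avoid that.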
Example #2
def main():
    # load user data
    atk_csv = load_csv('user_data/atk.csv')
    for row in atk_csv:
        print(row)
    def_csv = load_csv('user_data/def.csv')
    for row in def_csv:
        print(row)
    hp_csv = load_csv('user_data/hp.csv')
    for row in hp_csv:
        print(row)
    prio_csv = load_csv('user_data/priority.csv')
    for row in prio_csv:
        print(row)

    # load reference data
    items_csv = load_csv('reference_data/items.csv')

    # calculate best armor sets
    atk_def_sets, hp_def_sets = calculate_best_armor_sets(atk_csv, def_csv, hp_csv, prio_csv, items_csv)

    # write output to files
    write_csv(atk_csv[0].keys(), atk_def_sets, 'output/atk_def_sets.csv')
    write_csv(hp_csv[0].keys(), hp_def_sets, 'output/hp_def_sets.csv')
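Neither load_csv nor the three-argument write_csv is shown on this page; each example comes from a different project, so even the signatures vary (Example #1's write_csv takes a single argument). A minimal sketch consistent with this example's call sites, assuming load_csv returns a list of dicts (the code calls atk_csv[0].keys()):

import csv

def load_csv(file_name):
    # Assumed behavior: one dict per row, keyed by the header line.
    with open(file_name, newline='') as f:
        return list(csv.DictReader(f))

def write_csv(column_names, rows, file_name):
    # Assumed behavior: header row first, then the data rows.
    with open(file_name, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(list(column_names))
        writer.writerows(rows)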
Example #3
    # (Fragment starts mid-function: the matching `if` is assumed to pick
    # the row located for this patient, falling back to the first row.)
    if row_index is not None:
        patient_row = patient_rows[row_index]
    else:
        patient_row = patient_rows[0]

    patient_row[status_col] = patient_status

    filtered_patient_data.append(patient_row)

# Add vital status
df = sql_query("SELECT dossier FROM " + \
  "dw_test.orcl_cichum_bendeces_live WHERE " + \
  "dossier in ('" + "', '".join(patient_mrns) + "') " + \
  "AND dhredeces > '2020-01-01'")

dead = [row.dossier for index, row in df.iterrows()]
final_patient_data = []

for row in filtered_patient_data:
    final_row = row
    if row[0] in dead:
        final_row = final_row + ['dead']
    else:
        final_row = final_row + ['alive']
    if row[0] in admitted_mrns:
        final_row = final_row + ['yes']
    else:
        final_row = final_row + ['no']
    final_patient_data.append(final_row)

write_csv(TABLE_COLUMNS['patient_data'], final_patient_data,
          os.path.join(CSV_DIRECTORY, 'patient_data.csv'))
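The query above assembles its IN list by string concatenation, which breaks on quotes in the data and is an SQL-injection risk. sql_query is a project-local wrapper, but if the warehouse is reachable through a DB-API driver such as psycopg2 (an assumption), the same filter can use bound parameters:

import psycopg2

conn = psycopg2.connect('dbname=dw')  # connection details assumed
cur = conn.cursor()
# ANY(%s) with a Python list replaces the hand-built IN (...) clause.
cur.execute(
    "SELECT dossier FROM dw_test.orcl_cichum_bendeces_live "
    "WHERE dossier = ANY(%s) AND dhredeces > %s",
    (list(patient_mrns), '2020-01-01'))
dead = [dossier for (dossier,) in cur.fetchall()]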
Example #4
    else:
        lab_result_status = 'resulted'

    delta_hours = get_hours_between_datetimes(
        pcr_sample_times[str(patient_mrn)], str(lab_sample_time))

    if delta_hours > -48 and delta_hours < 7 * 24:

        mapped_lab_name = map_lab_name(lab_name)
        mapped_lab_sample_site = map_lab_sample_site(lab_name, lab_sample_site)

        try:
            mapped_lab_value = map_lab_result_value(lab_result_string)
        except Exception:  # unparsable result string: log and skip the row
            print('Invalid lab value: ' + str(lab_result_string))
            continue

        if mapped_lab_name == 'venous_o2_sat':
            print(mapped_lab_value)

        lab_data_rows.append([
            patient_mrn, mapped_lab_name, mapped_lab_sample_site,
            map_time(lab_sample_time),
            map_time(lab_result_time), lab_result_status, lab_result_units,
            lab_result_string, mapped_lab_value
        ])

print('Total rows: %d' % len(lab_data_rows))

write_csv(TABLE_COLUMNS['lab_data'], lab_data_rows,
          os.path.join(CSV_DIRECTORY, 'lab_data.csv'))
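get_hours_between_datetimes is another helper that never appears on this page. Judging from its call sites (two datetime strings in, signed hours out, plus a default_now flag used in Example #11 below when the end time may be missing), it might look like this; the timestamp format is an assumption:

from datetime import datetime

def get_hours_between_datetimes(start_str, end_str, default_now=False):
    fmt = '%Y-%m-%d %H:%M:%S'  # assumed format of the warehouse timestamps
    start = datetime.strptime(start_str, fmt)
    if default_now and not end_str:
        end = datetime.now()  # open-ended interval: measure up to now
    else:
        end = datetime.strptime(end_str, fmt)
    return (end - start).total_seconds() / 3600.0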
Example #5
    
    if hours < -48: continue
    
    patients_with_imaging.append(row_patient_mrn)
    imaging_accession_numbers.append(row.accession_number)
      
    imaging_accession_uid = generate_accession_uid(row.accession_number)
    imaging_acquired_time = row.date_heure_debut_examen
    
    if 'rx' in lower_desc: modality = 'xr'
    elif 'scan' in lower_desc: modality = 'ct'
    else: continue  # unknown exam type; don't reuse the previous row's modality

    imaging_data_rows.append([
      row_patient_mrn,
      imaging_accession_uid,
      modality, 'chest',
      map_time(imaging_acquired_time)
    ])

patients_with_imaging = np.unique(patients_with_imaging)
imaging_accession_numbers = np.unique(imaging_accession_numbers)

print('Number of patients with imaging: %d' % \
  len(patients_with_imaging))

write_csv(['accession_number'], \
 [[x] for x in imaging_accession_numbers], \
 IMAGING_LIST_FILENAME)

write_csv(TABLE_COLUMNS['imaging_data'], imaging_data_rows, 
  os.path.join(CSV_DIRECTORY, 'imaging_data.csv'))
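The if/elif chain that assigns modality generalizes more cleanly to a lookup table if further modalities appear; a sketch, with the keyword-to-modality pairs taken from the two cases above:

# Hypothetical lookup: substring of the exam description -> modality code.
MODALITY_BY_KEYWORD = {'rx': 'xr', 'scan': 'ct'}

def detect_modality(description):
    lower_desc = description.lower()
    for keyword, modality in MODALITY_BY_KEYWORD.items():
        if keyword in lower_desc:
            return modality
    return None  # unrecognized exam type; caller should skip the row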
Example #6
  if delta > 0:
    last_episode_by_dossier[patient_mrn] = episode

  episode_ids.append(episode_id)
  episodes_by_id[episode_id] = episode

df = sql_query("SELECT DISTINCT * FROM dw_test.orcl_cichum_bendeces_live WHERE " + \
  "dossier in ('" + "', '".join(patient_mrns) + "') " + \
  "AND dhredeces > '2020-01-01'")

for index, row in df.iterrows():
  patient_mrn = str(row.dossier)
  
  if patient_mrn in last_episode_by_dossier:
    last_episode_for_dossier = last_episode_by_dossier[patient_mrn]
    episode_id = last_episode_for_dossier['episode_id']
  else:
    episode_id = ''
  
  diagnosis_type = 'death'
  diagnosis_time = row.dhredeces

  diagnosis_data_rows.append([
    patient_mrn, episode_id, diagnosis_type, 
    'death', '', diagnosis_time
  ])

print('Total rows: %d' % len(diagnosis_data_rows))

write_csv(TABLE_COLUMNS['diagnosis_data'], diagnosis_data_rows, 
  os.path.join(CSV_DIRECTORY, 'diagnosis_data.csv'))
Example #7
for row in patient_data_rows:
    patient_mrn = str(row[0])
    patient_mrns.append(patient_mrn)
    pcr_sample_times[patient_mrn] = row[2]

df = sql_query("SELECT * from dw_test.orcl_hev_bipap_live WHERE " + \
  "start_dtm > '2020-01-01' AND dossier in (" + ", ".join(patient_mrns) + ")")

intervention_data_rows = []

for index, row in df.iterrows():

    patient_mrn = str(int(row.dossier))
    intervention_start_time = str(row.start_dtm)
    intervention_end_time = str(row.end_dtm)

    delta_hours = get_hours_between_datetimes(pcr_sample_times[patient_mrn],
                                              intervention_start_time)

    if delta_hours < -48: continue

    intervention_data_rows.append([
        patient_mrn, 'mechanical_ventilation', intervention_start_time,
        intervention_end_time
    ])

print('Total rows: %d' % len(intervention_data_rows))

write_csv(TABLE_COLUMNS['intervention_data'], intervention_data_rows,
          os.path.join(CSV_DIRECTORY, 'intervention_data.csv'))
Example #8
from scrape import scraper
from file_utils import read_csv, write_csv, read_file, write_file
from generate import _render_template, preprocess
from image import pass_gen
from mail import sendmail
import json

# Scraping the webpage and storing the data in a csv
data = scraper('http://scrape.kjscecodecell.com/')
write_csv(data)

# Reading the scraped data from the csv and preprocessing the data
participants = read_csv()
participants = preprocess(participants)

# Getting the list of mails to whom mails have already been sent
sent_mails = read_file()

# Looping over all participants
for participant in participants:
    # Checking if the participant was sent a mail previously
    if participant['email'] not in sent_mails:
        name = participant['name']
        email = participant['email']
        phone = participant['phone']
        payment_status = participant['payment']

        # Generating a message from the template
        message = _render_template(name, payment_status)

        # Generating a custom image
Example #9
for index, row in df.iterrows():

  patient_mrn = str(row.dossier)
  culture_type = str(row.longdesc).lower() 

  # Skip analyses that were flagged as unavailable
  if 'non disponible' in culture_type:
    continue

  culture_sample_site = row.specimencollectionmethodcd
  culture_sample_time = row.specimencollectiondtm
  culture_result_time = row.resultdtm
  culture_growth_value = row.growthcd

  #if row.growthcd is not None and 'pos' in row.growthcd.lower():
  #  print(row)

  culture_data_rows.append([
    patient_mrn, 
    map_culture_type(row.longdesc), 
    map_culture_specimen_type(culture_type, culture_sample_site),
    culture_sample_time, 
    culture_result_time, 
    map_culture_growth_value(culture_growth_value),
    map_culture_result_status(culture_growth_value)
  ])

print('Total rows: %d' % len(culture_data_rows))

write_csv(TABLE_COLUMNS['culture_data'], culture_data_rows, 
  os.path.join(CSV_DIRECTORY, 'culture_data.csv'))
Example #10
    for unique_value in unique_values:
      count = np.count_nonzero(column == unique_value)
      if count < 5:
        excluded_values_by_column[column_name].append(unique_value)

    column_index += 1
  
  censored_csv_rows = []

  for filtered_csv_row in filtered_csv_rows:
    is_censored = False
    column_index = 0
    for column_name in column_names:
      item = filtered_csv_row[column_index]
      if item in excluded_values_by_column[column_name]:
        is_censored = True
      column_index += 1
    if not is_censored:
      censored_csv_rows.append(filtered_csv_row)
  
  print('\n\n' + table_name + '\n\n')

  for i in range(0, int(np.ceil(len(column_names) / 3))):
    interval_start = i*3
    interval_end = np.min([i*3+2, len(column_names)-1])
    columns = column_names[interval_start:interval_end+1]
    tabulate_columns(columns, censored_csv_rows, offset=interval_start)
  
  filtered_table_file_name = os.path.join(MILA_CSV_DIRECTORY, table_name + '.csv')
  write_csv(column_names, censored_csv_rows, filtered_table_file_name)
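The fragment above implements small-cell suppression: any value occurring fewer than five times in its column flags the whole row for removal, so no published row is close to unique. A self-contained sketch of the same idea (function and threshold names are mine, not the project's):

import numpy as np

def suppress_small_cells(rows, threshold=5):
    # Drop every row that contains a value appearing fewer than
    # `threshold` times in its column.
    data = np.array(rows, dtype=object)
    keep = np.ones(len(rows), dtype=bool)
    for col in range(data.shape[1]):
        values, counts = np.unique(data[:, col], return_counts=True)
        rare = set(values[counts < threshold])
        keep &= np.array([v not in rare for v in data[:, col]])
    return [row for row, ok in zip(rows, keep) if ok]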
Example #11
      
      # Get length of stay
      episode_duration_hours = int(get_hours_between_datetimes(
        episode_start_time, episode_end_time, default_now=True
      ))
      # Keep only episodes whose admission began between 30 days before
      # and 7 days after a PCR test
      found_close = False
      for pcr_time in pcr_sample_times[patient_mrn]:

        hours_delta = get_hours_between_datetimes(
          admission_start_time, pcr_time)

        if hours_delta > -24*7 and hours_delta < 24*30: 
          found_close = True
      
      if not found_close: continue
      
      episode_data_rows.append([
        patient_mrn, 
        episode_id,
        episode_unit_type,
        episode_start_time, 
        episode_end_time,
        episode_description,
        episode_duration_hours
      ])

print('Total rows: %d' % len(episode_data_rows))

write_csv(TABLE_COLUMNS['episode_data'], episode_data_rows, 
  os.path.join(CSV_DIRECTORY, 'episode_data.csv'))
Example #12
    if patient_mrn in patient_mrns:

        pcr_result_time = row[-6]
        pcr_sample_time = row[-7]
        pcr_result_value = row[-4]

        if patient_mrn not in pcr_sample_times:
            pcr_sample_times[patient_mrn] = []

        if str(pcr_sample_time) in pcr_sample_times[patient_mrn]:
            #if DEBUG: print('Skipping duplicate PCRs')
            continue
        else:
            pcr_sample_times[patient_mrn].append(str(pcr_sample_time))

        pcr_data_rows.append([
            patient_mrn,
            map_pcr_name('covid-19 pcr'),
            map_pcr_sample_site('covid-19 pcr', 'écouvillon nasal'),
            map_time(pcr_sample_time),
            map_time(pcr_result_time),
            map_pcr_result_value(pcr_result_value),
            map_pcr_result_status(pcr_result_value)
        ])

print('Total rows: %d' % len(pcr_data_rows))

write_csv(TABLE_COLUMNS['pcr_data'],
          pcr_data_rows,
          os.path.join(CSV_DIRECTORY, 'pcr_data.csv'),
          remove_duplicates=True)
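This is the only example that passes remove_duplicates=True to write_csv. Assuming the helper sketched under Example #2, the flag could be implemented as order-preserving row deduplication:

import csv

def write_csv(column_names, rows, file_name, remove_duplicates=False):
    if remove_duplicates:
        # Assumed semantics: drop exact duplicate rows, keeping the first.
        seen = set()
        unique_rows = []
        for row in rows:
            key = tuple(row)
            if key not in seen:
                seen.add(key)
                unique_rows.append(row)
        rows = unique_rows
    with open(file_name, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(list(column_names))
        writer.writerows(rows)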
Example #13
        #print(oxygenation_device, oxygen_flow_rate, fraction_inspired_oxygen)

#df2 = sql_query(
# "SELECT * from public.urgchum_episod_sn LIMIT 100 "
#)

#df = sql_query("SELECT * FROM dw_v01.oacis_ob WHERE " +
#    "rsltvalue IS NOT NULL AND " +
#    "dossier in (" + ", ".join(patient_mrns) + ") LIMIT 100")

# for index, row in df.iterrows():

#  print(row)
#  patient_mrn = str(row.dossier)
#  measurement_name = row.longdesc
#  #measurement_time = row.entereddtm
#  measurement_value = row.rsltvalue
#  measurement_units = row.unitcd

#  delta_hours = get_hours_between_datetimes(
#    pcr_sample_times[str(patient_mrn)], str(measurement_time))

#  if delta_hours > -48 and delta_hours < 7*24:
#    measurement_data_rows.append([
#      patient_mrn, measurement_name, measurement_time, measurement_value
#    ])

print('Total rows: %d' % len(observation_data_rows))

write_csv(TABLE_COLUMNS['observation_data'], observation_data_rows,
          os.path.join(CSV_DIRECTORY, 'observation_data.csv'))