Example #1
import os
from datetime import datetime

import pandas as pd
import pyspark.sql.functions as sf

# STATIC_PATH, HIRID2_PATH, get_spark_session and process_stay_information
# are module-level names defined elsewhere in the source.
def process_batch(patientid_start, patientid_stop):
    OUTPUT_DIR = "/cluster/work/grlab/clinical/hirid2/research/faltysm/volume_challenge/patient_stay"
    mon_value_ids = [200, 110]
    volume_pharma_ids = [1000752]
    observ_value_ids = [10000450, 14000140, 10000400, 14000100]

    # open Spark session
    spark = get_spark_session(8, 1024, 64)
    output_dir = os.path.join(OUTPUT_DIR, datetime.today().strftime('%Y-%m-%d'))

    df_stay_table = pd.read_hdf(STATIC_PATH, where=f'PatientID >= {patientid_start} and PatientID <= {patientid_stop}')
    patient_filter = f'patientid >= {patientid_start} and patientid <= {patientid_stop}'
    df_monvals = spark.read.parquet(os.path.join(HIRID2_PATH, 'p_monvals')) \
        .where(sf.col('variableid').isin(mon_value_ids)).where(patient_filter).toPandas()
    df_dervals = spark.read.parquet(os.path.join(HIRID2_PATH, 'p_dervals')) \
        .where(sf.col('variableid').isin([30000140])).where(patient_filter).toPandas()
    df_pharmarec = spark.read.parquet(os.path.join(HIRID2_PATH, 'p_pharmarec')) \
        .where(sf.col('pharmaid').isin(volume_pharma_ids)).where(patient_filter).toPandas()
    # Impella/ECMO care-sequence entries, excluding German-named wound ('wunde'),
    # removal ('ex'), suture ('naht') and blood ('blut') entries
    df_seq = spark.read.parquet(os.path.join(HIRID2_PATH, 'p_patcareseqs')) \
        .where("(lower(name) like '%impella%' or lower(name) like '%ecmo%') "
               "and lower(name) not like '%wunde%' and lower(name) not like '%ex%' "
               "and lower(name) not like '%naht%' and lower(name) not like '%blut%'") \
        .where(patient_filter).toPandas()
    df_patitem = spark.read.parquet(os.path.join(HIRID2_PATH, 'p_codeditem')).where(patient_filter).toPandas()
    df_code = spark.read.parquet(os.path.join(HIRID2_PATH, 's_coderef')).toPandas()
    df_apache_groups = pd.read_csv('/cluster/work/grlab/clinical/hirid2/research/faltysm/volume_challenge/misc/apache_group.csv')
    df_observedrec = spark.read.parquet(os.path.join(HIRID2_PATH, 'p_observrec')) \
        .where(sf.col('variableid').isin(observ_value_ids)).where(patient_filter).toPandas()

    df_stay_info = pd.DataFrame(columns=[
        'patientid', 'rel_referencepoint', 'lenght_of_stay', 'age_at_admission',
        'sex', 'invasive_hr_measurement_criterion', 'invasive_bp_measurement_criterion',
        'age_criterion', 'study_drug_criterion', 'ecmo_criterion', 'adm_diag',
        'adm_codeid', 'emergency', 'height', 'apacheII', 'sepsis', 'weight',
        'outcome_death'])
    for idx, row in df_stay_table.iterrows():
        print(row.PatientID)
        stay_info = process_stay_information(row.PatientID, row.birthYear, row.Sex,
                                             df_monvals, df_pharmarec, df_seq,
                                             df_patitem, df_code, df_apache_groups,
                                             df_dervals, df_observedrec)
        # DataFrame.append was removed in pandas 2.0; build a one-row frame and concat
        df_stay_info = pd.concat(
            [df_stay_info, pd.DataFrame([stay_info], columns=df_stay_info.columns)],
            ignore_index=True)
    
    if not os.path.exists(output_dir): 
        os.makedirs(output_dir)

    if not df_stay_info.empty:
        df_stay_info.to_parquet(os.path.join(output_dir, "patientstay_{}_{}.parquet".format(patientid_start, patientid_stop)))
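This variant computes and persists its result, so a driver only needs to sweep the patient ID space. A minimal sketch, where FIRST_ID, LAST_ID and CHUNK_SIZE are illustrative constants rather than values from the source:

# Hypothetical driver: sweep the patient ID space in fixed-size chunks
FIRST_ID, LAST_ID, CHUNK_SIZE = 1, 100000, 5000

for start in range(FIRST_ID, LAST_ID + 1, CHUNK_SIZE):
    process_batch(start, min(start + CHUNK_SIZE - 1, LAST_ID))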
Example #2
import glob
import os

import pandas as pd

# mon_value_ids, the *_PATH constants, get_spark_session and process_patient
# are module-level names defined elsewhere in the source.
def process_batch(batchid, patientid_start, patientid_stop):
    print(
        f'Processing batch id: {batchid}: {patientid_start}-{patientid_stop}')
    # open Spark session
    spark = get_spark_session(8, 1024, 64)

    cand_file = glob.glob(
        os.path.join(HIRID2_META_PATH,
                     "reduced_fmat_{}_*.h5".format(batchid)))[0]

    patient_filter = f'patientid >= {patientid_start} and patientid <= {patientid_stop}'
    df_bolus = spark.read.parquet(ISOBOLUS_PATH).where(patient_filter).toPandas()
    df_stay_data = spark.read.parquet(PATIENT_STAY_PATH).where(patient_filter).toPandas()
    # glob already returned the full path, so no second join with HIRID2_META_PATH is needed
    df_monvals = pd.read_hdf(cand_file,
                             columns=['PatientID', 'Datetime'] + mon_value_ids,
                             where="PatientID in [" +
                             ','.join(map(str, df_bolus.patientid.unique())) +
                             "]")

    df_outcomes = pd.DataFrame()
    for patientid in df_bolus.patientid.unique():
        boluses_pat = process_patient(patientid, df_stay_data, df_bolus,
                                      df_monvals)
        if len(boluses_pat) > 0:
            # DataFrame.append was removed in pandas 2.0; pd.DataFrame() passes
            # a DataFrame through and also accepts a list of rows
            df_outcomes = pd.concat([df_outcomes, pd.DataFrame(boluses_pat)],
                                    ignore_index=True)

    return df_outcomes
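Unlike Example #1, this variant returns the outcome frame instead of writing it, so persistence falls to the caller. A minimal sketch of such a caller, modelled on the batch loop in the Example #5 fragment below; ID_PATH, the BatchID/PatientID CSV layout and the output file name are assumptions:

import os
from datetime import datetime

df_batches = pd.read_csv(ID_PATH)  # assumed CSV with BatchID and PatientID columns
for batchid in sorted(df_batches.BatchID.unique()):
    pids = df_batches[df_batches.BatchID == batchid].PatientID
    df_outcomes = process_batch(batchid, pids.min(), pids.max())
    output_dir = os.path.join(OUTPUT_DIR, datetime.today().strftime('%Y-%m-%d'))
    os.makedirs(output_dir, exist_ok=True)
    if not df_outcomes.empty:
        df_outcomes.to_parquet(os.path.join(output_dir, f'outcomes_{batchid}.parquet'))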
Example #3
import glob
import os

import pandas as pd

# all_ids, mon_value_ids, last_values, pharma_ids, TREND_VARS,
# VARIABLE_PERIODS_OUTCOMES, OUTCOME_OBSERVATION_TIME, the *_PATH constants,
# get_spark_session, filter_mon_vals and process_patient are module-level
# names defined elsewhere in the source.
def process_batch(batchid, patientid_start, patientid_stop):
    print(
        f'Processing batch id: {batchid}: {patientid_start}-{patientid_stop}')
    # open Spark session
    spark = get_spark_session(8, 1024, 64)

    patient_filter = f'patientid >= {patientid_start} and patientid <= {patientid_stop}'
    df_bolus = spark.read.parquet(ISOBOLUS_PATH).where(
        'vm1_criterion = 1 and vm5_criterion = 1').where(patient_filter).toPandas()
    cand_file_mon = glob.glob(
        os.path.join(HIRID2_META_PATH,
                     "reduced_fmat_{}_*.h5".format(batchid)))[0]
    # glob already returned the full path, so no second join is needed
    df_monvals = pd.read_hdf(cand_file_mon,
                             columns=['PatientID', 'Datetime'] + all_ids,
                             where="PatientID in [" +
                             ','.join(map(str, df_bolus.patientid.unique())) +
                             "]")
    df_stay_data = spark.read.parquet(PATIENT_STAY_PATH).where(patient_filter).toPandas()

    # simple outlier filtering
    df_monvals = filter_mon_vals(df_monvals)

    # generate column names
    result_columns = ['patientid', 'bolusid']
    for variableid in mon_value_ids + last_values + pharma_ids + [
            'age', 'sex', 'emergency', 'height', 'adm_diag', 'adm_codeid',
            'apacheII', 'sepsis', 'ventilated'
    ]:
        result_columns.append(variableid + "_baseline")
        if variableid in TREND_VARS:
            result_columns.append(variableid + "_trend")
        if variableid in VARIABLE_PERIODS_OUTCOMES:
            period_length = VARIABLE_PERIODS_OUTCOMES[variableid]
            for i in range(0, OUTCOME_OBSERVATION_TIME, period_length):
                result_columns.append(variableid + "_" + str(i))

    # process data
    df_outcomes = pd.DataFrame(columns=result_columns)
    for patientid in df_bolus.patientid.unique():
        outcome_pat = process_patient(patientid, df_stay_data, df_bolus,
                                      df_monvals, result_columns)
        if len(outcome_pat) > 0:
            # DataFrame.append was removed in pandas 2.0
            df_outcomes = pd.concat([df_outcomes, pd.DataFrame(outcome_pat)],
                                    ignore_index=True)

    return df_outcomes
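The column layout built above depends on module-level constants the excerpt does not show. A toy instantiation with made-up values (the vmN-style ids mirror the naming used elsewhere in these examples) makes the scheme concrete:

# Made-up constants, for illustration only
mon_value_ids = ['vm1', 'vm5']
TREND_VARS = {'vm1'}
VARIABLE_PERIODS_OUTCOMES = {'vm5': 300}  # period length per variable
OUTCOME_OBSERVATION_TIME = 900

result_columns = ['patientid', 'bolusid']
for variableid in mon_value_ids:
    result_columns.append(variableid + '_baseline')
    if variableid in TREND_VARS:
        result_columns.append(variableid + '_trend')
    if variableid in VARIABLE_PERIODS_OUTCOMES:
        period_length = VARIABLE_PERIODS_OUTCOMES[variableid]
        for i in range(0, OUTCOME_OBSERVATION_TIME, period_length):
            result_columns.append(variableid + '_' + str(i))

# result_columns == ['patientid', 'bolusid', 'vm1_baseline', 'vm1_trend',
#                    'vm5_baseline', 'vm5_0', 'vm5_300', 'vm5_600']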
Example #4
import os
from datetime import datetime

import pandas as pd

# ISOBOLUS_PATH, BOLUS_PATH, PATIENT_STAY_PATH, OUTPUT_DIR, get_spark_session
# and process_patient are module-level names defined elsewhere in the source.
def process_batch(patientid_start, patientid_stop):
    # open Spark session
    spark = get_spark_session(8, 1024, 64)
    patient_filter = f'patientid >= {patientid_start} and patientid <= {patientid_stop}'
    df_isobolus = spark.read.parquet(ISOBOLUS_PATH).where(
        'vm1_criterion = 1 and vm5_criterion = 1').where(patient_filter).toPandas()
    df_bolus = spark.read.parquet(BOLUS_PATH).where(patient_filter).toPandas()
    df_stay_data = spark.read.parquet(PATIENT_STAY_PATH).where(patient_filter).toPandas()

    df_batch_non_boluses = pd.DataFrame(columns=[
        'patientid', 'bolusid', 'startindex', 'endindex', 'lenght', 'fluidamount',
        'previous_fluid_total', 'previous_fluid_30min', 'previous_fluid_1h',
        'previous_fluid_2h'])
    for patientid in df_isobolus.patientid.unique():
        df_isobolus_pat = df_isobolus[df_isobolus.patientid == patientid]
        df_bolus_pat = df_bolus[df_bolus.patientid == patientid]
        pat_boluses = process_patient(patientid, df_isobolus_pat, df_bolus_pat, df_stay_data)
        # DataFrame.append was removed in pandas 2.0
        df_batch_non_boluses = pd.concat(
            [df_batch_non_boluses, pd.DataFrame(pat_boluses)], ignore_index=True)

    output_dir = os.path.join(OUTPUT_DIR, 'non_bolus', datetime.today().strftime('%Y-%m-%d'))

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if not df_batch_non_boluses.empty:
        df_batch_non_boluses.to_parquet(os.path.join(output_dir, "non_bolus_{}_{}.parquet".format(patientid_start, patientid_stop)))
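Each run leaves one parquet file per ID range under a dated directory, so a downstream consumer could reassemble a day's output as follows (the date is only an example):

import glob
import os

import pandas as pd

day_dir = os.path.join(OUTPUT_DIR, 'non_bolus', '2021-06-01')  # example date
parts = sorted(glob.glob(os.path.join(day_dir, 'non_bolus_*.parquet')))
df_all = pd.concat((pd.read_parquet(p) for p in parts), ignore_index=True)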
Example #5
        # periods appear to be in seconds (300 = 5 min, 3600 = 1 h, 86400 = 24 h)
        'pm46': 300,
        'vm136': 3600,
        'vm140': 3600,
        'vm138': 3600,
        'vm132': 3600,
        'vm134': 3600,
        'vm174': 3600,
        'vm263': 3600,
        'vm14': 300,
        'vm20': 300,
        'vm142': 3600,
        'vm141': 3600,
        'vm183': 86400
    }

    spark = get_spark_session(8, 1024, 64)
    df_batches = pd.read_csv(ID_PATH)

    for batchid in range(args.batchid_start, args.batchid_stop + 1):
        pids_batch = df_batches[df_batches.BatchID == batchid].PatientID
        # a single append to an empty frame is just the batch result itself
        # (and DataFrame.append was removed in pandas 2.0)
        df_outcomes = process_batch(batchid, pids_batch.min(),
                                    pids_batch.max())

        output_dir = os.path.join(
            OUTPUT_DIR,
            datetime.datetime.today().strftime('%Y-%m-%d'))
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
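Given a period map like the dict at the top of this fragment, per-variable rastering amounts to flooring epoch timestamps to each variable's period. A minimal sketch; raster_timestamps is a hypothetical helper, not code from the source:

import pandas as pd

# Hypothetical helper: snap each observation onto its variable's period grid
def raster_timestamps(df, period_map):
    periods = df['variableid'].map(period_map)         # seconds per variable
    epoch_s = df['datetime'].astype('int64') // 10**9  # datetime64[ns] -> s
    return pd.to_datetime(epoch_s // periods * periods, unit='s')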
Example #6
import os
import pickle
from datetime import datetime

import pandas as pd

# HIRID2_PATH, PATIENT_STAY_PATH, OUTPUT_DIR, get_spark_session,
# get_included_subset and process_patient are module-level names
# defined elsewhere in the source.
def process_batch(args):
    # open Spark session
    spark = get_spark_session(8, 1024, 64)
    patient_filter = f'patientid >= {args.patientid_start} and patientid <= {args.patientid_stop}'
    df_pharmarec = spark.read.parquet(os.path.join(HIRID2_PATH, 'p_pharmarec')) \
        .where(patient_filter).cache()
    # keep only patients/stays satisfying the study criteria
    df_stay_data = get_included_subset(
        spark.read.parquet(PATIENT_STAY_PATH).where(patient_filter)).cache()

    bolus_columns = [
        'patientid', 'bolusid', 'startindex', 'endindex', 'lenght',
        'fluidamount', 'previous_fluid_total', 'previous_fluid_30min',
        'previous_fluid_1h', 'previous_fluid_2h'
    ]
    df_batch_boluses = pd.DataFrame(columns=bolus_columns)
    df_batch_isolated_boluses = pd.DataFrame(columns=bolus_columns)
    for patientid in df_stay_data.select(
            'patientid').distinct().toPandas().iloc[:, 0]:
        (rastered_infusions, rastered_ringer, infusion_sources, bolus_region,
         bolus, isolated_bolus, df_boluses, df_isolated_boluses,
         avg_volume_h) = process_patient(patientid, df_stay_data, df_pharmarec)

        # DataFrame.append was removed in pandas 2.0
        df_batch_boluses = pd.concat([df_batch_boluses, df_boluses],
                                     ignore_index=True)
        df_batch_isolated_boluses = pd.concat(
            [df_batch_isolated_boluses, df_isolated_boluses], ignore_index=True)

        # save the rastered Ringer volumes as one pickle per patient
        output_dir_volume = os.path.join(OUTPUT_DIR, 'volume',
                                         datetime.today().strftime('%Y-%m-%d'))
        os.makedirs(output_dir_volume, exist_ok=True)
        with open(os.path.join(output_dir_volume, f"{patientid}.pickle"), "wb") as f:
            pickle.dump(rastered_ringer, f)

    output_dir_bolus = os.path.join(OUTPUT_DIR, 'bolus',
                                    datetime.today().strftime('%Y-%m-%d'))
    output_dir_bolus_isolated = os.path.join(
        OUTPUT_DIR, 'bolus_isolated',
        datetime.today().strftime('%Y-%m-%d'))

    if not os.path.exists(output_dir_bolus):
        os.makedirs(output_dir_bolus)

    if not df_batch_boluses.empty:
        df_batch_boluses.to_parquet(
            os.path.join(
                output_dir_bolus,
                "bolus_{}_{}.parquet".format(args.patientid_start,
                                             args.patientid_stop)))

    if not os.path.exists(output_dir_bolus_isolated):
        os.makedirs(output_dir_bolus_isolated)

    if not df_batch_isolated_boluses.empty:
        df_batch_isolated_boluses.to_parquet(
            os.path.join(
                output_dir_bolus_isolated,
                "bolus_isolated_{}_{}.parquet".format(args.patientid_start,
                                                      args.patientid_stop)))
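This last variant reads its ID range from an args object, so it pairs naturally with argparse. A minimal sketch of an entry point; the flag names simply mirror the attributes process_batch reads and are not shown in the source:

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--patientid_start', type=int, required=True)
    parser.add_argument('--patientid_stop', type=int, required=True)
    process_batch(parser.parse_args())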