def run(self, label, features):
    self.reset_spatial_encoder()
    self.reset_temporal_encoder()

    for i in range(len(features)):
        self.run_spatial_encoder(features.iloc[i, :])
        if self.is_early_fusion:
            self.output_R_fused = utils.bundle([self.spatial_encoder_GSR.output_R,
                                                self.spatial_encoder_ECG.output_R,
                                                self.spatial_encoder_EEG.output_R])
        self.run_temporal_encoder()
        if not self.is_early_fusion:
            self.output_T_fused = utils.bundle([self.temporal_encoder_GSR.output_T,
                                                self.temporal_encoder_ECG.output_T,
                                                self.temporal_encoder_EEG.output_T])
        if i > 1:
            if label == 'test':
                actual_label_v = utils.classify(
                    self.feature_memory.ds_label_v.iloc[i - self.ngram_size + 1:i + 1, 0])
                actual_label_a = utils.classify(
                    self.feature_memory.ds_label_a.iloc[i - self.ngram_size + 1:i + 1, 0])
                self.predict_am_internal(actual_label_v, actual_label_a)
            else:
                self.accumulate_am(label)

    if label == 'test':
        self.compute_summary()
    else:
        self.bundle_am(label)
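Both encoders lean on `utils.bundle` to merge per-channel hypervectors, but the helper itself is not shown in these examples. A minimal sketch of majority-vote bundling for bipolar hypervectors, offered only as an assumption about what `utils.bundle` does:

import numpy as np

def bundle(hypervectors):
    """Elementwise majority vote over bipolar {-1, +1} hypervectors.

    Hypothetical stand-in for utils.bundle; the real helper may break
    ties or normalize differently.
    """
    summed = np.stack([np.asarray(hv) for hv in hypervectors]).sum(axis=0)
    # Positive sums vote +1, negative sums vote -1; ties go to +1 here.
    return np.where(summed >= 0, 1, -1)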
def bundle(self, input_label):
    if input_label == "v_plus":
        self.prototype_v_plus = pd.Series(
            data=utils.bundle(self.accumulate_v_plus))
    elif input_label == "v_min":
        self.prototype_v_min = pd.Series(
            data=utils.bundle(self.accumulate_v_min))
    elif input_label == "a_high":
        self.prototype_a_high = pd.Series(
            data=utils.bundle(self.accumulate_a_high))
    elif input_label == "a_low":
        self.prototype_a_low = pd.Series(
            data=utils.bundle(self.accumulate_a_low))
    else:
        print("Invalid input label given")
def practitioner_conversion(input_path, map_df, output_path, partition):
    filter_cols = ['PROVIDERID', 'PROVIDER_SEX']
    input_df, subset_map_df = get_input_df("PROVIDER",
                                           input_path,
                                           map_df,
                                           use_cols=filter_cols)

    def map_one_row(row):
        entry = {
            "fullUrl": "https://www.hl7.org/fhir/practitioner.html",
            "resource": {
                "resourceType":
                "Practitioner",
                "id":
                row['PROVIDERID'],
                "gender":
                subset_map_df.loc['PROVIDER_SEX',
                                  row['PROVIDER_SEX']].at['fhir_out_cd']
            }
        }
        input_fhir_entries.append(entry)
        return

    i = 0
    partition = int(partition)
    input_df_len = len(input_df)
    input_dir = os.path.join(output_path, 'Practitioner')
    write_fhir_0_json(input_dir)
    while i < input_df_len:
        input_fhir_entries = []
        part_input_df = input_df.iloc[i:i + partition, :]
        part_input_df.apply(lambda row: map_one_row(row), axis=1)
        part_input_df_len = len(part_input_df)
        file_name = get_partition_file_name(partition, part_input_df_len, i)
        write_fhir_json(bundle(entry=input_fhir_entries), input_dir, file_name)
        i = i + partition
    return
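Every conversion in this listing ends the same way: it wraps the accumulated entries with `bundle(entry=...)` and hands the result to `write_fhir_json`. Neither helper is shown here; a plausible sketch of both, as an assumption rather than the project's actual code:

import json
import os

def bundle(entry):
    # Hypothetical stand-in: wrap one partition's entries in a FHIR Bundle.
    return {"resourceType": "Bundle", "type": "collection", "entry": entry}

def write_fhir_json(fhir_bundle, output_dir, file_name):
    # Hypothetical stand-in: serialize the bundle into the resource directory.
    with open(os.path.join(output_dir, file_name), 'w') as out_f:
        json.dump(fhir_bundle, out_f, indent=2)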
def bundle(self):
    if self.use_final_hv:
        self.bind_input[self.num_channel] = utils.bind(
            [self.bind_input[self.num_channel - 1], self.bind_input[1]])
    self.output_R = pd.Series(utils.bundle(self.bind_input))
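This spatial-encoder variant also calls `utils.bind`, which is likewise not shown. For bipolar hypervectors, binding is commonly elementwise multiplication; a sketch under that assumption:

import numpy as np

def bind(hypervectors):
    # Hypothetical stand-in for utils.bind: elementwise multiplication,
    # which for {-1, +1} vectors is self-inverse (binding twice unbinds).
    result = np.asarray(hypervectors[0], dtype=int)
    for hv in hypervectors[1:]:
        result = result * np.asarray(hv, dtype=int)
    return result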
Example #5
def main(argv):
    arg_parser = ArgParser()
    args = arg_parser.parse(argv)
    game_hash = args.game_hash

    # Set up logging
    formatter = logging.Formatter('%(asctime)s %(message)s')
    logging.basicConfig(filename=os.path.join(args.log_path,
                        'game_{}_vms.log'.format(game_hash)),
                        level=logging.DEBUG, format='%(asctime)s %(message)s',
                        datefmt="%H:%M:%S", filemode='w')
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    try:
        game = args.game
        logging.debug("Game description JSON: {}".format(game))
        status(game_hash, "Started creating VMs", args.remote)

        assert re.match(r'[a-zA-Z0-9]+\Z', game_hash)
        status(game_hash, "PENDING", args.remote)

        game_name = game['name']
        assert re.match(r'[a-zA-Z0-9 _-]+\Z', game_name)
        teams = game['teams']
        services = [s['service_name'] for s in game['services']]
        sudo = game.get('sudo', False)

        logging.info("Game name: {}".format(game_name))
        logging.info("Teams: {}".format(teams))
        logging.info("Services: {}".format(services))
        assert game['num_services'] == len(game['services'])
        assert game['num_services'] == len(services)
        # Avoid an IP conflict with the organization VM (10.7.254.10)
        assert len(teams) < 200

        # Clean up previous creations
        clean_up(args.output_path, game_hash, teams, bundle=True)
        game_dir = gamepath(args.output_path, game_hash)
        root_key_path = os.path.join(game_dir, "root_key")
        root_public_key = create_ssh_key(root_key_path)

        create_org(args.output_path, game_hash, game_name, teams, services,
                   root_key_path, args.remote)

        for team_id, team in enumerate(teams, start=1):
            team_public_key = create_ssh_key("{}/team{}_key".format(game_dir,
                                                                    team_id))
            create_team(args.output_path, game_hash, team_id, root_public_key,
                        team_public_key, team['password'], sudo, services,
                        args.remote)
        bundle(game_hash, "Organization", "root_key", "organization",
               args.output_path, args.remote)
        for team_id, team in enumerate(teams, start=1):
            team_name = team['name']
            bundle(game_hash, "Team{}".format(team_id),
                   "team{}_key".format(team_id), team_name, args.output_path,
                   args.remote)

        status(game_hash, "Cleaning up the build")
        clean_up(args.output_path, game_hash, teams)

        status(game_hash, "READY")

    except Exception:
        status(game_hash, "An error occurred. Contact us and report game "
                          "{}".format(game_hash))
        status(game_hash, "ERROR")
        logging.exception("Exception")
        os.system("echo 'Creation for {} failed, see the log in /tmp' | "
                  "mail -s 'Error creating game {}' "
                  "root".format(game_hash, game_hash))
Example #6
def condition_conversion(input_path, map_df, output_path, partition):
    # read DEATH_CAUSE to add to condition resource as needed
    dc_df = pd.read_csv(os.path.join(input_path, "DEATH_CAUSE.csv"), sep='|', index_col=['PATID'],
                        usecols=['PATID', 'DEATH_CAUSE', 'DEATH_CAUSE_CODE'])

    def map_one_condition(row):
        entry = {
            "fullUrl": "https://www.hl7.org/fhir/condition.html",
            "resource": {
                "resourceType": "Condition",
                "id": row['CON_IDENTIFIER'],
                "clinicalStatus": row['CON_CLINSTATUS'],
                "subject": {
                    "reference": "Patient/{}".format(row['CON_SUBJECT_REFERENCE'])
                },
                "context": {
                    "reference": "Encounter/{}".format(row['CON_CONTEXT_REFERENCE'])
                }
            }
        }
        if not pd.isnull(row['CON_CATEGORY_CODING_SYST']) or not pd.isnull(row['CON_CATEGORY_CODING_CODE']):
            cat_dict = {}
            code_dict = {}
            if not pd.isnull(row['CON_CATEGORY_CODING_SYST']):
                code_dict['system'] = row['CON_CATEGORY_CODING_SYST']
            if not pd.isnull(row['CON_CATEGORY_CODING_CODE']):
                code_dict['code'] = row['CON_CATEGORY_CODING_CODE']
            cat_dict['coding'] = [code_dict]
            entry['resource']['category'] = [cat_dict]

        if not pd.isnull(row['CON_CODE_CODING_SYST']) or not pd.isnull(row['CON_CODE_CODING_CODE']) or \
                row['CON_SUBJECT_REFERENCE'] in dc_df.index:
            coding_dict = {}
            code_dict = {'coding': []}
            if not pd.isnull(row['CON_CODE_CODING_SYST']):
                coding_dict['system'] = row['CON_CODE_CODING_SYST']
            if not pd.isnull(row['CON_CODE_CODING_CODE']):
                coding_dict['code'] = row['CON_CODE_CODING_CODE']
            code_dict['coding'].append(coding_dict)
            if row['CON_SUBJECT_REFERENCE'] in dc_df.index:
                dc_codes = dc_df.loc[row['CON_SUBJECT_REFERENCE']]

                def map_one_dc_code(dc_row):
                    if str(dc_row['DEATH_CAUSE_CODE']) == '09':
                        code_system = 'http://hl7.org/fhir/sid/icd-9-cm'
                    elif str(dc_row['DEATH_CAUSE_CODE']) == '10':
                        code_system = 'http://hl7.org/fhir/sid/icd-10-cm'
                    else:
                        code_system = None

                    if code_system:
                        dc_code_dict = {
                            'system': code_system,
                            'code': dc_row['DEATH_CAUSE']
                        }
                        code_dict['coding'].append(dc_code_dict)
                    return

                if isinstance(dc_codes, pd.DataFrame):
                    dc_codes.apply(lambda dc_row: map_one_dc_code(dc_row), axis=1)
                else:  # it is of type Series
                    map_one_dc_code(dc_codes)

            entry['resource']['code'] = code_dict

        if not pd.isnull(row['CON_ASSERTER_REFERENCE']):
            entry['resource']['asserter'] = {
                "reference": "Practitioner/{}".format(row['CON_ASSERTER_REFERENCE'])
            }

        if not pd.isnull(row['CON_ASSERT_DATE']):
            entry['resource']['assertedDate'] = row['CON_ASSERT_DATE']

        if not pd.isnull(row['CON_ABATEDATE']):
            entry['resource']['abatementDateTime'] = row['CON_ABATEDATE']

        if not pd.isnull(row['CON_ONSETDATE']):
            entry['resource']['onsetDateTime'] = row['CON_ONSETDATE']

        cond_fhir_entries.append(entry)
        return

    filter_cols = ["CONDITIONID", "PATID", "ENCOUNTERID", "CONDITION_TYPE", "CONDITION", "REPORT_DATE",
                    "RESOLVE_DATE", "ONSET_DATE", 'CONDITION_STATUS', 'CONDITION_SOURCE']
    input_df, subset_map_df = get_input_df("CONDITION", input_path, map_df, use_cols=filter_cols)
    input_df = input_df.loc[input_df['CONDITION_SOURCE'] == 'HC']
    input_df = input_df.drop(columns=["CONDITION_SOURCE"])
    input_df.loc[~input_df.CONDITION_TYPE.isin(['HP', 'SM', '09', '10', '11']),
                 'CONDITION_TYPE'] = None
    input_df.loc[(input_df.CONDITION_TYPE == '09'), 'CONDITION_TYPE'] = 'http://hl7.org/fhir/sid/icd-9-cm'
    input_df.loc[(input_df.CONDITION_TYPE == '10'), 'CONDITION_TYPE'] = 'http://hl7.org/fhir/sid/icd-10-cm'
    input_df.loc[(input_df.CONDITION_TYPE == '11'), 'CONDITION_TYPE'] = 'http://hl7.org/fhir/sid/icd-11-cm'
    input_df.loc[(input_df.CONDITION_TYPE == 'SM'), 'CONDITION_TYPE'] = 'http://snomed.info/sct'
    input_df.loc[(input_df.CONDITION_TYPE == 'HP'), 'CONDITION_TYPE'] = 'https://hpo.jax.org/app/'
    input_df['CONDITION_STATUS'] = input_df['CONDITION_STATUS'].map(
        lambda x: subset_map_df.loc['CONDITION_STATUS', x].at['fhir_out_cd'])
    mapping = {"CONDITIONID": "CON_IDENTIFIER",
               "PATID": "CON_SUBJECT_REFERENCE",
               "ENCOUNTERID": "CON_CONTEXT_REFERENCE",
               "CONDITION_TYPE": "CON_CODE_CODING_SYST",
               "CONDITION": "CON_CODE_CODING_CODE",
               "REPORT_DATE": "CON_ASSERT_DATE",
               "RESOLVE_DATE": "CON_ABATEDATE",
               "ONSET_DATE": "CON_ONSETDATE",
               "CONDITION_STATUS": "CON_CLINSTATUS"}
    input_df.rename(columns=mapping, inplace=True)
    input_df['CON_ASSERTER_REFERENCE'] = None
    input_df['CON_CATEGORY_CODING_SYST'] = 'https://www.hl7.org/fhir/valueset-condition-category'
    input_df['CON_CATEGORY_CODING_CODE'] = 'problem-list-item'
    input_df = input_df.drop_duplicates()

    dx_df = pd.read_csv(os.path.join(input_path, "DIAGNOSIS.csv"), sep='|',
                        usecols=['DIAGNOSISID', 'ENCOUNTERID', 'PATID', 'PROVIDERID', 'DX_TYPE',
                                 'DX', 'ADMIT_DATE'])
    dx_df.loc[~dx_df.DX_TYPE.isin(['SM', '09', '10', '11']), 'DX_TYPE'] = None
    dx_df.loc[(dx_df.DX_TYPE == '09'), 'DX_TYPE'] = 'http://hl7.org/fhir/sid/icd-9-cm'
    dx_df.loc[(dx_df.DX_TYPE == '10'), 'DX_TYPE'] = 'http://hl7.org/fhir/sid/icd-10-cm'
    dx_df.loc[(dx_df.DX_TYPE == '11'), 'DX_TYPE'] = 'http://hl7.org/fhir/sid/icd-11-cm'
    dx_df.loc[(dx_df.DX_TYPE == 'SM'), 'DX_TYPE'] = 'http://snomed.info/sct'
    mapping = {"DIAGNOSISID": "CON_IDENTIFIER",
               "PATID": "CON_SUBJECT_REFERENCE",
               "ENCOUNTERID": "CON_CONTEXT_REFERENCE",
               "PROVIDERID": "CON_ASSERTER_REFERENCE",
               "DX_TYPE": "CON_CODE_CODING_SYST",
               "DX": "CON_CODE_CODING_CODE",
               "ADMIT_DATE": "CON_ASSERT_DATE"}
    dx_df.rename(columns=mapping, inplace=True)
    dx_df['CON_CATEGORY_CODING_SYST'] = 'https://www.hl7.org/fhir/valueset-condition-category'
    dx_df['CON_CATEGORY_CODING_CODE'] = 'encounter-diagnosis'
    dx_df['CON_CLINSTATUS'] = 'active'
    dx_df['CON_ABATEDATE'] = None
    dx_df['CON_ONSETDATE'] = None
    dx_df = dx_df.drop_duplicates()
    join_df = pd.concat([dx_df, input_df])

    i = 0
    partition = int(partition)
    join_df_len = len(join_df)

    cond_dir = os.path.join(output_path, 'Condition')
    write_fhir_0_json(cond_dir)

    while i < join_df_len:
        cond_fhir_entries = []
        part_pat_df = join_df.iloc[i:i+partition, :]
        part_pat_df.apply(lambda row: map_one_condition(row), axis=1)
        part_pat_df_len = len(part_pat_df)
        file_name = get_partition_file_name(partition, part_pat_df_len, i)
        write_fhir_json(bundle(entry=cond_fhir_entries), cond_dir, file_name)
        i = i + partition
    return
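`get_partition_file_name` only needs to yield a distinct file name per chunk. One plausible reading, given that it receives the partition size, the chunk's actual length, and the starting offset (an assumption, not the real definition):

def get_partition_file_name(partition, part_len, offset):
    # Hypothetical stand-in: name each chunk by the 1-based index of its
    # last row, e.g. rows 0-999 with partition=1000 become "1000.json".
    return '{}.json'.format(offset + part_len)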
Example #7
def procedure_conversion(input_path, output_path, partition):
    filter_cols = [
        'PROCEDURESID', 'PATID', 'ENCOUNTERID', 'PX_TYPE', 'PX', 'PX_DATE',
        'PROVIDERID'
    ]
    input_df = pd.read_csv(os.path.join(input_path, 'PROCEDURES.csv'),
                           sep='|',
                           usecols=filter_cols)
    type_to_url = {
        '09': 'http://hl7.org/fhir/sid/icd-9-cm/',
        '10': 'http://hl7.org/fhir/sid/icd-10-cm/',
        '11': 'http://hl7.org/fhir/sid/icd-11-cm/',
        'CH': 'http://www.ama-assn.org/go/cpt/',
        'LC': 'http://loinc.org/',
        'ND': 'http://hl7.org/fhir/sid/ndc/'
    }

    def map_one_row(row):
        entry = {
            "fullUrl": "https://www.hl7.org/fhir/procedure.html",
            "resource": {
                "resourceType": "Procedure",
                "id": row['PROCEDURESID'],
                "status": "unknown",
            }
        }
        if not pd.isnull(row['PATID']):
            entry['resource']["subject"] = {
                "reference": "Patient/{}".format(row['PATID'])
            }
        if not pd.isnull(row['ENCOUNTERID']):
            entry['resource']["context"] = {
                "reference": "Encounter/{}".format(row['ENCOUNTERID'])
            }
        if not pd.isnull(row['PROVIDERID']):
            entry['resource']["performer"] = {
                'actor': {
                    "reference": "Practitioner/{}".format(row['PROVIDERID'])
                }
            }
        if not pd.isnull(row['PX']) and not pd.isnull(row['PX_TYPE']):
            coding_dict = {
                'system': type_to_url.get(row['PX_TYPE'], ''),
                'code': row['PX']
            }
            code_dict = {'coding': [coding_dict]}
            entry['resource']['code'] = code_dict

        if not pd.isnull(row['PX_DATE']):
            entry['resource']['performed'] = {
                'performedDateTime': row['PX_DATE']
            }

        input_fhir_entries.append(entry)
        return

    i = 0
    partition = int(partition)
    input_df_len = len(input_df)
    input_dir = os.path.join(output_path, 'Procedure')
    write_fhir_0_json(input_dir)

    while i < input_df_len:
        input_fhir_entries = []
        part_input_df = input_df.iloc[i:i + partition, :]
        part_input_df.apply(lambda row: map_one_row(row), axis=1)
        part_input_df_len = len(part_input_df)
        file_name = get_partition_file_name(partition, part_input_df_len, i)
        write_fhir_json(bundle(entry=input_fhir_entries), input_dir, file_name)
        i = i + partition
    return
Example #8
    # Excerpt: body of a per-sample loop; the loop header, file handles, and
    # hdc_top setup are not shown in this example.
    EEG_fm.write(
        series_to_string(hdc_top.feature_memory.ds_data.iloc[
            i, 109:214].astype(int)) + "\n")

    hdc_top.run_spatial_encoder(hdc_top.feature_memory.ds_data.iloc[i, :])

    GSR_output_R.write(
        series_to_string(hdc_top.spatial_encoder_GSR.output_R) + "\n")
    ECG_output_R.write(
        series_to_string(hdc_top.spatial_encoder_ECG.output_R) + "\n")
    EEG_output_R.write(
        series_to_string(hdc_top.spatial_encoder_EEG.output_R) + "\n")

    hdc_top.output_R_fused = utils.bundle([
        hdc_top.spatial_encoder_GSR.output_R,
        hdc_top.spatial_encoder_ECG.output_R,
        hdc_top.spatial_encoder_EEG.output_R
    ])

    output_R_fused.write(list_to_string(hdc_top.output_R_fused) + "\n")

    hdc_top.run_temporal_encoder()

    output_T.write(series_to_string(hdc_top.temporal_encoder.output_T) + "\n")

    predicted_v, predicted_a = hdc_top.predict_am()
    output_V_label.write(str(predicted_v) + "\n")
    output_A_label.write(str(predicted_a) + "\n")

# Close Files
im.close()
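A sketch of the enclosing loop the excerpt appears to come from, with every name assumed from the excerpt itself:

# Hypothetical reconstruction of the missing context: im, EEG_fm,
# GSR_output_R, etc. are files opened earlier, and hdc_top is the
# trained HDC pipeline object.
for i in range(len(hdc_top.feature_memory.ds_data)):
    # per-sample body shown in the excerpt above
    ...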
Example #9
def lab_conversion(input_path, map_df, output_path, partition):
    input_df, subset_map_df = get_input_df("LAB_RESULT_CM", input_path, map_df)

    def map_one_lab(row):
        entry = {
            "fullUrl": "https://www.hl7.org/fhir/lab.html",
            "resource": {
                "resourceType": "Observation",
                "id": row['LAB_RESULT_CM_ID'],
                "category": [
                    {
                        "coding": [
                            {
                                "system": "http://hl7.org/fhir/observation-category",
                                "code": "laboratory",
                                "display": "Laboratory"
                            }
                        ]
                    }
                ],
                "code": {
                    "coding": [
                        {
                            "system": "http://loinc.org",
                            "code": row['LAB_LOINC']
                        }
                    ]
                },
            "subject": {
              "reference": "Patient/{}".format(row['PATID'])
            },
            "context": {
              "reference": "Encounter/{}".format(row['ENCOUNTERID'])
            },
            "effectiveDateTime": "{} {}".format(row['SPECIMEN_DATE'], row['SPECIMEN_TIME']),
            "issued": row['RESULT_DATE'],
            "interpretation": {
              "coding": [
                {
                  "system": "http://hl7.org/fhir/ValueSet/observation-interpretation",
                  "code": subset_map_df.loc['ABN_IND', row['ABN_IND']].at['fhir_out_cd']
                }
              ]
            }
          }
        }
        if row['RESULT_QUAL'] != 'NI':
            entry['resource']["valueString"] = '{} {}'.format(row['RESULT_QUAL'], row['RAW_RESULT'])

        if row['RESULT_UNIT'] != 'NI':
            entry['resource']['valueQuantity'] = {
                "code": row['RESULT_UNIT']
            }
            comparator = subset_map_df.loc['RESULT_MODIFIER', row['RESULT_MODIFIER']].at['fhir_out_cd']
            if not pd.isnull(comparator):
                entry['resource']['valueQuantity']["comparator"] = comparator

        if not pd.isnull(row['NORM_RANGE_LOW']) and not pd.isnull(row['NORM_RANGE_HIGH']):
            entry['resource']['referenceRange'] = {
                "low": float(row['NORM_RANGE_LOW']),
                "high": float(row['NORM_RANGE_HIGH'])
            }
        lab_fhir_entries.append(entry)
        return

    i = 0
    partition = int(partition)
    pat_df_len = len(input_df)
    pat_dir = os.path.join(output_path, 'Lab')
    write_fhir_0_json(pat_dir)
    while i < pat_df_len:
        lab_fhir_entries = []
        part_pat_df = input_df.iloc[i:i+partition, :]
        part_pat_df.apply(lambda row: map_one_lab(row), axis=1)
        part_pat_df_len = len(part_pat_df)
        file_name = get_partition_file_name(partition, part_pat_df_len, i)
        write_fhir_json(bundle(entry=lab_fhir_entries), pat_dir, file_name)
        i = i + partition
    return
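`get_input_df` is shared by all the conversions and appears to load one PCORnet CSV plus the slice of the mapping table that applies to it. A sketch under that reading (not the project's actual definition):

import os
import pandas as pd

def get_input_df(table_name, input_path, map_df, use_cols=None):
    # Hypothetical stand-in: load the PCORnet table and the subset of the
    # value-mapping dataframe keyed by that table's name.
    input_df = pd.read_csv(os.path.join(input_path, table_name + '.csv'),
                           sep='|', usecols=use_cols, dtype=str)
    subset_map_df = map_df.loc[table_name, :]
    return input_df, subset_map_df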
Example #10
def encounter_conversion(input_path, map_df, output_path, partition):
    def map_one_encounter(row):
        entry = {
            "fullUrl": "https://www.hl7.org/fhir/encounter.html",
            "resource": {
                "resourceType":
                "Encounter",
                "id":
                row["ENCOUNTERID"],
                "subject": {
                    "reference": "Patient/{}".format(row["PATID"])
                },
                "participant": [{
                    "individual": {
                        "display": "Practitioner/{}".format(row['PROVIDERID'])
                    }
                }],
                "period": {
                    "start": row['ADMIT_DATE'],
                    "end": row['DISCHARGE_DATE']
                },
            }
        }

        if not pd.isnull(row['ENC_TYPE']) and str(row['ENC_TYPE']).strip():
            mapped_code = subset_map_df.loc['ENC_TYPE',
                                            row['ENC_TYPE']].at['fhir_out_cd']
            if not pd.isnull(mapped_code):
                entry['resource']['class'] = {
                    "system": "http://hl7.org/fhir/v3/ActCode",
                    "code": mapped_code,
                    "display": subset_map_df.loc['ENC_TYPE',
                                                 row['ENC_TYPE']].at['fhir_out_char']
                }
        if not pd.isnull(row['ADMITTING_SOURCE']):
            if 'hospitalization' not in entry['resource']:
                entry['resource']['hospitalization'] = {}
            entry['resource']['hospitalization']['admitSource'] = {
                "text": subset_map_df.loc['ADMITTING_SOURCE',
                                          row['ADMITTING_SOURCE']].at['fhir_out_char']
            }
        if not pd.isnull(row['DISCHARGE_STATUS']):
            if 'hospitalization' not in entry['resource']:
                entry['resource']['hospitalization'] = {}
            entry['resource']['hospitalization']['dischargeDisposition'] = {
                "coding": [{
                    "system": "http://hl7.org/fhir/discharge-disposition",
                    "code": subset_map_df.loc[
                        'DISCHARGE_STATUS',
                        row['DISCHARGE_STATUS']].at['fhir_out_cd'],
                    "display": subset_map_df.loc[
                        'DISCHARGE_STATUS',
                        row['DISCHARGE_STATUS']].at['fhir_out_char']
                }]
            }
        if not pd.isnull(row['DIAGNOSISID']):
            entry['resource']["diagnosis"] = [{
                "condition": {
                    "reference": "Condition/{}".format(row['DIAGNOSISID'])
                },
                "role": {
                    "coding": [{
                        "system": "http://hl7.org/fhir/diagnosis-role",
                        "code": "DD"
                    }]
                },
                "rank": 1
            }]

        enc_fhir_entries.append(entry)
        return

    input_df, subset_map_df = get_input_df("ENCOUNTER", input_path, map_df)
    dx_df = pd.read_csv(
        os.path.join(input_path, "DIAGNOSIS.csv"),
        sep='|',
        usecols=['DIAGNOSISID', 'ENCOUNTERID', 'PDX', 'DX_SOURCE'])
    filter_dx_df = dx_df.loc[(dx_df['PDX'] == 'P')
                             & (dx_df['DX_SOURCE'] == 'DI')]
    join_df = pd.merge(input_df,
                       filter_dx_df,
                       how='left',
                       left_on=['ENCOUNTERID'],
                       right_on=['ENCOUNTERID'])
    i = 0
    partition = int(partition)
    join_df_len = len(join_df)
    enc_dir = os.path.join(output_path, 'Encounter')
    write_fhir_0_json(enc_dir)
    while i < join_df_len:
        enc_fhir_entries = []
        part_pat_df = join_df.iloc[i:i + partition, :]
        part_pat_df.apply(lambda row: map_one_encounter(row), axis=1)
        part_pat_df_len = len(part_pat_df)
        file_name = get_partition_file_name(partition, part_pat_df_len, i)
        write_fhir_json(bundle(entry=enc_fhir_entries), enc_dir, file_name)
        i = i + partition
    return
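Each conversion also seeds its output directory with `write_fhir_0_json` before the partition loop. A plausible sketch of that helper, again as an assumption:

import json
import os

def write_fhir_0_json(output_dir):
    # Hypothetical stand-in: ensure the resource directory exists and
    # seed it with an empty bundle named 0.json.
    os.makedirs(output_dir, exist_ok=True)
    with open(os.path.join(output_dir, '0.json'), 'w') as out_f:
        json.dump({"resourceType": "Bundle", "type": "collection",
                   "entry": []}, out_f)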
Example #11
def patient_conversion(input_path, map_df, output_path, partition):
    pat_df, subset_map_df = get_input_df("DEMOGRAPHIC", input_path, map_df)
    address_df = pd.read_csv(os.path.join(input_path, "LDS_ADDRESS_HISTORY.csv"), sep='|', index_col=['PATID'],
                             usecols=['ADDRESSID', 'PATID', 'ADDRESS_USE', 'ADDRESS_CITY', 'ADDRESS_STATE',
                                      'ADDRESS_TYPE', 'ADDRESS_ZIP5', 'ADDRESS_ZIP9', 'ADDRESS_PERIOD_START',
                                      'ADDRESS_PERIOD_END'], dtype=str)
    addr_subset_map_df = map_df.loc["LDS_ADDRESS_HISTORY", :]

    # read DEATH to add to patient resource as needed
    death_df = pd.read_csv(os.path.join(input_path, "DEATH.csv"), sep='|', index_col=['PATID'],
                           usecols=['PATID', 'DEATH_SOURCE', 'DEATH_DATE'])

    def map_one_patient(row):
        pat_address_list = []

        def map_one_address(addr_row):
            # map Zip9, when it exists, from "286389277" to "28638-9277" - UNC-specific
            if not pd.isnull(addr_row['ADDRESS_ZIP9']):
                addr_str = addr_row['ADDRESS_ZIP9']
                postcode = '{}-{}'.format(addr_str[:5], addr_str[5:9])
            elif not pd.isnull(addr_row['ADDRESS_ZIP5']):
                postcode = addr_row['ADDRESS_ZIP5'][:5]
            else:
                postcode = None
            addr_dict = {
                "use": addr_subset_map_df.loc['ADDRESS_USE', addr_row['ADDRESS_USE']].at['fhir_out_cd'],
                "type": addr_subset_map_df.loc['ADDRESS_TYPE', addr_row['ADDRESS_TYPE']].at['fhir_out_cd'],
                "city": addr_row['ADDRESS_CITY'],
                "postalCode": postcode,
                "period": {
                    "start": addr_row['ADDRESS_PERIOD_START']
                }
            }
            if not pd.isnull(addr_row['ADDRESS_STATE']):
                addr_dict["state"] = addr_row['ADDRESS_STATE']
            if not pd.isnull(addr_row['ADDRESS_PERIOD_END']):
                addr_dict['period']["end"] = addr_row['ADDRESS_PERIOD_END']
            pat_address_list.append(addr_dict)
            return

        entry = {
            "fullUrl": "https://www.hl7.org/fhir/patient.html",
            "resource": {
                "resourceType": "Patient",
                "id": row['PATID'],
                "gender": subset_map_df.loc['SEX', row['SEX']].at['fhir_out_cd'],
                "birthDate": row['BIRTH_DATE'],
                "maritalStatus": {
                    "coding": [
                        {
                            "system": "http://terminology.hl7.org/CodeSystem/v3-MaritalStatus"
                        }
                    ]
                }
            }
        }
        mapped_race = subset_map_df.loc['RACE', row['RACE']].at['fhir_out_cd']
        if not pd.isnull(mapped_race):
            if 'extension' not in entry['resource']:
                entry['resource']['extension'] = []
            entry['resource']['extension'].append(
                {
                    "url": "http://terminology.hl7.org/ValueSet/v3-Race",
                    "valueString": mapped_race
                })
        mapped_ethnic = subset_map_df.loc['HISPANIC', row['HISPANIC']].at['fhir_out_cd']
        if not pd.isnull(mapped_ethnic):
            if 'extension' not in entry['resource']:
                entry['resource']['extension'] = []
            entry['resource']['extension'].append(
                {
                    "url": "http://hl7.org/fhir/v3/Ethnicity",
                    "valueString": mapped_ethnic
                })

        if row['PATID'] in address_df.index:
            part_addr_df = address_df.loc[row['PATID']]
            if isinstance(part_addr_df, pd.DataFrame):
                part_addr_df.apply(lambda addr_row: map_one_address(addr_row), axis=1)
            else: # it is of type Series
                map_one_address(part_addr_df)
            entry['resource']['address'] = pat_address_list

        if row['PATID'] in death_df.index:
            if not pd.isnull(death_df.loc[row['PATID']].at['DEATH_DATE']):
                entry['resource']['deceasedDateTime'] = death_df.loc[row['PATID']].at['DEATH_DATE']
            else:
                entry['resource']['deceasedBoolean'] = True

        pat_fhir_entries.append(entry)
        return

    i = 0
    partition = int(partition)
    pat_df_len = len(pat_df)
    pat_dir = os.path.join(output_path, 'Patient')
    write_fhir_0_json(pat_dir)
    while i < pat_df_len:
        pat_fhir_entries = []
        part_pat_df = pat_df.iloc[i:i+partition, :]
        part_pat_df.apply(lambda row: map_one_patient(row), axis=1)
        part_pat_df_len = len(part_pat_df)
        file_name = get_partition_file_name(partition, part_pat_df_len, i)
        write_fhir_json(bundle(entry=pat_fhir_entries), pat_dir, file_name)
        i = i + partition
    return
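For context, a hypothetical driver wiring a few of these conversions together; the mapping file name and its index columns are guesses inferred from how `subset_map_df` is indexed above:

import pandas as pd

# Hypothetical driver; 'mapping.csv' and the three-level index are assumptions.
map_df = pd.read_csv('mapping.csv', sep='|',
                     index_col=['pcornet_table', 'pcornet_field', 'pcornet_cd'])
patient_conversion('input/', map_df, 'output/', partition=1000)
encounter_conversion('input/', map_df, 'output/', partition=1000)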
Example #12
def vital_conversion(input_path, map_df, output_path, partition):
    def map_one_vital(row):
        entry = {
            "fullUrl": "https://www.hl7.org/fhir/vital.html",
            "resource": {
                "resourceType": "Observation",
                "id": row['VITALID'],
                "status": 'final'
            }
        }
        if not pd.isnull(row['MEASURE_DATE']):
            entry['resource']["effectiveDateTime"] = row['MEASURE_DATE']
        if not pd.isnull(row['PATID']):
            entry['resource']["subject"] = {
                "reference": "Patient/{}".format(row['PATID'])
            }
        if not pd.isnull(row['ENCOUNTERID']):
            entry['resource']["context"] = {
                "reference": "Encounter/{}".format(row['ENCOUNTERID'])
            }

        if not pd.isnull(row['SMOKING']):
            cat_code_dict = {
                'system': "http://hl7.org/fhir/ValueSet/observation-category",
                'code': 'social-history',
                "display": 'Social History'
            }
            cat_dict = {'coding': [cat_code_dict]}
            entry['resource']['category'] = [cat_dict]
            coding_dict = {
                "system": 'http://snomed.info/sct/',
                "code": subset_map_df.loc['SMOKING',
                                          row['SMOKING']].at['fhir_out_cd'],
                "display": subset_map_df.loc['SMOKING',
                                             row['SMOKING']].at['fhir_out_char']
            }
            entry['resource']['code'] = {'coding': [coding_dict]}
        elif not row['TOBACCO']:
            entry['resource']['valueQuantity'] = []
            cat_code_dict = {
                'system': "http://hl7.org/fhir/ValueSet/observation-category",
                'code': 'vital-signs',
                "display": 'Vital Signs'
            }
            cat_dict = {'coding': [cat_code_dict]}
            entry['resource']['category'] = [cat_dict]
            code_code_array = []
            if row['HT']:
                entry['resource']['valueQuantity'].append({
                    'system': 'https://unitsofmeasure.org',
                    'value': row['HT'],
                    'code': '[in_i]'
                })

                coding_dict = {
                    "system": 'http://loinc.org/',
                    "code": '8302-2',
                    "display": 'Body height'
                }
                code_code_array.append(coding_dict)
            if row['WT']:
                entry['resource']['valueQuantity'].append({
                    'system': 'https://unitsofmeasure.org',
                    'value': row['WT'],
                    'code': '[lb_av]'
                })
                coding_dict = {
                    "system": 'http://loinc.org/',
                    "code": '29463-7',
                    "display": 'Body weight'
                }
                code_code_array.append(coding_dict)
            if row['SYSTOLIC']:
                entry['resource']['valueQuantity'].append({
                    'system': 'https://unitsofmeasure.org',
                    'value': row['SYSTOLIC'],
                    'code': 'mm[Hg]'
                })
                coding_dict = {
                    "system": 'http://loinc.org/',
                    "code": '8480-6',
                    "display": 'Systolic blood pressure'
                }
                code_code_array.append(coding_dict)
            if row['DIASTOLIC']:
                entry['resource']['valueQuantity'].append({
                    'system': 'https://unitsofmeasure.org',
                    'value': row['DIASTOLIC'],
                    'code': 'mm[Hg]'
                })
                coding_dict = {
                    "system": 'http://loinc.org/',
                    "code": '8462-4',
                    "display": 'Diastolic blood pressure'
                }
                code_code_array.append(coding_dict)
            if row['ORIGINAL_BMI']:
                entry['resource']['valueQuantity'].append({
                    'system': 'https://unitsofmeasure.org',
                    'value': row['ORIGINAL_BMI'],
                    'code': 'kg/m2'
                })
                coding_dict = {
                    "system": 'http://loinc.org/',
                    "code": '36156-5',
                    "display": 'Body mass index'
                }
                code_code_array.append(coding_dict)

            entry['resource']['code'] = {'coding': code_code_array}

        vital_fhir_entries.append(entry)
        return

    filter_cols = [
        "VITALID", "PATID", "ENCOUNTERID", "HT", "SYSTOLIC", "MEASURE_DATE",
        "SMOKING", "TOBACCO", "DIASTOLIC", "ORIGINAL_BMI", "WT"
    ]
    input_df, subset_map_df = get_input_df("VITAL",
                                           input_path,
                                           map_df,
                                           use_cols=filter_cols)
    i = 0
    partition = int(partition)
    input_df_len = len(input_df)

    vital_dir = os.path.join(output_path, 'Vital')
    write_fhir_0_json(vital_dir)

    while i < input_df_len:
        vital_fhir_entries = []

        part_pat_df = input_df.iloc[i:i + partition, :]

        part_pat_df.apply(lambda row: map_one_vital(row), axis=1)
        part_pat_df_len = len(part_pat_df)
        file_name = get_partition_file_name(partition, part_pat_df_len, i)
        write_fhir_json(bundle(entry=vital_fhir_entries), vital_dir, file_name)
        i = i + partition
    return
def medicationrequest_conversion(input_path, map_df, output_path, partition):
    prescribe_df, prescribe_subset_map_df = get_input_df(
        "PRESCRIBING", input_path, map_df)

    def map_one_medicationrequest(row):
        entry = {
            "fullUrl": "https://www.hl7.org/fhir/medicationrequest.html",
            "resource": {
                "resourceType":
                "MedicationRequest",
                "id":
                row["PRESCRIBINGID"],
                "intent":
                "order",
                "medicationCodeableConcept": {
                    "coding": [{
                        "system":
                        "http://www.nlm.nih.gov/research/umls/rxnorm",
                        "code": str(row["RXNORM_CUI"]),
                        "display": str(row["RAW_RX_MED_NAME"])
                    }]
                },
                "subject": {
                    "reference": "Patient/{}".format(row["PATID"])
                },
                "authoredOn":
                str(row["RX_ORDER_DATE"]),
                "requester": {
                    "agent": {
                        "reference":
                        "Practitioner/{}".format(row["RX_PROVIDERID"])
                    }
                },
                "dosageInstruction": [{
                    "text":
                    prescribe_subset_map_df.loc[
                        'RX_FREQUENCY', row['RX_FREQUENCY']].at['fhir_out_cd'],
                    "asNeededBoolean":
                    prescribe_subset_map_df.loc[
                        'RX_PRN_FLAG', row['RX_PRN_FLAG']].at['fhir_out_cd']
                    if not pd.isnull(row['RX_PRN_FLAG']) else None,
                    "route": {
                        "coding": [{
                            "code": str(row["RX_ROUTE"])
                        }]
                    },
                    "doseQuantity": {
                        "value": row["RX_DOSE_ORDERED"],
                        "unit": row["RX_DOSE_ORDERED_UNIT"]
                    }
                }],
                "dispenseRequest": {
                    "validityPeriod": {
                        "start": row["RX_START_DATE"],
                        "end": row["RX_END_DATE"]
                    }
                },
                "substitution": {
                    "allowed":
                    prescribe_subset_map_df.loc[
                        'RX_DISPENSE_AS_WRITTEN',
                        row['RX_DISPENSE_AS_WRITTEN']].at['fhir_out_cd']
                }
            }
        }
        prescribe_fhir_entries.append(entry)
        return

    i = 0
    partition = int(partition)
    mr_dir = os.path.join(output_path, 'MedicationRequest')
    write_fhir_0_json(mr_dir)

    prescribe_df_len = len(prescribe_df)
    while i < prescribe_df_len:
        prescribe_fhir_entries = []
        part_prescribe_df = prescribe_df.iloc[i:i + partition, :]
        part_prescribe_df.apply(lambda row: map_one_medicationrequest(row),
                                axis=1)
        part_prescribe_df_len = len(part_prescribe_df)
        file_name = get_partition_file_name(partition, part_prescribe_df_len,
                                            i)
        write_fhir_json(bundle(entry=prescribe_fhir_entries), mr_dir,
                        file_name)
        i = i + partition
    return
Example #15
def obs_conversion(input_path, map_df, output_path, partition):
    filter_cols = ['OBSCLINID', 'PATID', 'ENCOUNTERID', 'OBSCLIN_PROVIDERID', 'OBSCLIN_CODE', 'OBSCLIN_TYPE',
                   'OBSCLIN_DATE', 'OBSCLIN_TIME', 'OBSCLIN_RESULT_NUM', 'OBSCLIN_RESULT_UNIT']
    input_df, subset_map_df = get_input_df("OBS_CLIN", input_path, map_df, use_cols=filter_cols)

    def map_one_row(row):
        entry = {
            "fullUrl": "https://www.hl7.org/fhir/observation.html",
            "resource": {
                "resourceType": "Observation",
                "id": row['OBSCLINID'],
                "status": "final"
            }
        }
        if not pd.isnull(row['PATID']):
            entry['resource']["subject"] = {
                "reference": "Patient/{}".format(row['PATID'])
            }
        if not pd.isnull(row['ENCOUNTERID']):
            entry['resource']["context"] = {
                "reference": "Encounter/{}".format(row['ENCOUNTERID'])
            }
        if not pd.isnull(row['OBSCLIN_PROVIDERID']):
            entry['resource']["performer"] = {
                'actor': {
                    "reference": "Practitioner/{}".format(row['OBSCLIN_PROVIDERID'])
                }
            }
        if not pd.isnull(row['OBSCLIN_RESULT_NUM']) or not pd.isnull(row['OBSCLIN_RESULT_UNIT']):
            value_dict = {
                'system': "http://unitsofmeasure.org",
            }
            if not pd.isnull(row['OBSCLIN_RESULT_NUM']):
                value_dict['value'] = row['OBSCLIN_RESULT_NUM']
            if not pd.isnull(row['OBSCLIN_RESULT_UNIT']):
                value_dict['unit'] = row['OBSCLIN_RESULT_UNIT']
            entry['resource']['valueQuantity'] = value_dict

        if not pd.isnull(row['OBSCLIN_DATE']):
            if not pd.isnull(row['OBSCLIN_TIME']):
                entry['resource']['effectiveDateTime'] = '{} {}'.format(
                    row['OBSCLIN_DATE'], row['OBSCLIN_TIME'])
            else:
                entry['resource']['effectiveDateTime'] = row['OBSCLIN_DATE']

        if not pd.isnull(row['OBSCLIN_CODE']):
            code_dict = {
                "code": row['OBSCLIN_CODE']
            }
            if not pd.isnull(row['OBSCLIN_TYPE']):
                code_dict['system'] = subset_map_df.loc['OBSCLIN_TYPE', row['OBSCLIN_TYPE']].at['fhir_out_cd']
            entry['resource']['code'] = {
                "coding": [code_dict]
            }

        input_fhir_entries.append(entry)
        return

    i = 0
    partition = int(partition)
    input_df_len = len(input_df)
    input_dir = os.path.join(output_path, 'Observation')
    write_fhir_0_json(input_dir)
    while i < input_df_len:
        input_fhir_entries = []
        part_input_df = input_df.iloc[i:i+partition, :]
        part_input_df.apply(lambda row: map_one_row(row), axis=1)
        part_input_df_len = len(part_input_df)
        file_name = get_partition_file_name(partition, part_input_df_len, i)
        write_fhir_json(bundle(entry=input_fhir_entries), input_dir, file_name)
        i = i + partition
    return
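Every conversion above repeats the same partition loop. Under the same helper names, that shared shape could be factored into one generic function; this is a refactoring sketch, not code from the project:

def convert_in_partitions(df, partition, out_dir, map_one_row_into):
    # Generic version of the loop each conversion repeats: slice the frame,
    # map every row into FHIR entries, and write one bundle per slice.
    partition = int(partition)
    write_fhir_0_json(out_dir)
    for i in range(0, len(df), partition):
        entries = []
        part_df = df.iloc[i:i + partition, :]
        part_df.apply(lambda row: map_one_row_into(row, entries), axis=1)
        file_name = get_partition_file_name(partition, len(part_df), i)
        write_fhir_json(bundle(entry=entries), out_dir, file_name)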