def query(nidm_file_list, query_file, output_file, get_participants, get_instruments, get_instrument_vars):
    """
    Query NIDM graphs and return the result as a pandas DataFrame.

    :param nidm_file_list: comma-separated string of NIDM file paths
    :param query_file: path to a text file containing a SPARQL query;
        used only when none of the get_* flags is set
    :param output_file: optional CSV path; when given, instrument results
        are written there instead of printed
    :param get_participants: flag - return participant IDs
    :param get_instruments: flag - return instruments for every project
    :param get_instrument_vars: flag - return instrument variables for
        every project
    :return: pandas DataFrame with the query results
    """
    # Local import keeps this function self-contained; pandas is needed for
    # combining per-project frames below.
    import pandas as pd

    files = nidm_file_list.split(',')

    if get_participants:
        df = GetParticipantIDs(files, output_file=output_file)
    elif get_instruments:
        # Collect instruments for each project UUID, then combine once.
        # pd.concat replaces the per-iteration DataFrame.append pattern,
        # which was removed in pandas 2.0; it also leaves df well-defined
        # (empty) when there are no projects.
        project_list = GetProjectsUUID(files)
        frames = [GetProjectInstruments(files, project_id=project)
                  for project in project_list]
        df = pd.concat(frames) if frames else pd.DataFrame()
        # write dataframe if output file parameter specified
        if output_file is not None:
            df.to_csv(output_file)
        else:
            print(df)
    elif get_instrument_vars:
        # Same pattern as above, but for instrument variables.
        project_list = GetProjectsUUID(files)
        frames = [GetInstrumentVariables(files, project_id=project)
                  for project in project_list]
        df = pd.concat(frames) if frames else pd.DataFrame()
        # write dataframe if output file parameter specified
        if output_file is not None:
            df.to_csv(output_file)
        else:
            print(df)
    else:
        # No flag selected: read the SPARQL query from file and run it.
        with open(query_file, 'r') as fp:
            query = fp.read()
        df = sparql_query_nidm(files, query, output_file)

    return df
def query(nidm_file_list, cde_file_list, query_file, output_file,
          get_participants, get_instruments, get_instrument_vars,
          get_dataelements, get_brainvols, get_dataelements_brainvols,
          get_fields, uri, blaze, j, verbosity):
    """
    This function provides query support for NIDM graphs.

    :param nidm_file_list: comma-separated string of NIDM file paths
    :param cde_file_list: comma-separated string of CDE files used to seed
        the CDE cache (may be empty/None)
    :param query_file: path to a SPARQL query file; used when no other
        query mode is selected
    :param output_file: optional output path; when given, results are
        written to file (CSV, or JSON for REST queries with -j) instead of
        printed
    :param get_participants: flag - return participant IDs
    :param get_instruments: flag - return instruments for every project
    :param get_instrument_vars: flag - return instrument variables for
        every project
    :param get_dataelements: flag - return data elements
    :param get_brainvols: flag - return brain volumes
    :param get_dataelements_brainvols: flag - return brain-volume data
        elements
    :param get_fields: comma-separated field names for a per-project REST
        fields query
    :param uri: REST API URI to evaluate
    :param blaze: optional Blazegraph URL; exported as BLAZEGRAPH_URL
    :param j: flag - emit JSON output for REST queries
    :param verbosity: verbosity level passed to the REST parser
    :return: pandas DataFrame for the participant and SPARQL paths;
        otherwise None (results are printed or written to output_file)
    """
    # Local imports keep the function self-contained: sys for the error
    # exit, pandas for frame concatenation.
    import sys
    import pandas as pd

    # if there is a CDE file list, seed the CDE cache
    if cde_file_list:
        getCDEs(cde_file_list.split(","))

    if blaze:
        os.environ["BLAZEGRAPH_URL"] = blaze
        print("setting BLAZEGRAPH_URL to {}".format(blaze))

    if get_participants:
        df = GetParticipantIDs(nidm_file_list.split(','), output_file=output_file)
        if output_file is None:
            print(df.to_string())
        return df
    elif get_instruments:
        # first get all project UUIDs, then collect instruments for each and
        # combine once. pd.concat replaces the per-iteration
        # DataFrame.append pattern, which was removed in pandas 2.0.
        project_list = GetProjectsUUID(nidm_file_list.split(','))
        frames = [GetProjectInstruments(nidm_file_list.split(','), project_id=project)
                  for project in project_list]
        df = pd.concat(frames) if frames else pd.DataFrame()
        # write dataframe if output file parameter specified
        if output_file is not None:
            df.to_csv(output_file)
        else:
            print(df.to_string())
    elif get_instrument_vars:
        # same pattern as above, but for instrument variables
        project_list = GetProjectsUUID(nidm_file_list.split(','))
        frames = [GetInstrumentVariables(nidm_file_list.split(','), project_id=project)
                  for project in project_list]
        df = pd.concat(frames) if frames else pd.DataFrame()
        # write dataframe if output file parameter specified
        if output_file is not None:
            df.to_csv(output_file)
        else:
            print(df.to_string())
    elif get_dataelements:
        datael = GetDataElements(nidm_file_list=nidm_file_list)
        # if output file parameter specified
        if output_file is not None:
            datael.to_csv(output_file)
        else:
            print(datael.to_string())
    elif get_fields:
        # fields only query. We'll do it with the rest api
        restParser = RestParser(verbosity_level=int(verbosity))
        if output_file is not None:
            restParser.setOutputFormat(RestParser.OBJECT_FORMAT)
            df_list = []
        else:
            restParser.setOutputFormat(RestParser.CLI_FORMAT)

        # run a fields query per NIDM file against that file's project UUID
        for nidm_file in nidm_file_list.split(","):
            project = GetProjectsUUID([nidm_file])
            fields_uri = "/projects/" + project[0].toPython().split(
                "/")[-1] + "?fields=" + get_fields
            # get fields output from each file and concatenate
            if output_file is None:
                # just print results
                print(restParser.run([nidm_file], fields_uri))
            else:
                df_list.append(pd.DataFrame(restParser.run([nidm_file], fields_uri)))

        if output_file is not None:
            # concatenate data frames and output to csv file
            df = pd.concat(df_list)
            df.to_csv(output_file)
    elif uri:
        restParser = RestParser(verbosity_level=int(verbosity))
        if j:
            restParser.setOutputFormat(RestParser.JSON_FORMAT)
        elif output_file is not None:
            restParser.setOutputFormat(RestParser.OBJECT_FORMAT)
        else:
            restParser.setOutputFormat(RestParser.CLI_FORMAT)
        df = restParser.run(nidm_file_list.split(','), uri)
        if output_file is not None:
            if j:
                with open(output_file, "w+") as f:
                    f.write(dumps(df))
            else:
                # convert object df to dataframe and output
                pd.DataFrame(df).to_csv(output_file)
        else:
            print(df)
    elif get_dataelements_brainvols:
        brainvol = GetBrainVolumeDataElements(nidm_file_list=nidm_file_list)
        # if output file parameter specified
        if output_file is not None:
            brainvol.to_csv(output_file)
        else:
            print(brainvol.to_string())
    elif get_brainvols:
        brainvol = GetBrainVolumes(nidm_file_list=nidm_file_list)
        # if output file parameter specified
        if output_file is not None:
            brainvol.to_csv(output_file)
        else:
            print(brainvol.to_string())
    elif query_file:
        df = sparql_query_nidm(nidm_file_list.split(','), query_file, output_file)
        if output_file is None:
            print(df.to_string())
        return df
    else:
        print("ERROR: No query parameter provided. See help:")
        print()
        os.system("pynidm query --help")
        # sys.exit is the reliable exit call; the bare exit() builtin is a
        # site-module convenience that may be absent.
        sys.exit(1)
def query(nidm_file_list, cde_file_list, query_file, output_file,
          get_participants, get_instruments, get_instrument_vars,
          get_dataelements, get_brainvols, get_dataelements_brainvols,
          uri, j, verbosity):
    """
    Query NIDM graphs: participants, instruments, data elements, brain
    volumes, REST URIs, or an arbitrary SPARQL query file.

    :param nidm_file_list: comma-separated string of NIDM file paths
    :param cde_file_list: comma-separated string of CDE files used to seed
        the CDE cache (may be empty/None)
    :param query_file: path to a text file containing a SPARQL query; used
        when no other query mode is selected
    :param output_file: optional CSV path; when given, results are written
        there instead of printed
    :param get_participants: flag - return participant IDs
    :param get_instruments: flag - return instruments for every project
    :param get_instrument_vars: flag - return instrument variables for
        every project
    :param get_dataelements: flag - return data elements
    :param get_brainvols: flag - return brain volumes
    :param get_dataelements_brainvols: flag - return brain-volume data
        elements
    :param uri: REST API URI to evaluate
    :param j: flag - emit JSON output for REST queries
    :param verbosity: verbosity level passed to the REST parser
    :return: pandas DataFrame for the participant and SPARQL paths;
        otherwise None (results are printed or written to output_file)
    """
    # Local import: pandas is needed to combine per-project frames.
    import pandas as pd

    # if there is a CDE file list, seed the CDE cache
    if cde_file_list:
        getCDEs(cde_file_list.split(","))

    if get_participants:
        df = GetParticipantIDs(nidm_file_list.split(','), output_file=output_file)
        if output_file is None:
            print(df.to_string())
        return df
    elif get_instruments:
        # first get all project UUIDs, then collect instruments for each and
        # combine once. pd.concat replaces the per-iteration
        # DataFrame.append pattern, which was removed in pandas 2.0.
        project_list = GetProjectsUUID(nidm_file_list.split(','))
        frames = [GetProjectInstruments(nidm_file_list.split(','), project_id=project)
                  for project in project_list]
        df = pd.concat(frames) if frames else pd.DataFrame()
        # write dataframe if output file parameter specified
        if output_file is not None:
            df.to_csv(output_file)
        else:
            print(df.to_string())
    elif get_instrument_vars:
        # same pattern as above, but for instrument variables
        project_list = GetProjectsUUID(nidm_file_list.split(','))
        frames = [GetInstrumentVariables(nidm_file_list.split(','), project_id=project)
                  for project in project_list]
        df = pd.concat(frames) if frames else pd.DataFrame()
        # write dataframe if output file parameter specified
        if output_file is not None:
            df.to_csv(output_file)
        else:
            print(df.to_string())
    elif get_dataelements:
        datael = GetDataElements(nidm_file_list=nidm_file_list)
        # if output file parameter specified
        if output_file is not None:
            datael.to_csv(output_file)
        else:
            print(datael.to_string())
    elif uri:
        df = restParser(nidm_file_list.split(','), uri, int(verbosity))
        if j:
            print(dumps(df, indent=2))
        else:
            # REST results may be a list, a dict, or a DataFrame-like
            # object; print each shape appropriately.
            if isinstance(df, list):
                for x in df:
                    print(x)
            elif isinstance(df, dict):
                for k in df.keys():
                    print(str(k) + ' ' + str(df[k]))
            else:
                print(df.to_string())
    elif get_dataelements_brainvols:
        brainvol = GetBrainVolumeDataElements(nidm_file_list=nidm_file_list)
        # if output file parameter specified
        if output_file is not None:
            brainvol.to_csv(output_file)
        else:
            print(brainvol.to_string())
    elif get_brainvols:
        brainvol = GetBrainVolumes(nidm_file_list=nidm_file_list)
        # if output file parameter specified
        if output_file is not None:
            brainvol.to_csv(output_file)
        else:
            print(brainvol.to_string())
    else:
        # read query from text file and run it as SPARQL
        with open(query_file, 'r') as fp:
            query = fp.read()
        df = sparql_query_nidm(nidm_file_list.split(','), query, output_file)
        if output_file is None:
            print(df.to_string())
        return df
def query(nidm_file_list, cde_file_list, query_file, output_file,
          get_participants, get_instruments, get_instrument_vars,
          get_dataelements, get_brainvols, get_dataelements_brainvols,
          uri, j, verbosity):
    """
    This function provides query support for NIDM graphs.

    :param nidm_file_list: comma-separated string of NIDM file paths
    :param cde_file_list: comma-separated string of CDE files used to seed
        the CDE cache (may be empty/None)
    :param query_file: path to a SPARQL query file; used when no other
        query mode is selected
    :param output_file: optional CSV path; when given, results are written
        there instead of printed
    :param get_participants: flag - return participant IDs
    :param get_instruments: flag - return instruments for every project
    :param get_instrument_vars: flag - return instrument variables for
        every project
    :param get_dataelements: flag - return data elements
    :param get_brainvols: flag - return brain volumes
    :param get_dataelements_brainvols: flag - return brain-volume data
        elements
    :param uri: REST API URI to evaluate
    :param j: flag - emit JSON-formatted output for REST queries
    :param verbosity: verbosity level passed to the REST parser
    :return: pandas DataFrame for the participant and SPARQL paths;
        otherwise None (results are printed or written to output_file)
    """
    # Local imports keep the function self-contained: sys for the error
    # exit, pandas for frame concatenation.
    import sys
    import pandas as pd

    # if there is a CDE file list, seed the CDE cache
    if cde_file_list:
        getCDEs(cde_file_list.split(","))

    if get_participants:
        df = GetParticipantIDs(nidm_file_list.split(','), output_file=output_file)
        if output_file is None:
            print(df.to_string())
        return df
    elif get_instruments:
        # first get all project UUIDs, then collect instruments for each and
        # combine once. pd.concat replaces the per-iteration
        # DataFrame.append pattern, which was removed in pandas 2.0.
        project_list = GetProjectsUUID(nidm_file_list.split(','))
        frames = [GetProjectInstruments(nidm_file_list.split(','), project_id=project)
                  for project in project_list]
        df = pd.concat(frames) if frames else pd.DataFrame()
        # write dataframe if output file parameter specified
        if output_file is not None:
            df.to_csv(output_file)
        else:
            print(df.to_string())
    elif get_instrument_vars:
        # same pattern as above, but for instrument variables
        project_list = GetProjectsUUID(nidm_file_list.split(','))
        frames = [GetInstrumentVariables(nidm_file_list.split(','), project_id=project)
                  for project in project_list]
        df = pd.concat(frames) if frames else pd.DataFrame()
        # write dataframe if output file parameter specified
        if output_file is not None:
            df.to_csv(output_file)
        else:
            print(df.to_string())
    elif get_dataelements:
        datael = GetDataElements(nidm_file_list=nidm_file_list)
        # if output file parameter specified
        if output_file is not None:
            datael.to_csv(output_file)
        else:
            print(datael.to_string())
    elif uri:
        restParser = RestParser(verbosity_level=int(verbosity))
        if j:
            restParser.setOutputFormat(RestParser.JSON_FORMAT)
        else:
            restParser.setOutputFormat(RestParser.CLI_FORMAT)
        df = restParser.run(nidm_file_list.split(','), uri)
        print(df)
    elif get_dataelements_brainvols:
        brainvol = GetBrainVolumeDataElements(nidm_file_list=nidm_file_list)
        # if output file parameter specified
        if output_file is not None:
            brainvol.to_csv(output_file)
        else:
            print(brainvol.to_string())
    elif get_brainvols:
        brainvol = GetBrainVolumes(nidm_file_list=nidm_file_list)
        # if output file parameter specified
        if output_file is not None:
            brainvol.to_csv(output_file)
        else:
            print(brainvol.to_string())
    elif query_file:
        df = sparql_query_nidm(nidm_file_list.split(','), query_file, output_file)
        if output_file is None:
            print(df.to_string())
        return df
    else:
        print("ERROR: No query parameter provided. See help:")
        print()
        os.system("pynidm query --help")
        # sys.exit is the reliable exit call; the bare exit() builtin is a
        # site-module convenience that may be absent.
        sys.exit(1)