Example #1
def makeProjectTestFile2(filename):
    DCTYPES = Namespace("http://purl.org/dc/dcmitype/")

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "TEST B",  # this is the "title"
        Constants.NIDM_PROJECT_IDENTIFIER: 1234,
        Constants.NIDM_PROJECT_DESCRIPTION: "More Scans",
        Constants.NIDM_FILENAME: "testfile2.ttl",
        Constants.NIDM_PROJECT_LICENSE: "Creative Commons",
        Constants.NIDM_PROJECT_SOURCE: "Other",
        Constants.NIDM_HAD_NUMERICAL_VALUE: "numval???",
        Constants.NIDM_BATH_SOLUTION: "bath",
        Constants.NIDM_CELL_TYPE: "ctype",
        Constants.NIDM_CHANNEL_NUMBER: "5",
        Constants.NIDM_ELECTRODE_IMPEDANCE: ".01",
        Constants.NIDM_GROUP_LABEL: "group 123",
        Constants.NIDM_HOLLOW_ELECTRODE_SOLUTION: "water",
        Constants.NIDM_HAD_IMAGE_CONTRACT_TYPE: "off",
        Constants.NIDM_HAD_IMAGE_USAGE_TYPE: "abcd",
        Constants.NIDM_NUBMER_OF_CHANNELS: "11",
        Constants.NIDM_APPLIED_FILTER: "on",
        Constants.NIDM_SOLUTION_FLOW_SPEED: "2.8",
        Constants.NIDM_RECORDING_LOCATION: "lab"
    }
    project = Project(uuid="_123_" + filename, attributes=kwargs)
    s1 = Session(project)

    a1 = AssessmentAcquisition(session=s1)
    # = s1.add_acquisition("a1", attributes={"http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#Age" : 22})

    p1 = a1.add_person("p1",
                       attributes={
                           Constants.NIDM_GIVEN_NAME: "George",
                           Constants.NIDM_AGE: 22
                       })
    a1.add_qualified_association(person=p1, role=Constants.NIDM_PARTICIPANT)

    return saveProject(filename, project)
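
A minimal usage sketch for the helper above, assuming the imports its body implies (Project, Session, AssessmentAcquisition and Constants from PyNIDM's nidm.experiment package, Namespace from rdflib) and a saveProject(filename, project) helper defined elsewhere in the same test module:

# hypothetical driver; saveProject is assumed to write the project
# graph to `filename` and return a path or status value
if __name__ == "__main__":
    result = makeProjectTestFile2("testfile2.ttl")
    print(result)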
Example #2
def main(argv):
    parser = ArgumentParser(description='This program will load in a CSV file and iterate over the header \
     variable names, performing an elastic search of https://scicrunch.org/ for NIDM-ReproNim \
     tagged terms that fuzzy-match the variable names.  The user will then interactively pick \
     a term to associate with each variable name.  The resulting annotated CSV data will \
     then be written to a NIDM data file.  Note, you must obtain an Interlex API key by signing \
     up for an account at scicrunch.org, then going to My Account and API Keys.  Then set the \
     environment variable INTERLEX_API_KEY to your key.')

    parser.add_argument('-csv', dest='csv_file', required=True, help="Full path to CSV file to convert")
    # parser.add_argument('-ilxkey', dest='key', required=True, help="Interlex/SciCrunch API key to use for query")
    parser.add_argument('-json_map', dest='json_map', required=False, help="Full path to user-supplied JSON file containing variable-term mappings.")
    parser.add_argument('-nidm', dest='nidm_file', required=False, help="Optional full path of NIDM file to add CSV->NIDM converted graph to")
    parser.add_argument('-no_concepts', action='store_true', required=False, help='If this flag is set then no concept associations will be '
                                'asked of the user.  This is useful if you already have a -json_map specified without concepts and want to '
                                'simply run this program to get a NIDM file without user interaction to associate concepts.')
    # parser.add_argument('-owl', action='store_true', required=False, help='Optionally searches NIDM OWL files...internet connection required')
    # parser.add_argument('-png', action='store_true', required=False, help='Optional flag, when set a PNG image file of RDF graph will be produced')
    # parser.add_argument('-jsonld', action='store_true', required=False, help='Optional flag, when set NIDM files are saved as JSON-LD instead of TURTLE')
    parser.add_argument('-log','--log', dest='logfile', required=False, default=None, help="Full path to directory in which to save the log file.  Log file name is csv2nidm_[arg.csv_file].log")
    parser.add_argument('-out', dest='output_file', required=True, help="Full path with filename to save NIDM file")
    args = parser.parse_args()



    #open CSV file and load into a pandas DataFrame
    df = pd.read_csv(args.csv_file)
    #temp = csv.reader(args.csv_file)
    #df = pd.DataFrame(temp)

    #maps variables in CSV file to terms
    #if args.owl is not False:
    #    column_to_terms = map_variables_to_terms(df=df, apikey=args.key, directory=dirname(args.output_file), output_file=args.output_file, json_file=args.json_map, owl_file=args.owl)
    #else:
    # if user did not specify -no_concepts then associate concepts interactively with user
    if not args.no_concepts:
        column_to_terms, cde = map_variables_to_terms(df=df, assessment_name=basename(args.csv_file),
                                                      directory=dirname(args.output_file), output_file=args.output_file,
                                                      json_file=args.json_map)
    # run without concept mappings
    else:
        column_to_terms, cde = map_variables_to_terms(df=df, assessment_name=basename(args.csv_file),
                                                      directory=dirname(args.output_file), output_file=args.output_file,
                                                      json_file=args.json_map, associate_concepts=False)

    if args.logfile is not None:
        logging.basicConfig(filename=join(args.logfile,'csv2nidm_' + os.path.splitext(os.path.basename(args.csv_file))[0] + '.log'), level=logging.DEBUG)
        # add some logging info
        logging.info("csv2nidm %s" %args)


    #If user has added an existing NIDM file as a command line parameter then add to existing file for subjects who exist in the NIDM file
    if args.nidm_file:
        print("Adding to NIDM file...")
        #read in NIDM file
        project = read_nidm(args.nidm_file)
        #get list of session objects
        session_objs=project.get_sessions()

        #look at column_to_terms dictionary for NIDM URL for subject id  (Constants.NIDM_SUBJECTID)
        id_field=None
        for key, value in column_to_terms.items():
            if Constants.NIDM_SUBJECTID._str == column_to_terms[key]['label']:
                key_tuple = eval(key)
                #id_field=key
                id_field = key_tuple.variable
                #make sure id_field is a string for zero-padded subject ids
                #re-read data file with constraint that key field is read as string
                df = pd.read_csv(args.csv_file,dtype={id_field : str})
                break

        #if we couldn't find a subject ID field in column_to_terms, ask user
        if id_field is None:
            option=1
            for column in df.columns:
                print("%d: %s" %(option,column))
                option=option+1
            selection=input("Please select the subject ID field from the list above: ")
            # Make sure user selected one of the options.  If not, present the selection input again
            while (not selection.isdigit()) or not (1 <= int(selection) < option):
                # Wait for user input
                selection = input("Please select the subject ID field from the list above: \t")
            id_field=df.columns[int(selection)-1]
            #make sure id_field is a string for zero-padded subject ids
            #re-read data file with constraint that key field is read as string
            df = pd.read_csv(args.csv_file,dtype={id_field : str})



        #use RDFLib here for temporary graph making query easier
        rdf_graph = Graph()
        rdf_graph.parse(source=StringIO(project.serializeTurtle()),format='turtle')

        print("Querying for existing participants in NIDM graph....")
        #find subject ids and sessions in NIDM document
        query = """SELECT DISTINCT ?session ?nidm_subj_id ?agent
                    WHERE {
                        ?activity prov:wasAssociatedWith ?agent ;
                            dct:isPartOf ?session  .
                        ?agent rdf:type prov:Agent ;
                            ndar:src_subject_id ?nidm_subj_id .
                    }"""
        #print(query)
        qres = rdf_graph.query(query)


        for row in qres:
            logging.info("found existing participant %s \t %s" %(row[0],row[1]))
            #find row in CSV file with subject id matching agent from NIDM file

            #csv_row = df.loc[df[id_field]==type(df[id_field][0])(row[1])]
            #find row in CSV file with matching subject id to the agent in the NIDM file
            #be careful about data types...simply cast the dataframe subject id column and the query result to strings.
            #here we're removing the leading 0's from IDs because pandas.read_csv strips those unless you know ahead of
            #time which column is the subject id....
            csv_row = df.loc[df[id_field].astype('str').str.contains(str(row[1]).lstrip("0"))]
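            # e.g. an agent id of "0123" becomes "123" after lstrip("0") and matches CSV
            # ids containing "123" as a substring; note this is substring matching, so an
            # id of "1234" would also match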

            #if there was data about this subject in the NIDM file already (i.e. an agent already exists with this subject id)
            #then add this CSV assessment data to NIDM file, else skip it....
            if not csv_row.empty:

                #NIDM document session uuid
                session_uuid = row[0]

                #temporary list of string-based URIs of session objects from API
                temp = [o.identifier._uri for o in session_objs]
                #get session object from existing NIDM file that is associated with a specific subject id
                #nidm_session = (i for i,x in enumerate([o.identifier._uri for o in session_objs]) if x == str(session_uuid))
                nidm_session = session_objs[temp.index(str(session_uuid))]
                #for nidm_session in session_objs:
                #    if nidm_session.identifier._uri == str(session_uuid):
                #add an assessment acquisition for the phenotype data to session and associate with agent
                acq=AssessmentAcquisition(session=nidm_session)
                #add acquisition entity for assessment
                acq_entity = AssessmentObject(acquisition=acq)
                #add qualified association with existing agent
                acq.add_qualified_association(person=row[2],role=Constants.NIDM_PARTICIPANT)

                # add git-annex info if exists
                num_sources = addGitAnnexSources(obj=acq_entity,filepath=args.csv_file,bids_root=dirname(args.csv_file))
                # if there aren't any git annex sources then just store the local directory information
                if num_sources == 0:
                    # WIP: add absolute location of BIDS directory on disk for later finding of files
                    acq_entity.add_attributes({Constants.PROV['Location']:"file:/" + args.csv_file})

                # store file to acq_entity
                acq_entity.add_attributes({Constants.NIDM_FILENAME:basename(args.csv_file)})

                #store other data from row with columns_to_term mappings
                for row_variable in csv_row:
                    #check if row_variable is subject id, if so skip it
                    if row_variable == id_field:
                        continue
                    #skip empty values
                    if not csv_row[row_variable].values[0]:
                        continue
                    add_attributes_with_cde(acq_entity, cde, row_variable, csv_row[row_variable].values[0])

        print ("Adding CDEs to graph....")
        # convert to rdflib Graph and add CDEs
        rdf_graph = Graph()
        rdf_graph.parse(source=StringIO(project.serializeTurtle()),format='turtle')
        rdf_graph = rdf_graph + cde

        print("Backing up original NIDM file...")
        copy2(src=args.nidm_file,dst=args.nidm_file+".bak")
        print("Writing NIDM file....")
        rdf_graph.serialize(destination=args.nidm_file,format='turtle')

    else:
        print("Creating NIDM file...")
        #If user did not choose to add this data to an existing NIDM file then create a new one for the CSV data
        #create empty project
        project=Project()

        #simply add name of file to project since we don't know anything about it
        project.add_attributes({Constants.NIDM_FILENAME:args.csv_file})


        #look at column_to_terms dictionary for NIDM URL for subject id  (Constants.NIDM_SUBJECTID)
        id_field=None
        for key, value in column_to_terms.items():
            # using skos:sameAs relationship to associate subject identifier variable from csv with a known term
            # for subject IDs
            if 'sameAs' in column_to_terms[key]:
                if Constants.NIDM_SUBJECTID.uri == column_to_terms[key]['sameAs']:
                    key_tuple = eval(key)
                    id_field=key_tuple.variable
                    #make sure id_field is a string for zero-padded subject ids
                    #re-read data file with constraint that key field is read as string
                    df = pd.read_csv(args.csv_file,dtype={id_field : str})
                    break

        #if we couldn't find a subject ID field in column_to_terms, ask user
        if id_field is None:
            option=1
            for column in df.columns:
                print("%d: %s" %(option,column))
                option=option+1
            selection=input("Please select the subject ID field from the list above: ")
            # Make sure user selected one of the options.  If not, present the selection input again
            while (not selection.isdigit()) or not (1 <= int(selection) < option):
                # Wait for user input
                selection = input("Please select the subject ID field from the list above: \t")
            id_field=df.columns[int(selection)-1]
            #make sure id_field is a string for zero-padded subject ids
            #re-read data file with constraint that key field is read as string
            df = pd.read_csv(args.csv_file,dtype={id_field : str})


        #iterate over rows and store in NIDM file
        for csv_index, csv_row in df.iterrows():
            #create a session object
            session=Session(project)

            #create an acquisition activity and entity
            acq=AssessmentAcquisition(session)
            acq_entity=AssessmentObject(acq)

            #create prov:Agent for subject
            #acq.add_person(attributes=({Constants.NIDM_SUBJECTID:row['participant_id']}))

            # add git-annex info if exists
            num_sources = addGitAnnexSources(obj=acq_entity,filepath=args.csv_file,bids_root=os.path.dirname(args.csv_file))
            # if there aren't any git annex sources then just store the local directory information
            if num_sources == 0:
                # WIP: add absolute location of BIDS directory on disk for later finding of files
                acq_entity.add_attributes({Constants.PROV['Location']:"file:/" + args.csv_file})

            # store file to acq_entity
            acq_entity.add_attributes({Constants.NIDM_FILENAME : basename(args.csv_file)})


            #store other data from row with columns_to_term mappings
            for row_variable,row_data in csv_row.items():
                if not row_data:
                    continue

                #check if row_variable is subject id, if so skip it
                if row_variable==id_field:
                    ### WIP: check if an agent already exists with the same ID.  If so, use it; else create a new agent

                    #add qualified association with person
                    acq.add_qualified_association(
                        person=acq.add_person(attributes=({Constants.NIDM_SUBJECTID: str(row_data)})),
                        role=Constants.NIDM_PARTICIPANT)

                    continue
                else:
                    add_attributes_with_cde(acq_entity, cde, row_variable, row_data)

                    #print(project.serializeTurtle())

        # convert to rdflib Graph and add CDEs
        rdf_graph = Graph()
        rdf_graph.parse(source=StringIO(project.serializeTurtle()),format='turtle')
        rdf_graph = rdf_graph + cde

        print("Writing NIDM file....")
        rdf_graph.serialize(destination=args.output_file,format='turtle')
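
A hedged sketch of driving this converter programmatically; the entry-point name and paths below are placeholders, and note that main() reads sys.argv through parse_args(), so its argv parameter is effectively unused:

# hypothetical driver for the converter above
import sys

if __name__ == "__main__":
    sys.argv = ["csv2nidm",
                "-csv", "/data/assessment.csv",
                "-out", "/data/assessment_nidm.ttl"]
    main(sys.argv)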
Example #3
def bidsmri2project(directory, args):
    #Parse dataset_description.json file in BIDS directory
    if os.path.isdir(directory):
        try:
            with open(os.path.join(directory,
                                   'dataset_description.json')) as data_file:
                dataset = json.load(data_file)
        except OSError:
            logging.critical(
                "Cannot find dataset_description.json file which is required in the BIDS spec"
            )
            exit("-1")
    else:
        logging.critical("Error: BIDS directory %s does not exist!" %
                         os.path.join(directory))
        exit("-1")

    #create project / nidm-exp doc
    project = Project()

    #add various attributes if they exist in BIDS dataset
    for key in dataset:
        #if key from dataset_description file is mapped to term in BIDS_Constants.py then add to NIDM object
        if key in BIDS_Constants.dataset_description:
            if type(dataset[key]) is list:
                project.add_attributes({
                    BIDS_Constants.dataset_description[key]:
                    "".join(dataset[key])
                })
            else:
                project.add_attributes(
                    {BIDS_Constants.dataset_description[key]: dataset[key]})
    #add absolute location of BIDS directory on disk for later finding of files which are stored relatively in NIDM document
    project.add_attributes({Constants.PROV['Location']: directory})

    #get BIDS layout
    bids_layout = BIDSLayout(directory)

    #create empty dictionary for sessions where key is subject id; used later to link scans to the same session as demographics
    session = {}
    participant = {}
    #Parse participants.tsv file in BIDS directory and create study and acquisition objects
    if os.path.isfile(os.path.join(directory, 'participants.tsv')):
        with open(os.path.join(directory, 'participants.tsv')) as csvfile:
            participants_data = csv.DictReader(csvfile, delimiter='\t')

            #logic to map variables to terms

            #first iterate over variables in dataframe and check which ones are already mapped as BIDS constants and which are not.
            #For those that are not, we want to use the variable-term mapping functions to help the user do the mapping
            #iterate over columns
            mapping_list = []
            column_to_terms = {}
            for field in participants_data.fieldnames:

                #column is not in BIDS_Constants
                if not (field in BIDS_Constants.participants):
                    #add column to list for column_to_terms mapping
                    mapping_list.append(field)

            #do variable-term mappings
            if (args.json_map is not False) or (args.key is not None):

                #if user didn't supply a json mapping file but we're doing some variable-term mapping create an empty one for column_to_terms to use
                if args.json_map is False:
                    #defaults to participants.json because here we're mapping the participants.tsv file variables to terms
                    # if participants.json file doesn't exist then run without json mapping file
                    if not os.path.isfile(
                            os.path.join(directory, 'participants.json')):
                        #maps variables in CSV file to terms
                        temp = DataFrame(columns=mapping_list)

                        column_to_terms, cde = map_variables_to_terms(
                            directory=directory,
                            assessment_name='participants.tsv',
                            df=temp,
                            apikey=args.key,
                            output_file=os.path.join(directory,
                                                     'participants.json'))
                    else:
                        #maps variables in CSV file to terms
                        temp = DataFrame(columns=mapping_list)
                        column_to_terms, cde = map_variables_to_terms(
                            directory=directory,
                            assessment_name='participants.tsv',
                            df=temp,
                            apikey=args.key,
                            output_file=os.path.join(directory,
                                                     'participants.json'),
                            json_file=os.path.join(directory,
                                                   'participants.json'))

                else:
                    #maps variables in CSV file to terms
                    temp = DataFrame(columns=mapping_list)
                    column_to_terms, cde = map_variables_to_terms(
                        directory=directory,
                        assessment_name='participants.tsv',
                        df=temp,
                        apikey=args.key,
                        output_file=os.path.join(directory,
                                                 'participants.json'),
                        json_file=args.json_map)

            for row in participants_data:
                #create session object for subject to be used for participant metadata and image data
                #parse subject id from "sub-XXXX" string
                temp = row['participant_id'].split("-")
                #handle ambiguity in BIDS datasets: sometimes participant_id is sub-XXXX and other times it's just XXXX
                if len(temp) > 1:
                    subjid = temp[1]
                else:
                    subjid = temp[0]
                logging.info(subjid)
                session[subjid] = Session(project)

                #add acquisition object
                acq = AssessmentAcquisition(session=session[subjid])

                acq_entity = AssessmentObject(acquisition=acq)
                participant[subjid] = {}
                participant[subjid]['person'] = acq.add_person(
                    attributes=({
                        Constants.NIDM_SUBJECTID: row['participant_id']
                    }))

                #add qualified association of participant with acquisition activity
                acq.add_qualified_association(
                    person=participant[subjid]['person'],
                    role=Constants.NIDM_PARTICIPANT)
                print(acq)

                for key, value in row.items():
                    if not value:
                        continue
                    #for variables in participants.tsv file which have term mappings in BIDS_Constants.py use those; add to json_map
                    #so we don't have to map these if user supplied arguments to map variables
                    if key in BIDS_Constants.participants:

                        #if this was the participant_id, we already handled it above when creating the agent / qualified association
                        if not (BIDS_Constants.participants[key]
                                == Constants.NIDM_SUBJECTID):
                            acq_entity.add_attributes(
                                {BIDS_Constants.participants[key]: value})

                    #else if user added -mapvars flag to command line then we'll use the variable-> term mapping procedures to help user map variables to terms (also used
                    # in CSV2NIDM.py)
                    else:

                        # WIP: trying to add new support for CDEs...
                        add_attributes_with_cde(prov_object=acq_entity,
                                                cde=cde,
                                                row_variable=key,
                                                value=value)
                        # if key in column_to_terms:
                        #    acq_entity.add_attributes({QualifiedName(provNamespace(Core.safe_string(None,string=str(key)), column_to_terms[key]["url"]), ""):value})
                        #else:

                        #    acq_entity.add_attributes({Constants.BIDS[key.replace(" ", "_")]:value})

    #create acquisition objects for each scan for each subject

    #loop through all subjects in dataset
    for subject_id in bids_layout.get_subjects():
        logging.info("Converting subject: %s" % subject_id)
        #skip .git directories...added to support datalad datasets
        if subject_id.startswith("."):
            continue

        #check if there's a session number.  If so, store it in the session activity
        session_dirs = bids_layout.get(target='session',
                                       subject=subject_id,
                                       return_type='dir')
        #if session_dirs has entries then get any metadata about session and store in session activity

        #bids_layout.get(subject=subject_id,type='session',extensions='.tsv')
        #bids_layout.get(subject=subject_id,type='scans',extensions='.tsv')
        #bids_layout.get(extensions='.tsv',return_type='obj')

        #check whether sessions have been created (i.e. was there a participants.tsv file?).  If not, create one here
        if not (subject_id in session):
            session[subject_id] = Session(project)

        for file_tpl in bids_layout.get(subject=subject_id,
                                        extensions=['.nii', '.nii.gz']):
            #create an acquisition activity
            acq = MRAcquisition(session[subject_id])

            #check whether participant (i.e. agent) for this subject already exists (i.e. if participants.tsv file exists) else create one
            if not (subject_id in participant):
                participant[subject_id] = {}
                participant[subject_id]['person'] = acq.add_person(
                    attributes=({
                        Constants.NIDM_SUBJECTID: subject_id
                    }))

            #add qualified association with person
            acq.add_qualified_association(
                person=participant[subject_id]['person'],
                role=Constants.NIDM_PARTICIPANT)

            if file_tpl.entities['datatype'] == 'anat':
                #do something with anatomicals
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.entities['suffix'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.entities['suffix']]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['suffix'])

                #add image usage type
                if file_tpl.entities['datatype'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans[file_tpl.entities['datatype']]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['datatype'])
                #add file link, relative to the BIDS root
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(join(file_tpl.dirname, file_tpl.filename),
                                     directory)
                })
                #WIP: add absolute location of BIDS directory on disk for later finding of files
                acq_obj.add_attributes({Constants.PROV['Location']: directory})

                #add sha512 sum
                if isfile(join(directory, file_tpl.dirname,
                               file_tpl.filename)):
                    acq_obj.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(
                            join(directory, file_tpl.dirname,
                                 file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.dirname, file_tpl.filename))
                #get associated JSON file if exists
                #e.g. the T1w.json sidecar file with acquisition information
                json_data = (bids_layout.get(
                    suffix=file_tpl.entities['suffix'],
                    subject=subject_id))[0].metadata
                if len(json_data.info) > 0:
                    for key in json_data.info:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data.info[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    ''.join(
                                        str(e) for e in json_data.info[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    json_data.info[key]
                                })

                #Parse T1w.json file in BIDS directory to add the attributes contained inside
                if os.path.isdir(directory):
                    try:
                        with open(os.path.join(directory,
                                               'T1w.json')) as data_file:
                            dataset = json.load(data_file)
                    except OSError:
                        logging.critical(
                            "Cannot find T1w.json file which is required in the BIDS spec"
                        )
                        exit("-1")
                else:
                    logging.critical("Error: BIDS directory %s does not exist!" % directory)
                    exit("-1")

                #add various attributes if they exist in BIDS dataset
                for key in dataset:
                    #if key from T1w.json file is mapped to term in BIDS_Constants.py then add to NIDM object
                    if key in BIDS_Constants.json_keys:
                        if type(dataset[key]) is list:
                            acq_obj.add_attributes({
                                BIDS_Constants.json_keys[key]:
                                "".join(dataset[key])
                            })
                        else:
                            acq_obj.add_attributes(
                                {BIDS_Constants.json_keys[key]: dataset[key]})

            elif file_tpl.entities['datatype'] == 'func':
                #do something with functionals
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.entities['suffix'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.entities['suffix']]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['suffix'])

                #add image usage type
                if file_tpl.entities['datatype'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans[file_tpl.entities['datatype']]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['datatype'])
                #add file link, relative to the BIDS root
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(join(file_tpl.dirname, file_tpl.filename),
                                     directory)
                })
                #WIP: add absolute location of BIDS directory on disk for later finding of files
                acq_obj.add_attributes({Constants.PROV['Location']: directory})

                #add sha512 sum
                if isfile(join(directory, file_tpl.dirname,
                               file_tpl.filename)):
                    acq_obj.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(
                            join(directory, file_tpl.dirname,
                                 file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.dirname, file_tpl.filename))

                if 'run' in file_tpl.entities:
                    acq_obj.add_attributes({
                        BIDS_Constants.json_keys["run"]:
                        file_tpl.entities['run']
                    })

                #get associated JSON file if exists
                json_data = (bids_layout.get(
                    suffix=file_tpl.entities['suffix'],
                    subject=subject_id))[0].metadata

                if len(json_data.info) > 0:
                    for key in json_data.info:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data.info[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    ''.join(
                                        str(e) for e in json_data.info[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    json_data.info[key]
                                })
                #get associated events TSV file
                if 'run' in file_tpl.entities:
                    events_file = bids_layout.get(
                        subject=subject_id,
                        extensions=['.tsv'],
                        modality=file_tpl.entities['datatype'],
                        task=file_tpl.entities['task'],
                        run=file_tpl.entities['run'])
                else:
                    events_file = bids_layout.get(
                        subject=subject_id,
                        extensions=['.tsv'],
                        modality=file_tpl.entities['datatype'],
                        task=file_tpl.entities['task'])
                #if there is an events file then this is task-based so create an acquisition object for the task file and link
                if events_file:
                    #for now create acquisition object and link it to the associated scan
                    events_obj = AcquisitionObject(acq)
                    #add prov type, task name as prov:label, and link to filename of events file

                    events_obj.add_attributes({
                        PROV_TYPE:
                        Constants.NIDM_MRI_BOLD_EVENTS,
                        BIDS_Constants.json_keys["TaskName"]:
                        json_data["TaskName"],
                        Constants.NIDM_FILENAME:
                        getRelPathToBIDS(events_file[0].filename, directory)
                    })
                    #link it to appropriate MR acquisition entity
                    events_obj.wasAttributedTo(acq_obj)

                #Parse task-rest_bold.json file in BIDS directory to add the attributes contained inside
                if os.path.isdir(directory):
                    try:
                        with open(
                                os.path.join(
                                    directory,
                                    'task-rest_bold.json')) as data_file:
                            dataset = json.load(data_file)
                    except OSError:
                        logging.critical(
                            "Cannot find task-rest_bold.json file which is required in the BIDS spec"
                        )
                        exit("-1")
                else:
                    logging.critical("Error: BIDS directory %s does not exist!" % directory)
                    exit("-1")

                #add various attributes if they exist in BIDS dataset
                for key in dataset:
                    #if key from task-rest_bold.json file is mapped to term in BIDS_Constants.py then add to NIDM object
                    if key in BIDS_Constants.json_keys:
                        if type(dataset[key]) is list:
                            acq_obj.add_attributes({
                                BIDS_Constants.json_keys[key]:
                                ",".join(map(str, dataset[key]))
                            })
                        else:
                            acq_obj.add_attributes(
                                {BIDS_Constants.json_keys[key]: dataset[key]})

            elif file_tpl.entities['datatype'] == 'dwi':
                #do stuff with dwi scans...
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.entities['suffix'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.entities['suffix']]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['suffix'])

                #add image usage type
                if file_tpl.entities['datatype'] in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans["dti"]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.entities['datatype'])
                #add file link, relative to the BIDS root
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(join(file_tpl.dirname, file_tpl.filename),
                                     directory)
                })
                #add sha512 sum
                if isfile(join(directory, file_tpl.dirname,
                               file_tpl.filename)):
                    acq_obj.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(
                            join(directory, file_tpl.dirname,
                                 file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.dirname, file_tpl.filename))

                if 'run' in file_tpl.entities:
                    acq_obj.add_attributes(
                        {BIDS_Constants.json_keys["run"]: file_tpl.entities['run']})

                #get associated JSON file if exists
                json_data = (bids_layout.get(
                    suffix=file_tpl.entities['suffix'],
                    subject=subject_id))[0].metadata

                if len(json_data.info) > 0:
                    for key in json_data.info:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data.info[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    ''.join(
                                        str(e) for e in json_data.info[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    json_data.info[key]
                                })
                #for bval and bvec files, what to do with those?

                #for now, create new generic acquisition objects, link the files, and associate with the one for the DWI scan?
                acq_obj_bval = AcquisitionObject(acq)
                acq_obj_bval.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bval"]})
                #add file link to bval files
                acq_obj_bval.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(
                        join(file_tpl.dirname,
                             bids_layout.get_bval(file_tpl.filename)),
                        directory)
                })
                #WIP: add absolute location of BIDS directory on disk for later finding of files
                acq_obj_bval.add_attributes(
                    {Constants.PROV['Location']: directory})

                #add sha512 sum
                if isfile(join(directory, file_tpl.dirname,
                               file_tpl.filename)):
                    acq_obj_bval.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(
                            join(directory, file_tpl.dirname,
                                 file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.dirname, file_tpl.filename))
                acq_obj_bvec = AcquisitionObject(acq)
                acq_obj_bvec.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bvec"]})
                #add file link to bvec files
                acq_obj_bvec.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(
                        join(file_tpl.dirname,
                             bids_layout.get_bvec(file_tpl.filename)),
                        directory)
                })
                #WIP: add absolute location of BIDS directory on disk for later finding of files
                acq_obj_bvec.add_attributes(
                    {Constants.PROV['Location']: directory})

                if isfile(join(directory, file_tpl.dirname,
                               file_tpl.filename)):
                    #add sha512 sum
                    acq_obj_bvec.add_attributes({
                        Constants.CRYPTO_SHA512:
                        getsha512(
                            join(directory, file_tpl.dirname,
                                 file_tpl.filename))
                    })
                else:
                    logging.info(
                        "WARNINGL file %s doesn't exist! No SHA512 sum stored in NIDM files..."
                        % join(directory, file_tpl.dirname, file_tpl.filename))

                #TODO: link bval and bvec acquisition object entities together, or record their association with the DWI scan...

        #Added temporarily to support phenotype files
        #for each *.tsv / *.json file pair in the phenotypes directory
        #WIP: ADD VARIABLE -> TERM MAPPING HERE
        for tsv_file in glob.glob(os.path.join(directory, "phenotype",
                                               "*.tsv")):
            #for now, open the TSV file, extract the row for this subject, store it in an acquisition object and link to
            #the associated JSON data dictionary file
            with open(tsv_file) as phenofile:
                pheno_data = csv.DictReader(phenofile, delimiter='\t')
                for row in pheno_data:
                    #handle participant_id with or without the "sub-" prefix
                    temp = row['participant_id'].split("-")
                    subjid = temp[1] if len(temp) > 1 else temp[0]
                    if not subjid == subject_id:
                        continue
                    else:
                        #add acquisition object
                        acq = AssessmentAcquisition(session=session[subjid])
                        #add qualified association with person
                        acq.add_qualified_association(
                            person=participant[subject_id]['person'],
                            role=Constants.NIDM_PARTICIPANT)

                        acq_entity = AssessmentObject(acquisition=acq)

                        for key, value in row.items():
                            if not value:
                                continue
                            #participant_id is stored on the NIDM agent, so don't add it to the assessment as a triple.
                            #BIDS phenotype files seem to have an index column with no column header variable name, so skip those
                            if (not key == "participant_id") and (key != ""):
                                #for now we're using a placeholder namespace for BIDS and simply the variable names as the concept IDs..
                                acq_entity.add_attributes(
                                    {Constants.BIDS[key]: value})

                        #link TSV file
                        acq_entity.add_attributes({
                            Constants.NIDM_FILENAME:
                            getRelPathToBIDS(tsv_file, directory)
                        })
                        #WIP: add absolute location of BIDS directory on disk for later finding of files
                        acq_entity.add_attributes(
                            {Constants.PROV['Location']: directory})

                        #link associated JSON file if it exists
                        data_dict = os.path.join(
                            directory, "phenotype",
                            os.path.splitext(os.path.basename(tsv_file))[0] +
                            ".json")
                        if os.path.isfile(data_dict):
                            acq_entity.add_attributes({
                                Constants.BIDS["data_dictionary"]:
                                getRelPathToBIDS(data_dict, directory)
                            })

    return project, cde
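
A minimal sketch of a caller for this converter, assuming an argparse-style args object carrying the attributes the function reads (json_map and key) and the serializeTurtle() method used in Example #2; the paths are placeholders:

# hypothetical caller; json_map points to a variable-term mapping JSON file
import argparse

args = argparse.Namespace(json_map="/data/variable_term_map.json", key=None)
project, cde = bidsmri2project("/data/bids_dataset", args)
with open("/data/bids_nidm.ttl", "w") as f:
    f.write(project.serializeTurtle())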
Example #4
def bidsmri2project(directory, args):

    # initialize empty CDE graph...it may get replaced if we're doing variable-to-term mapping
    cde=Graph()

    # Parse dataset_description.json file in BIDS directory
    if os.path.isdir(directory):
        try:
            with open(os.path.join(directory,'dataset_description.json')) as data_file:
                dataset = json.load(data_file)
        except OSError:
            logging.critical("Cannot find dataset_description.json file which is required in the BIDS spec")
            exit("-1")
    else:
        logging.critical("Error: BIDS directory %s does not exist!" %os.path.join(directory))
        exit("-1")

    # create project / nidm-exp doc
    project = Project()

    # if there are git annex sources then add them
    num_sources=addGitAnnexSources(obj=project.get_uuid(),bids_root=directory)
    # else just add the local path to the dataset
    if num_sources == 0:
        project.add_attributes({Constants.PROV['Location']:"file:/" + directory})


    # add various attributes if they exist in BIDS dataset
    for key in dataset:
        # if key from dataset_description file is mapped to term in BIDS_Constants.py then add to NIDM object
        if key in BIDS_Constants.dataset_description:
            if type(dataset[key]) is list:
                project.add_attributes({BIDS_Constants.dataset_description[key]:"".join(dataset[key])})
            else:
                project.add_attributes({BIDS_Constants.dataset_description[key]:dataset[key]})




    # get BIDS layout
    bids_layout = BIDSLayout(directory)


    # create empty dictionary for sessions where key is subject id; used later to link scans to the same session as demographics
    session={}
    participant={}
    # Parse participants.tsv file in BIDS directory and create study and acquisition objects
    if os.path.isfile(os.path.join(directory,'participants.tsv')):
        with open(os.path.join(directory,'participants.tsv')) as csvfile:
            participants_data = csv.DictReader(csvfile, delimiter='\t')

            # logic to map variables to terms.
            # first iterate over variables in dataframe and check which ones are already mapped as BIDS constants and which are not.  For those that are not
            # we want to use the variable-term mapping functions to help the user do the mapping
            # iterate over columns
            mapping_list=[]
            column_to_terms={}
            for field in participants_data.fieldnames:

                # column is not in BIDS_Constants
                if not (field in BIDS_Constants.participants):
                    # add column to list for column_to_terms mapping
                    mapping_list.append(field)



            #if user didn't supply a json mapping file but we're doing some variable-term mapping create an empty one for column_to_terms to use
            if args.json_map is False:
                #defaults to participants.json because here we're mapping the participants.tsv file variables to terms
                # if participants.json file doesn't exist then run without json mapping file
                if not os.path.isfile(os.path.join(directory,'participants.json')):
                    #maps variables in CSV file to terms
                    temp=DataFrame(columns=mapping_list)
                    if args.no_concepts:
                        column_to_terms,cde = map_variables_to_terms(directory=directory,assessment_name='participants.tsv',
                            df=temp,output_file=os.path.join(directory,'participants.json'),bids=True,associate_concepts=False)
                    else:
                        column_to_terms,cde = map_variables_to_terms(directory=directory,assessment_name='participants.tsv',
                            df=temp,output_file=os.path.join(directory,'participants.json'),bids=True)
                else:
                    #maps variables in CSV file to terms
                    temp=DataFrame(columns=mapping_list)
                    if args.no_concepts:
                        column_to_terms,cde = map_variables_to_terms(directory=directory, assessment_name='participants.tsv', df=temp,
                            output_file=os.path.join(directory,'participants.json'),json_file=os.path.join(directory,'participants.json'),bids=True,associate_concepts=False)
                    else:
                        column_to_terms,cde = map_variables_to_terms(directory=directory, assessment_name='participants.tsv', df=temp,
                            output_file=os.path.join(directory,'participants.json'),json_file=os.path.join(directory,'participants.json'),bids=True)
            else:
                #maps variables in CSV file to terms
                temp=DataFrame(columns=mapping_list)
                if args.no_concepts:
                    column_to_terms, cde = map_variables_to_terms(directory=directory, assessment_name='participants.tsv', df=temp,
                        output_file=os.path.join(directory,'participants.json'),json_file=args.json_map,bids=True,associate_concepts=False)
                else:
                    column_to_terms, cde = map_variables_to_terms(directory=directory, assessment_name='participants.tsv', df=temp,
                        output_file=os.path.join(directory,'participants.json'),json_file=args.json_map,bids=True)


            for row in participants_data:
                #create session object for subject to be used for participant metadata and image data
                #parse subject id from "sub-XXXX" string
                temp = row['participant_id'].split("-")
                #handle ambiguity in BIDS datasets: sometimes participant_id is sub-XXXX and other times it's just XXXX
                if len(temp) > 1:
                    subjid = temp[1]
                else:
                    subjid = temp[0]
                logging.info(subjid)
                session[subjid] = Session(project)

                #add acquisition object
                acq = AssessmentAcquisition(session=session[subjid])

                acq_entity = AssessmentObject(acquisition=acq)
                participant[subjid] = {}
                participant[subjid]['person'] = acq.add_person(attributes=({Constants.NIDM_SUBJECTID:row['participant_id']}))

                # add nfo:filename entry to assessment entity to reflect provenance of where this data came from
                acq_entity.add_attributes({Constants.NIDM_FILENAME:getRelPathToBIDS(os.path.join(directory,'participants.tsv'),directory)})
                #acq_entity.add_attributes({Constants.NIDM_FILENAME:os.path.join(directory,'participants.tsv')})

                #add qualified association of participant with acquisition activity
                acq.add_qualified_association(person=participant[subjid]['person'],role=Constants.NIDM_PARTICIPANT)
                # print(acq)

                # if there are git annex sources for participants.tsv file then add them
                num_sources=addGitAnnexSources(obj=acq_entity.get_uuid(),bids_root=directory)
                # else just add the local path to the dataset
                if num_sources == 0:
                    acq_entity.add_attributes({Constants.PROV['Location']:"file:/" + os.path.join(directory,'participants.tsv')})

                # if there's a JSON sidecar file then create an entity and associate it with all the assessment entities
                if os.path.isfile(os.path.join(directory,'participants.json')):
                    json_sidecar = AssessmentObject(acquisition=acq)
                    json_sidecar.add_attributes({PROV_TYPE:QualifiedName(Namespace("bids",Constants.BIDS),"sidecar_file"), Constants.NIDM_FILENAME:
                        getRelPathToBIDS(os.path.join(directory,'participants.json'),directory)})

                    # add Git Annex Sources
                    # if there are git annex sources for participants.tsv file then add them
                    num_sources=addGitAnnexSources(obj=json_sidecar.get_uuid(),filepath=os.path.join(directory,'participants.json'),bids_root=directory)
                    # else just add the local path to the dataset
                    if num_sources == 0:
                        json_sidecar.add_attributes({Constants.PROV['Location']:"file:/" + os.path.join(directory,'participants.json')})


                # check if json_sidecar entity exists and if so associate assessment entity with it
                if 'json_sidecar' in locals():
                    #connect json_entity with acq_entity
                    acq_entity.add_attributes({Constants.PROV["wasInfluencedBy"]:json_sidecar})

                for key,value in row.items():
                    if not value:
                        continue
                    #for variables in participants.tsv file which have term mappings in BIDS_Constants.py use those; add to json_map
                    #so we don't have to map these if user supplied arguments to map variables
                    if key in BIDS_Constants.participants:
                        # WIP
                        # Here we are adding to CDE graph data elements for BIDS Constants that remain fixed for each BIDS-compliant dataset

                        if not (BIDS_Constants.participants[key] == Constants.NIDM_SUBJECTID):
                            # create a namespace with the URL for fixed BIDS_Constants term
                            # item_ns = Namespace(str(Constants.BIDS.namespace.uri))
                            # add prefix to namespace which is the BIDS fixed variable name
                            # cde.bind(prefix="bids", namespace=item_ns)
                            # ID for BIDS variables is always the same bids:[bids variable]
                            cde_id = Constants.BIDS[key]
                            # add the data element to the CDE graph
                            cde.add((cde_id,RDF.type, Constants.NIDM['DataElement']))
                            cde.add((cde_id,RDF.type, Constants.PROV['Entity']))
                            # add some basic information about this data element
                            cde.add((cde_id,Constants.RDFS['label'],Literal(BIDS_Constants.participants[key].localpart)))
                            cde.add((cde_id,Constants.NIDM['isAbout'],URIRef(BIDS_Constants.participants[key].uri)))
                            cde.add((cde_id,Constants.NIDM['source_variable'],Literal(key)))
                            cde.add((cde_id,Constants.NIDM['description'],Literal("participant/subject identifier")))
                            cde.add((cde_id,Constants.RDFS['comment'],Literal("BIDS participants_id variable fixed in specification")))
                            cde.add((cde_id,Constants.RDFS['valueType'],URIRef(Constants.XSD["string"])))
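                            # with the CDE triples in place, record the subject's actual value on the
                            # assessment entity below, predicated on the CDE identifier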

                            acq_entity.add_attributes({cde_id:Literal(value)})

                        # if this was the participant_id, we already handled it above creating agent / qualified association
                        # if not (BIDS_Constants.participants[key] == Constants.NIDM_SUBJECTID):
                        #    acq_entity.add_attributes({BIDS_Constants.participants[key]:value})


                    # else, if the user added the -mapvars flag on the command line, use the variable->term mapping
                    # procedure to help the user map variables to terms (also used in CSV2NIDM.py)
                    else:

                        # WIP: trying to add new support for CDEs...
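                        # add_attributes_with_cde() is expected to record the variable as a data
                        # element in the cde graph and attach the value to acq_entity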
                        add_attributes_with_cde(prov_object=acq_entity,cde=cde,row_variable=key,value=value)
                        # if key in column_to_terms:
                        #    acq_entity.add_attributes({QualifiedName(provNamespace(Core.safe_string(None,string=str(key)), column_to_terms[key]["url"]), ""):value})
                        # else:

                        #    acq_entity.add_attributes({Constants.BIDS[key.replace(" ", "_")]:value})


    # create acquisition objects for each scan for each subject

    # loop through all subjects in dataset
    for subject_id in bids_layout.get_subjects():
        logging.info("Converting subject: %s" %subject_id)
        # skip .git directories...added to support datalad datasets
        if subject_id.startswith("."):
            continue

        # check if there are session numbers.  If so, store them in the session activity and create new
        # sessions for the imaging acquisitions.  Because we don't know which imaging session the root
        # participants.tsv file data may be associated with, we simply link the imaging acquisitions to different
        # sessions (i.e. the participants.tsv file goes into an AssessmentAcquisition linked to a unique
        # session, and the imaging acquisitions go into MRAcquisitions, each with its own unique session)
        imaging_sessions = bids_layout.get_sessions(subject=subject_id)
        # if session_dirs has entries then get any metadata about session and store in session activity

        # bids_layout.get(subject=subject_id,type='session',extensions='.tsv')
        # bids_layout.get(subject=subject_id,type='scans',extensions='.tsv')
        # bids_layout.get(extensions='.tsv',return_type='obj')

        # loop through each session if there is a sessions directory
        if len(imaging_sessions) > 0:
            for img_session in imaging_sessions:
                # create a new session
                ses = Session(project)
                # add session number as metadata
                ses.add_attributes({Constants.BIDS['session_number']:img_session})
                addimagingsessions(bids_layout=bids_layout,subject_id=subject_id,session=ses,participant=participant, directory=directory,img_session=img_session)
        else:
            # we have no ses-* directories in the BIDS layout, so create a single unique session
            addimagingsessions(bids_layout=bids_layout,subject_id=subject_id,session=Session(project),participant=participant, directory=directory)



        # Added temporarily to support phenotype files
        # for each *.tsv / *.json file pair in the phenotypes directory
        # WIP: ADD VARIABLE -> TERM MAPPING HERE
        for tsv_file in glob.glob(os.path.join(directory,"phenotype","*.tsv")):
            # for now, open the TSV file, extract the row for this subject, store it in an acquisition object and link to
            # the associated JSON data dictionary file
            with open(tsv_file) as phenofile:
                pheno_data = csv.DictReader(phenofile, delimiter='\t')
                for row in pheno_data:
                    # handle both "sub-XXXX" and bare subject ids, as above
                    temp = row['participant_id'].split("-")
                    subjid = temp[1] if len(temp) > 1 else temp[0]
                    if subjid != subject_id:
                        continue
                    else:
                        # add acquisition object
                        acq = AssessmentAcquisition(session=session[subjid])
                        # add qualified association with person
                        acq.add_qualified_association(person=participant[subject_id]['person'],role=Constants.NIDM_PARTICIPANT)

                        acq_entity = AssessmentObject(acquisition=acq)



                        for key,value in row.items():
                            if not value:
                                continue
                            # we're using participant_id in NIDM in the agent, so don't add it to the assessment as a triple.
                            # BIDS phenotype files seem to have an index column with no column header variable name, so skip those
                            if key != "participant_id" and key != "":
                                # for now we're using a placeholder namespace for BIDS and simply the variable names as the concept IDs..
                                acq_entity.add_attributes({Constants.BIDS[key]:value})

                        # link TSV file
                        acq_entity.add_attributes({Constants.NIDM_FILENAME:getRelPathToBIDS(tsv_file,directory)})
                        #acq_entity.add_attributes({Constants.NIDM_FILENAME:tsv_file})

                        # if there are git annex sources for this phenotype TSV file then add them
                        num_sources=addGitAnnexSources(obj=acq_entity.get_uuid(),bids_root=directory)
                        # else just add the local path to the dataset
                        if num_sources == 0:
                            acq_entity.add_attributes({Constants.PROV['Location']:"file:/" + tsv_file})


                        # link associated JSON file if it exists
                        data_dict = os.path.join(directory,"phenotype",os.path.splitext(os.path.basename(tsv_file))[0]+ ".json")
                        if os.path.isfile(data_dict):
                            # if file exists, create a new entity and associate it with the appropriate activity  and a used relationship
                            # with the TSV-related entity
                            json_entity = AssessmentObject(acquisition=acq)
                            json_entity.add_attributes({PROV_TYPE:Constants.BIDS["sidecar_file"], Constants.NIDM_FILENAME:
                                getRelPathToBIDS(data_dict,directory)})

                            # add Git Annex Sources
                            # if there are git annex sources for the JSON data dictionary file then add them
                            num_sources=addGitAnnexSources(obj=json_entity.get_uuid(),filepath=data_dict,bids_root=directory)
                            # else just add the local path to the dataset
                            if num_sources == 0:
                                json_entity.add_attributes({Constants.PROV['Location']:"file:/" + data_dict})

                            #connect json_entity with acq_entity
                            acq_entity.add_attributes({Constants.PROV["wasInfluencedBy"]:json_entity.get_uuid()})


    return project, cde
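
For orientation, a minimal usage sketch of the function above (hypothetical names: it assumes the enclosing function is importable as bidsmri2project and returns the (project, cde) pair, with serializeTurtle() and the rdflib cde graph used as in the surrounding examples):

def example_usage():
    # convert a BIDS dataset, then write the NIDM graph and the CDE side graph
    project, cde = bidsmri2project("/path/to/bids_dataset")
    with open("nidm.ttl", "w") as f:
        f.write(project.serializeTurtle())
    # cde is an rdflib Graph (see the cde.add(...) calls above)
    cde.serialize(destination="nidm_cde.ttl", format="turtle")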
Example #5
def main(argv):
    parser = ArgumentParser(description='This program will load in a CSV file and iterate over the header \
     variable names performing an elastic search of https://scicrunch.org/ for NIDM-ReproNim \
     tagged terms that fuzzy match the variable names.  The user will then interactively pick \
     a term to associate with the variable name.  The resulting annotated CSV data will \
     then be written to a NIDM data file.')

    parser.add_argument('-csv', dest='csv_file', required=True, help="Path to CSV file to convert")
    parser.add_argument('-ilxkey', dest='key', required=True, help="Interlex/SciCrunch API key to use for query")
    parser.add_argument('-json_map', dest='json_map',required=False,help="User-suppled JSON file containing variable-term mappings.")
    parser.add_argument('-nidm', dest='nidm_file', required=False, help="Optional NIDM file to add CSV->NIDM converted graph to")
    #parser.add_argument('-owl', action='store_true', required=False, help='Optionally searches NIDM OWL files...internet connection required')
    parser.add_argument('-png', action='store_true', required=False, help='Optional flag, when set a PNG image file of RDF graph will be produced')
    parser.add_argument('-jsonld', action='store_true', required=False, help='Optional flag, when set NIDM files are saved as JSON-LD instead of TURTLE')
    parser.add_argument('-out', dest='output_file', required=True, help="Filename to save NIDM file")
    args = parser.parse_args()

    #open CSV file and load into a pandas dataframe
    df = pd.read_csv(args.csv_file)

    #maps variables in CSV file to terms
    #if args.owl is not False:
    #    column_to_terms = map_variables_to_terms(df=df, apikey=args.key, directory=dirname(args.output_file), output_file=args.output_file, json_file=args.json_map, owl_file=args.owl)
    #else:
    column_to_terms = map_variables_to_terms(df=df, apikey=args.key, directory=dirname(args.output_file), output_file=args.output_file, json_file=args.json_map)
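    # column_to_terms maps each CSV variable name to its annotation dict
    # (e.g. 'label', 'url'), which is how it is used below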



    #If user has added an existing NIDM file as a command line parameter then add to existing file for subjects who exist in the NIDM file
    if args.nidm_file:
        print("Adding to NIDM file...")
        #read in NIDM file
        project = read_nidm(args.nidm_file)
        #get list of session objects
        session_objs=project.get_sessions()

        #look at column_to_terms dictionary for NIDM URL for subject id  (Constants.NIDM_SUBJECTID)
        id_field=None
        for key, value in column_to_terms.items():
            if Constants.NIDM_SUBJECTID._str == column_to_terms[key]['label']:
                id_field=key
                #make sure id_field is a string for zero-padded subject ids
                #re-read data file with constraint that key field is read as string
                #df = pd.read_csv(args.csv_file,dtype={id_field : str})

        #if we couldn't find a subject ID field in column_to_terms, ask user
        if id_field is None:
            option=1
            for column in df.columns:
                print("%d: %s" %(option,column))
                option=option+1
            selection=input("Please select the subject ID field from the list above: ")
            id_field=df.columns[int(selection)-1]
            #make sure id_field is a string for zero-padded subject ids
            #re-read data file with constraint that key field is read as string
            #df = pd.read_csv(args.csv_file,dtype={id_field : str})



        #use RDFLib here for a temporary graph to make querying easier
        rdf_graph = Graph()
        rdf_graph_parse = rdf_graph.parse(source=StringIO(project.serializeTurtle()),format='turtle')

        #find subject ids and sessions in NIDM document
        query = """SELECT DISTINCT ?session ?nidm_subj_id ?agent
                    WHERE {
                        ?activity prov:wasAssociatedWith ?agent ;
                            dct:isPartOf ?session  .
                        ?agent rdf:type prov:Agent ;
                            ndar:src_subject_id ?nidm_subj_id .
                    }"""
        #print(query)
        qres = rdf_graph_parse.query(query)


        for row in qres:
            print('%s \t %s' %(row[0],row[1]))
            #find row in CSV file with subject id matching agent from NIDM file

            #csv_row = df.loc[df[id_field]==type(df[id_field][0])(row[1])]
            #be careful about data types: cast the dataframe subject id column and the query result to strings.
            #we remove leading 0's from IDs because pandas.read_csv strips them unless you know ahead of
            #time which column is the subject id and read it as a string
            csv_row = df.loc[df[id_field].astype('str').str.contains(str(row[1]).lstrip("0"))]

            #if there was data about this subject in the NIDM file already (i.e. an agent already exists with this subject id)
            #then add this CSV assessment data to NIDM file, else skip it....
            if len(csv_row.index) != 0:

                #NIDM document session uuid
                session_uuid = row[0]

                #temporary list of string-based URIs of session objects from API
                temp = [o.identifier._uri for o in session_objs]
                #get session object from existing NIDM file that is associated with a specific subject id
                #nidm_session = (i for i,x in enumerate([o.identifier._uri for o in session_objs]) if x == str(session_uuid))
                nidm_session = session_objs[temp.index(str(session_uuid))]
                #for nidm_session in session_objs:
                #    if nidm_session.identifier._uri == str(session_uuid):
                #add an assessment acquisition for the phenotype data to session and associate with agent
                acq=AssessmentAcquisition(session=nidm_session)
                #add acquisition entity for assessment
                acq_entity = AssessmentObject(acquisition=acq)
                #add qualified association with existing agent
                acq.add_qualified_association(person=row[2],role=Constants.NIDM_PARTICIPANT)

                #store other data from row with columns_to_term mappings
                for row_variable in csv_row:
                    #check if row_variable is subject id, if so skip it
                    if row_variable==id_field:
                        continue
                    else:
                        if not csv_row[row_variable].values[0]:
                            continue
                        #get column_to_term mapping uri and add as namespace in NIDM document
                        #provNamespace(Core.safe_string(None,string=str(row_variable)), column_to_terms[row_variable]["url"])
                        acq_entity.add_attributes({QualifiedName(provNamespace(Core.safe_string(None,string=str(row_variable)), column_to_terms[row_variable]["url"]), ""):csv_row[row_variable].values[0]})
                continue

        #serialize NIDM file
        with open(args.nidm_file,'w') as f:
            print("Writing NIDM file...")
            if args.jsonld:
                f.write(project.serializeJSONLD())
            else:
                f.write(project.serializeTurtle())

            if args.png:
                project.save_DotGraph(str(args.nidm_file + ".png"), format="png")



    else:
        print("Creating NIDM file...")
        #If user did not choose to add this data to an existing NIDM file then create a new one for the CSV data
        #create empty project
        project=Project()

        #simply add name of file to project since we don't know anything about it
        project.add_attributes({Constants.NIDM_FILENAME:args.csv_file})


        #look at column_to_terms dictionary for NIDM URL for subject id  (Constants.NIDM_SUBJECTID)
        id_field=None
        for key, value in column_to_terms.items():
            if Constants.NIDM_SUBJECTID._str == column_to_terms[key]['label']:
                id_field=key
                #make sure id_field is a string for zero-padded subject ids
                #re-read data file with constraint that key field is read as string
                #df = pd.read_csv(args.csv_file,dtype={id_field : str})

        #if we couldn't find a subject ID field in column_to_terms, ask user
        if id_field is None:
            option=1
            for column in df.columns:
                print("%d: %s" %(option,column))
                option=option+1
            selection=input("Please select the subject ID field from the list above: ")
            id_field=df.columns[int(selection)-1]


        #iterate over rows and store in NIDM file
        for csv_index, csv_row in df.iterrows():
            #create a session object
            session=Session(project)

            #create and acquisition activity and entity
            acq=AssessmentAcquisition(session)
            acq_entity=AssessmentObject(acq)



            #store other data from row with columns_to_term mappings
            for row_variable,row_data in csv_row.items():
                if not row_data:
                    continue
                #check if row_variable is subject id, if so skip it
                if row_variable==id_field:
                    #add qualified association with person
                    acq.add_qualified_association(person= acq.add_person(attributes=({Constants.NIDM_SUBJECTID:row_data})),role=Constants.NIDM_PARTICIPANT)

                    continue
                else:
                    #get column_to_term mapping uri and add as namespace in NIDM document
                    acq_entity.add_attributes({QualifiedName(provNamespace(Core.safe_string(None,string=str(row_variable)), column_to_terms[row_variable]["url"]),""):row_data})
                    #print(project.serializeTurtle())

        #serialize NIDM file
        with open(args.output_file,'w') as f:
            print("Writing NIDM file...")
            if args.jsonld:
                f.write(project.serializeJSONLD())
            else:
                f.write(project.serializeTurtle())
            if args.png:
                project.save_DotGraph(str(args.output_file + ".png"), format="png")
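
As an aside, a minimal sketch of the attribute-naming pattern used above when storing CSV values (hypothetical mapping values; assumes prov.model's Namespace and QualifiedName, which this code aliases as provNamespace/QualifiedName):

from prov.model import Namespace as provNamespace, QualifiedName

# hypothetical column_to_terms entry for a CSV variable named "age"
mapping = {"url": "http://uri.interlex.org/base/ilx_0100400", "label": "age"}
ns = provNamespace("age", mapping["url"])
# an empty localpart makes the namespace URI itself serve as the predicate
attr = QualifiedName(ns, "")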
Example #6
File: BIDSMRI2NIDM.py  Project: mih/PyNIDM
def bidsmri2project(directory):
    #Parse dataset_description.json file in BIDS directory
    if os.path.isdir(directory):
        with open(os.path.join(directory,
                               'dataset_description.json')) as data_file:
            dataset = json.load(data_file)
    else:
        print("Error: BIDS directory %s does not exist!" % directory)
        exit(1)
    #print(dataset_data)

    #create project / nidm-exp doc
    project = Project()

    #add various attributes if they exist in BIDS dataset
    for key in dataset:
        #print(key)
        #if key from dataset_description file is mapped to term in BIDS_Constants.py then add to NIDM object
        if key in BIDS_Constants.dataset_description:
            if type(dataset[key]) is list:
                project.add_attributes({
                    BIDS_Constants.dataset_description[key]:
                    "".join(dataset[key])
                })
            else:
                project.add_attributes(
                    {BIDS_Constants.dataset_description[key]: dataset[key]})
    #add absolute location of BIDS directory on disk for later finding of files which are stored relatively in NIDM document
    project.add_attributes({Constants.PROV['Location']: directory})
    #create empty dictionary for sessions; key is subject id, used later to link scans to the same session as demographics
    session = {}
    participant = {}
    #Parse participants.tsv file in BIDS directory and create study and acquisition objects
    with open(os.path.join(directory, 'participants.tsv')) as csvfile:
        participants_data = csv.DictReader(csvfile, delimiter='\t')
        #print(participants_data.fieldnames)
        for row in participants_data:
            #create session object for subject to be used for participant metadata and image data
            #parse subject id from "sub-XXXX" string
            subjid = row['participant_id'].split("-")
            session[subjid[1]] = Session(project)

            #add acquisition object
            acq = AssessmentAcquisition(session=session[subjid[1]])

            acq_entity = AssessmentObject(acquisition=acq)
            participant[subjid[1]] = {}
            participant[subjid[1]]['person'] = acq.add_person(
                attributes=({
                    Constants.NIDM_SUBJECTID: row['participant_id']
                }))

            #add qualified association of participant with acquisition activity
            acq.add_qualified_association(
                person=participant[subjid[1]]['person'],
                role=Constants.NIDM_PARTICIPANT)

            for key, value in row.items():
                #for variables in participants.tsv file who have term mappings in BIDS_Constants.py use those
                if key in BIDS_Constants.participants:
                    #if this was the participant_id, we already handled it above creating agent / qualified association
                    if not (BIDS_Constants.participants[key]
                            == Constants.NIDM_SUBJECTID):
                        acq_entity.add_attributes(
                            {BIDS_Constants.participants[key]: value})
                #else just put variables in bids namespace since we don't know what they mean
                else:
                    #acq_entity.add_attributes({Constants.BIDS[quote(key)]:value})
                    acq_entity.add_attributes(
                        {Constants.BIDS[key.replace(" ", "_")]: value})

    #get BIDS layout
    bids_layout = BIDSLayout(directory)
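    # note: the file_tpl.type/.modality fields and the extensions= keyword used below
    # target the pre-1.0 pybids API; newer releases renamed these (roughly suffix/datatype)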

    #create acquisition objects for each scan for each subject

    #loop through all subjects in dataset
    for subject_id in bids_layout.get_subjects():
        print("Converting subject: %s" % subject_id)
        #skip .git directories...added to support datalad datasets
        if subject_id.startswith("."):
            continue

        #check if there's a session number.  If so, store it in the session activity
        session_dirs = bids_layout.get(target='session',
                                       subject=subject_id,
                                       return_type='dir')
        #if session_dirs has entries then get any metadata about session and store in session activity

        #bids_layout.get(subject=subject_id,type='session',extensions='.tsv')
        #bids_layout.get(subject=subject_id,type='scans',extensions='.tsv')
        #bids_layout.get(extensions='.tsv',return_type='obj')

        for file_tpl in bids_layout.get(subject=subject_id,
                                        extensions=['.nii', '.nii.gz']):
            #create an acquisition activity
            acq = MRAcquisition(session[subject_id])

            #add qualified association with person
            acq.add_qualified_association(
                person=participant[subject_id]['person'],
                role=Constants.NIDM_PARTICIPANT)

            #print(file_tpl.type)
            if file_tpl.modality == 'anat':
                #do something with anatomicals
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.type in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.type]
                    })
                else:
                    print(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.type)

                #add image usage type
                if file_tpl.modality in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans[file_tpl.modality]
                    })
                else:
                    print(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.modality)
                #add file link
                #make relative link to
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(file_tpl.filename, directory)
                })
                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)
                if json_data:
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    json_data[key]
                                })
            elif file_tpl.modality == 'func':
                #do something with functionals
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.type in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.type]
                    })
                else:
                    print(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.type)

                #add image usage type
                if file_tpl.modality in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans[file_tpl.modality]
                    })
                else:
                    print(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.modality)
                #add file link
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(file_tpl.filename, directory)
                })
                if 'run' in file_tpl._fields:
                    acq_obj.add_attributes(
                        {BIDS_Constants.json_keys["run"]: file_tpl.run})

                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)

                if json_data:
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    json_data[key]
                                })

                #get associated events TSV file
                if 'run' in file_tpl._fields:
                    events_file = bids_layout.get(subject=subject_id,
                                                  extensions=['.tsv'],
                                                  modality=file_tpl.modality,
                                                  task=file_tpl.task,
                                                  run=file_tpl.run)
                else:
                    events_file = bids_layout.get(subject=subject_id,
                                                  extensions=['.tsv'],
                                                  modality=file_tpl.modality,
                                                  task=file_tpl.task)
                #if there is an events file then this is task-based so create an acquisition object for the task file and link
                if events_file:
                    #for now create acquisition object and link it to the associated scan
                    events_obj = AcquisitionObject(acq)
                    #add prov type, task name as prov:label, and link to filename of events file

                    events_obj.add_attributes({
                        PROV_TYPE:
                        Constants.NIDM_MRI_BOLD_EVENTS,
                        BIDS_Constants.json_keys["TaskName"]:
                        json_data["TaskName"],
                        Constants.NIDM_FILENAME:
                        getRelPathToBIDS(events_file[0].filename, directory)
                    })
                    #link it to appropriate MR acquisition entity
                    events_obj.wasAttributedTo(acq_obj)

            elif file_tpl.modality == 'dwi':
                #do stuff with with dwi scans...
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.type in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.type]
                    })
                else:
                    print(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.type)

                #add image usage type
                if file_tpl.modality in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans["dti"]
                    })
                else:
                    print(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.modality)
                #add file link
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(file_tpl.filename, directory)
                })
                if 'run' in file_tpl._fields:
                    acq_obj.add_attributes(
                        {BIDS_Constants.json_keys["run"]: file_tpl.run})

                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)

                if json_data:
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key.replace(
                                        " ", "_")]:
                                    json_data[key]
                                })

                #bval and bvec files: for now, create generic acquisition objects, link the files,
                #and associate them with the acquisition for the DWI scan
                acq_obj_bval = AcquisitionObject(acq)
                acq_obj_bval.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bval"]})
                #add file link to bval files
                acq_obj_bval.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(bids_layout.get_bval(file_tpl.filename),
                                     directory)
                })
                acq_obj_bvec = AcquisitionObject(acq)
                acq_obj_bvec.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bvec"]})
                #add file link to bvec files
                acq_obj_bvec.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(bids_layout.get_bvec(file_tpl.filename),
                                     directory)
                })

                #TODO: decide whether to link the bval and bvec entities to each other or associate them directly with the DWI scan entity

        #Added temporarily to support phenotype files
        #for each *.tsv / *.json file pair in the phenotypes directory
        for tsv_file in glob.glob(os.path.join(directory, "phenotype",
                                               "*.tsv")):
            #for now, open the TSV file, extract the row for this subject, store it in an acquisition object and link to
            #the associated JSON data dictionary file
            with open(tsv_file) as phenofile:
                pheno_data = csv.DictReader(phenofile, delimiter='\t')
                for row in pheno_data:
                    subjid = row['participant_id'].split("-")
                    if not subjid[1] == subject_id:
                        continue
                    else:
                        #add acquisition object
                        acq = AssessmentAcquisition(session=session[subjid[1]])
                        #add qualified association with person
                        acq.add_qualified_association(
                            person=participant[subject_id]['person'],
                            role=Constants.NIDM_PARTICIPANT)

                        acq_entity = AssessmentObject(acquisition=acq)

                        for key, value in row.items():
                            #we're using participant_id in NIDM in agent so don't add to assessment as a triple.
                            #BIDS phenotype files seem to have an index column with no column header variable name so skip those
                            if ((not key == "participant_id") and (key != "")):
                                #for now we're using a placeholder namespace for BIDS and simply the variable names as the concept IDs..
                                acq_entity.add_attributes(
                                    {Constants.BIDS[key]: value})

                        #link TSV file
                        acq_entity.add_attributes({
                            Constants.NIDM_FILENAME:
                            getRelPathToBIDS(tsv_file, directory)
                        })
                        #link associated JSON file if it exists
                        data_dict = os.path.join(
                            directory, "phenotype",
                            os.path.splitext(os.path.basename(tsv_file))[0] +
                            ".json")
                        if os.path.isfile(data_dict):
                            acq_entity.add_attributes({
                                Constants.BIDS["data_dictionary"]:
                                getRelPathToBIDS(data_dict, directory)
                            })

    return project
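
Note: getRelPathToBIDS, used throughout these examples, is not shown here. A minimal sketch under the assumption that it simply reduces a path to one relative to the BIDS root, so NIDM documents store dataset-relative file links:

import os

def getRelPathToBIDS(filepath, bids_root):
    # hypothetical sketch: return filepath relative to the BIDS dataset root
    return os.path.relpath(filepath, bids_root)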