Example #1
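The snippets on this page are excerpted from larger modules, so their imports are omitted. A plausible shared preamble for the test examples is sketched below; the exact module paths are an assumption based on the PyNIDM package layout, not something the source shows:

import os
import pprint
from os import remove

import prov.model as pm
from rdflib import Graph, URIRef

from nidm.core import Constants
from nidm.experiment import (Acquisition, AssessmentAcquisition,
                             AssessmentObject, Project, Session)
from nidm.experiment import Query  # assumed home of GetProjectsUUID, GetParticipantIDs, etc.
from nidm.experiment.tools.rest import RestParser  # assumed path; some examples call a lower-case restParser() helper instead

The converter scripts further down additionally rely on pandas, a BIDS layout reader, and PyNIDM utilities such as read_nidm and map_variables_to_terms; their import paths are likewise not shown in the source.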
def test_uri_project_list():

    import uuid

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
        Constants.NIDM_PROJECT_IDENTIFIER: 9610,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"
    }
    proj1_uuid = str(uuid.uuid1())
    proj2_uuid = str(uuid.uuid1())
    project = Project(uuid=proj1_uuid, attributes=kwargs)
    #save a turtle file
    with open("uritest.ttl",'w') as f:
        f.write(project.serializeTurtle())

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseIII",
        Constants.NIDM_PROJECT_IDENTIFIER: 1200,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation2"
    }
    project = Project(uuid=proj2_uuid, attributes=kwargs)
    #save a turtle file
    with open("uritest2.ttl",'w') as f:
        f.write(project.serializeTurtle())

    restParser = RestParser()
    result = restParser.run(['uritest.ttl', 'uritest2.ttl'], '/projects')


    project_uuids = []

    #collect returned UUIDs (renamed loop variable so it does not shadow the uuid module)
    for project_uuid in result:
        project_uuids.append(project_uuid)

    assert type(result) == list
    assert len(project_uuids) >= 2
    assert proj1_uuid in project_uuids
    assert proj2_uuid in project_uuids

    os.remove("uritest.ttl")
    os.remove("uritest2.ttl")
Example #2
def test_GetProjectMetadata():

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
        Constants.NIDM_PROJECT_IDENTIFIER: 9610,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"
    }
    project = Project(uuid="_123456", attributes=kwargs)

    session = Session(project)
    acq = AssessmentAcquisition(session)

    kwargs = {Constants.NIDM_HANDEDNESS: "Left", Constants.NIDM_AGE: "90"}
    acq_obj = AssessmentObject(acq, kwargs)

    #save a turtle file
    with open("test.ttl",'w') as f:
        f.write(project.serializeTurtle())

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseIII",
        Constants.NIDM_PROJECT_IDENTIFIER: 1200,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"
    }
    project = Project(uuid="_654321", attributes=kwargs)

    session = Session(project)
    acq = AssessmentAcquisition(session)

    kwargs = {Constants.NIDM_HANDEDNESS: "Right", Constants.NIDM_AGE: "75"}
    acq_obj = AssessmentObject(acq, kwargs)

    #save a turtle file
    with open("test2.ttl",'w') as f:
        f.write(project.serializeTurtle())


    test = Query.GetProjectMetadata(["test.ttl", "test2.ttl"])
Example #3
def test_uri_project_id():

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
        Constants.NIDM_PROJECT_IDENTIFIER: 9610,
        Constants.NIDM_PROJECT_DESCRIPTION: "1234356 Test investigation"
    }
    project = Project(uuid="_123456", attributes=kwargs)
    #save a turtle file
    with open("uri2test.ttl", 'w') as f:
        f.write(project.serializeTurtle())

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseIII",
        Constants.NIDM_PROJECT_IDENTIFIER: 1200,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation2"
    }
    project = Project(uuid="_654321", attributes=kwargs)
    #save a turtle file
    with open("uri2test2.ttl", 'w') as f:
        f.write(project.serializeTurtle())

    result = restParser(['uri2test.ttl', 'uri2test2.ttl'],
                        '/projects/nidm:_123456')

    pp = pprint.PrettyPrinter()
    pp.pprint(result)

    assert type(result) == dict
    assert result["dct:description"] == "1234356 Test investigation"
Example #4
def saveTestFile(file_name, data):
    project = Project(uuid="_123_" + file_name, attributes=data)

    # save a turtle file
    with open(file_name, 'w') as f:
        f.write(project.serializeTurtle())
    return "nidm:_123_{}".format(file_name)
Example #5
def test_GetParticipantIDs():

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
        Constants.NIDM_PROJECT_IDENTIFIER: 9610,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"
    }
    project = Project(uuid="_123456", attributes=kwargs)
    session = Session(uuid="_13579", project=project)
    acq = Acquisition(uuid="_15793", session=session)
    acq2 = Acquisition(uuid="_15795", session=session)

    person = acq.add_person(attributes=({Constants.NIDM_SUBJECTID: "9999"}))
    acq.add_qualified_association(person=person,
                                  role=Constants.NIDM_PARTICIPANT)

    person2 = acq2.add_person(attributes=({Constants.NIDM_SUBJECTID: "8888"}))
    acq2.add_qualified_association(person=person2,
                                   role=Constants.NIDM_PARTICIPANT)

    #save a turtle file
    with open("test.ttl", 'w') as f:
        f.write(project.serializeTurtle())

    participant_list = Query.GetParticipantIDs(["test.ttl"])

    remove("test.ttl")
    assert (participant_list['ID'].str.contains('9999').any())
    assert (participant_list['ID'].str.contains('8888').any())
Example #6
def test_1(tmpdir):
    tmpdir.chdir()

    project = Project()

    #save a turtle file
    with open("test.ttl", 'w') as f:
        f.write(project.serializeTurtle())
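A quick sanity check on the serialized output, not part of the original test, is to parse the Turtle back with rdflib (a library PyNIDM builds on):

from rdflib import Graph

g = Graph()
g.parse("test.ttl", format="turtle")
assert len(g) > 0  # even an empty Project should serialize some provenance triples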
Example #7
def test_2(tmpdir):
    tmpdir.chdir()

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
        Constants.NIDM_PROJECT_IDENTIFIER: 9610,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"
    }
    project = Project(attributes=kwargs)

    with open("test.ttl", 'w') as f:
        f.write(project.serializeTurtle())
Example #8
def test_uri_project_id():

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
        Constants.NIDM_PROJECT_IDENTIFIER: 9610,
        Constants.NIDM_PROJECT_DESCRIPTION: "1234356 Test investigation"
    }
    project = Project(uuid="_123456", attributes=kwargs)
    #save a turtle file
    with open("uri2test.ttl",'w') as f:
        f.write(project.serializeTurtle())

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseIII",
        Constants.NIDM_PROJECT_IDENTIFIER: 1200,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation2"
    }
    project = Project(uuid="_654321", attributes=kwargs)
    #save a turtle file
    with open("uri2test2.ttl",'w') as f:
        f.write(project.serializeTurtle())

    result = restParser(['uri2test.ttl', 'uri2test2.ttl'], '/projects/{}_123456'.format(Query.matchPrefix(Constants.NIIRI)) )

    assert type(result) == dict
    assert result["dct:description"] == "1234356 Test investigation"

    os.remove("uri2test.ttl")
    os.remove("uri2test2.ttl")
Example #9
def test_GetProjects():

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
        Constants.NIDM_PROJECT_IDENTIFIER: 9610,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"
    }
    project = Project(uuid="_123456", attributes=kwargs)


    #save a turtle file
    with open("test.ttl",'w') as f:
        f.write(project.serializeTurtle())

    project_list = Query.GetProjectsUUID(["test.ttl"])

    assert URIRef(Constants.NIDM + "_123456") in project_list
Example #10
def test_uri_project():

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
        Constants.NIDM_PROJECT_IDENTIFIER: 9610,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"
    }
    project = Project(uuid="_123456", attributes=kwargs)
    #save a turtle file
    with open("uritest.ttl", 'w') as f:
        f.write(project.serializeTurtle())

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseIII",
        Constants.NIDM_PROJECT_IDENTIFIER: 1200,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation2"
    }
    project = Project(uuid="_654321", attributes=kwargs)
    #save a turtle file
    with open("uritest2.ttl", 'w') as f:
        f.write(project.serializeTurtle())

    result = restParser(['uritest.ttl', 'uritest2.ttl'], '/projects')

    print(result)

    project_uuids = []

    for uuid in result:
        project_uuids.append(uuid)

    assert type(result) == list
    assert len(project_uuids) == 2
    assert str(Constants.NIDM_URL) + "_123456" in project_uuids
    assert str(Constants.NIDM_URL) + "_654321" in project_uuids

    os.remove("uritest.ttl")
    os.remove("uritest2.ttl")
Example #11
def test_GetProjectMetadata():

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
        Constants.NIDM_PROJECT_IDENTIFIER: 9610,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"
    }
    project = Project(uuid="_123456", attributes=kwargs)

    #save a turtle file
    with open("test.ttl", 'w') as f:
        f.write(project.serializeTurtle())

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseIII",
        Constants.NIDM_PROJECT_IDENTIFIER: 1200,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation2"
    }
    project = Project(uuid="_654321", attributes=kwargs)

    #save a turtle file
    with open("test2.ttl", 'w') as f:
        f.write(project.serializeTurtle())
Example #12
def test_GetProjectMetadata():

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
        Constants.NIDM_PROJECT_IDENTIFIER: 9610,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"
    }
    project = Project(uuid="_123456", attributes=kwargs)

    #save a turtle file
    with open("test_gpm.ttl", 'w') as f:
        f.write(project.serializeTurtle())

    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseIII",
        Constants.NIDM_PROJECT_IDENTIFIER: 1200,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation2"
    }
    project = Project(uuid="_654321", attributes=kwargs)

    #save a turtle file
    with open("test2_gpm.ttl", 'w') as f:
        f.write(project.serializeTurtle())

    #WIP test = Query.GetProjectMetadata(["test.ttl", "test2.ttl"])

    #assert URIRef(Constants.NIDM + "_654321") in test
    #assert URIRef(Constants.NIDM + "_123456") in test
    #assert URIRef(Constants.NIDM_PROJECT_IDENTIFIER + "1200") in test
    #assert URIRef(Constants.NIDM_PROJECT_IDENTIFIER + "9610") in test
    #assert URIRef((Constants.NIDM_PROJECT_NAME + "FBIRN_PhaseII")) in test
    #assert URIRef((Constants.NIDM_PROJECT_NAME + "FBIRN_PhaseIII")) in test
    #assert URIRef((Constants.NIDM_PROJECT_DESCRIPTION + "Test investigation")) in test
    #assert URIRef((Constants.NIDM_PROJECT_DESCRIPTION + "Test investigation2")) in test

    remove("test_gpm.ttl")
    remove("test2_gpm.ttl")
Example #13
def main(argv):
    #create new nidm-experiment document with project
    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
        Constants.NIDM_PROJECT_IDENTIFIER: 9610,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"
    }
    project = Project(attributes=kwargs)
    
    #test add string attribute with existing namespace
    #nidm_doc.addLiteralAttribute("nidm","isFun","ForMe")
    project.add_attributes({Constants.NIDM["isFun"]:"ForMe"})

    #test adding string attribute with new namespace/term
    project.addLiteralAttribute("fred","notFound","in namespaces","www.fred.org/")

    #test add float attribute
    project.addLiteralAttribute("nidm", "float", float(2.34))

    #test adding attributes in bulk with mix of existing and new namespaces
    #nidm_doc.addAttributesWithNamespaces(nidm_doc.getProject(),[{"prefix":"nidm", "uri":nidm_doc.namespaces["nidm"], "term":"score", "value":int(15)}, \
        #                                              {"prefix":"dave", "uri":"http://www.davidkeator.com/", "term":"isAwesome", "value":"15"}, \
        #                                              {"prefix":"nidm", "uri":nidm_doc.namespaces["nidm"], "term":"value", "value":float(2.34)}])
    
    #nidm_doc.addAttributes(nidm_doc.getProject(),{"nidm:test":int(15), "ncit:isTerminology":"15","ncit:joker":float(1)})


    #test add PI to investigation
    project_PI = project.add_person(role=Constants.NIDM_PI,  attributes={Constants.NIDM_FAMILY_NAME:"Keator", Constants.NIDM_GIVEN_NAME:"David"})
    
    #test add session to graph and associate with project
    session = Session(project)
    project.add_sessions(session)

    #test add acquisition activity to graph and associate with session
    acq_act = Acquisition(session=session)
    #test add acquisition object entity to graph associated with participant role NIDM_PARTICIPANT
    acq_entity = MRAcquisitionObject(acquisition=acq_act)
    acq_entity.add_person(role=Constants.NIDM_PARTICIPANT,attributes={Constants.NIDM_GIVEN_NAME:"George"})

    #save a turtle file
    with open("test.ttl",'w') as f:
        f.write(project.serializeTurtle())

    #save a DOT graph as PNG
    project.save_DotGraph("test.png",format="png")
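The main(argv) scripts in these examples need an entry point when run standalone; a minimal sketch, assuming each lives in its own module:

import sys

if __name__ == "__main__":
    main(sys.argv[1:])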
Example #14
def test_GetProjectInstruments():
    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
        Constants.NIDM_PROJECT_IDENTIFIER: 9610,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"
    }
    proj_uuid = "_123456gpi"
    project = Project(uuid=proj_uuid, attributes=kwargs)

    session = Session(project)
    acq = AssessmentAcquisition(session)

    kwargs = {
        pm.PROV_TYPE:
        pm.QualifiedName(pm.Namespace("nidm", Constants.NIDM),
                         "NorthAmericanAdultReadingTest")
    }
    acq_obj = AssessmentObject(acq, attributes=kwargs)

    acq2 = AssessmentAcquisition(session)

    kwargs = {
        pm.PROV_TYPE:
        pm.QualifiedName(pm.Namespace("nidm", Constants.NIDM),
                         "PositiveAndNegativeSyndromeScale")
    }
    acq_obj2 = AssessmentObject(acq2, attributes=kwargs)

    #save a turtle file
    with open("test_gpi.ttl", 'w') as f:
        f.write(project.serializeTurtle())

    assessment_list = Query.GetProjectInstruments(["test_gpi.ttl"], proj_uuid)

    remove("test_gpi.ttl")

    assert Constants.NIDM + "NorthAmericanAdultReadingTest" in [
        str(x) for x in assessment_list['assessment_type'].to_list()
    ]
    assert Constants.NIDM + "PositiveAndNegativeSyndromeScale" in [
        str(x) for x in assessment_list['assessment_type'].to_list()
    ]
Example #15
def main(argv):
    parser = ArgumentParser(description='This program will load in a CSV file and iterate over the header \
     variable names performing an elastic search of https://scicrunch.org/ for NIDM-ReproNim \
     tagged terms that fuzzy match the variable names.  The user will then interactively pick \
     a term to associate with the variable name.  The resulting annotated CSV data will \
     then be written to a NIDM data file.  Note, you must obtain an API key to Interlex by signing up \
     for an account at scicrunch.org then going to My Account and API Keys.  Then set the environment \
     variable INTERLEX_API_KEY with your key.')

    parser.add_argument('-csv', dest='csv_file', required=True, help="Full path to CSV file to convert")
    # parser.add_argument('-ilxkey', dest='key', required=True, help="Interlex/SciCrunch API key to use for query")
    parser.add_argument('-json_map', dest='json_map',required=False,help="Full path to user-suppled JSON file containing variable-term mappings.")
    parser.add_argument('-nidm', dest='nidm_file', required=False, help="Optional full path of NIDM file to add CSV->NIDM converted graph to")
    parser.add_argument('-no_concepts', action='store_true', required=False, help='If this flag is set then no concept associations will be '
                                'asked of the user.  This is useful if you already have a -json_map specified without concepts and want to '
                                'simply run this program to get a NIDM file with user interaction to associate concepts.')
    # parser.add_argument('-owl', action='store_true', required=False, help='Optionally searches NIDM OWL files...internet connection required')
    # parser.add_argument('-png', action='store_true', required=False, help='Optional flag, when set a PNG image file of RDF graph will be produced')
    # parser.add_argument('-jsonld', action='store_true', required=False, help='Optional flag, when set NIDM files are saved as JSON-LD instead of TURTLE')
    parser.add_argument('-log','--log', dest='logfile',required=False, default=None, help="full path to directory to save log file. Log file name is csv2nidm_[arg.csv_file].log")
    parser.add_argument('-out', dest='output_file', required=True, help="Full path with filename to save NIDM file")
    args = parser.parse_args()



    #open CSV file and load into a dataframe
    df = pd.read_csv(args.csv_file)
    #temp = csv.reader(args.csv_file)
    #df = pd.DataFrame(temp)

    #maps variables in CSV file to terms
    #if args.owl is not False:
    #    column_to_terms = map_variables_to_terms(df=df, apikey=args.key, directory=dirname(args.output_file), output_file=args.output_file, json_file=args.json_map, owl_file=args.owl)
    #else:
    # if user did not specify -no_concepts then associate concepts interactively with user
    if not args.no_concepts:
        column_to_terms, cde = map_variables_to_terms(df=df,  assessment_name=basename(args.csv_file),directory=dirname(args.output_file), output_file=args.output_file, json_file=args.json_map)
    # run without concept mappings
    else:
        column_to_terms, cde = map_variables_to_terms(df=df, assessment_name=basename(args.csv_file),
                                                      directory=dirname(args.output_file), output_file=args.output_file,
                                                      json_file=args.json_map, associate_concepts=False)

    if args.logfile is not None:
        logging.basicConfig(filename=join(args.logfile,'csv2nidm_' + os.path.splitext(os.path.basename(args.csv_file))[0] + '.log'), level=logging.DEBUG)
        # add some logging info
        logging.info("csv2nidm %s" %args)


    #If user has added an existing NIDM file as a command line parameter then add to existing file for subjects who exist in the NIDM file
    if args.nidm_file:
        print("Adding to NIDM file...")
        #read in NIDM file
        project = read_nidm(args.nidm_file)
        #get list of session objects
        session_objs=project.get_sessions()

        #look at column_to_terms dictionary for NIDM URL for subject id  (Constants.NIDM_SUBJECTID)
        id_field=None
        for key, value in column_to_terms.items():
            if Constants.NIDM_SUBJECTID._str == column_to_terms[key]['label']:
                key_tuple = eval(key)
                #id_field=key
                id_field = key_tuple.variable
                #make sure id_field is a string for zero-padded subject ids
                #re-read data file with constraint that key field is read as string
                df = pd.read_csv(args.csv_file,dtype={id_field : str})
                break

        #if we couldn't find a subject ID field in column_to_terms, ask user
        if id_field is None:
            option=1
            for column in df.columns:
                print("%d: %s" %(option,column))
                option=option+1
            selection=input("Please select the subject ID field from the list above: ")
            # Make sure the user selected one of the options; if not, present the prompt again
            while (not selection.isdigit()) or not (1 <= int(selection) < option):
                # Wait for user input
                selection = input("Please select the subject ID field from the list above: ")
            id_field=df.columns[int(selection)-1]
            #make sure id_field is a string for zero-padded subject ids
            #re-read data file with constraint that key field is read as string
            df = pd.read_csv(args.csv_file,dtype={id_field : str})



        #use RDFLib here to build a temporary graph, which makes querying easier
        rdf_graph = Graph()
        rdf_graph.parse(source=StringIO(project.serializeTurtle()),format='turtle')

        print("Querying for existing participants in NIDM graph....")
        #find subject ids and sessions in NIDM document
        query = """SELECT DISTINCT ?session ?nidm_subj_id ?agent
                    WHERE {
                        ?activity prov:wasAssociatedWith ?agent ;
                            dct:isPartOf ?session  .
                        ?agent rdf:type prov:Agent ;
                            ndar:src_subject_id ?nidm_subj_id .
                    }"""
        #print(query)
        qres = rdf_graph.query(query)


        for row in qres:
            logging.info("found existing participant %s \t %s" %(row[0],row[1]))
            #find row in CSV file with subject id matching agent from NIDM file

            #csv_row = df.loc[df[id_field]==type(df[id_field][0])(row[1])]
            #find row in CSV file with matching subject id to the agent in the NIDM file
            #be careful about data types...simply convert the dataframe subject id column and the query result to strings.
            #here we're removing the leading 0's from IDs because pandas.read_csv strips those unless you know ahead of
            #time which column is the subject id....
            csv_row = df.loc[df[id_field].astype('str').str.contains(str(row[1]).lstrip("0"))]

            #if there was data about this subject in the NIDM file already (i.e. an agent already exists with this subject id)
            #then add this CSV assessment data to NIDM file, else skip it....
            if (not (len(csv_row.index)==0)):

                #NIDM document session uuid
                session_uuid = row[0]

                #temporary list of string-based URIs of session objects from API
                temp = [o.identifier._uri for o in session_objs]
                #get session object from existing NIDM file that is associated with a specific subject id
                #nidm_session = (i for i,x in enumerate([o.identifier._uri for o in session_objs]) if x == str(session_uuid))
                nidm_session = session_objs[temp.index(str(session_uuid))]
                #for nidm_session in session_objs:
                #    if nidm_session.identifier._uri == str(session_uuid):
                #add an assessment acquisition for the phenotype data to session and associate with agent
                acq=AssessmentAcquisition(session=nidm_session)
                #add acquisition entity for assessment
                acq_entity = AssessmentObject(acquisition=acq)
                #add qualified association with existing agent
                acq.add_qualified_association(person=row[2],role=Constants.NIDM_PARTICIPANT)

                # add git-annex info if exists
                num_sources = addGitAnnexSources(obj=acq_entity,filepath=args.csv_file,bids_root=dirname(args.csv_file))
                # if there aren't any git annex sources then just store the local directory information
                if num_sources == 0:
                    # WIP: add absolute location of BIDS directory on disk for later finding of files
                    acq_entity.add_attributes({Constants.PROV['Location']:"file:/" + args.csv_file})

                # store file to acq_entity
                acq_entity.add_attributes({Constants.NIDM_FILENAME:basename(args.csv_file)})

                #store other data from row with columns_to_term mappings
                for row_variable in csv_row:
                    #check if row_variable is subject id, if so skip it
                    if row_variable==id_field:
                        continue
                    else:
                        if not csv_row[row_variable].values[0]:
                            continue


                        add_attributes_with_cde(acq_entity, cde, row_variable, csv_row[row_variable].values[0])



                continue

        print ("Adding CDEs to graph....")
        # convert to rdflib Graph and add CDEs
        rdf_graph = Graph()
        rdf_graph.parse(source=StringIO(project.serializeTurtle()),format='turtle')
        rdf_graph = rdf_graph + cde

        print("Backing up original NIDM file...")
        copy2(src=args.nidm_file,dst=args.nidm_file+".bak")
        print("Writing NIDM file....")
        rdf_graph.serialize(destination=args.nidm_file,format='turtle')

    else:
        print("Creating NIDM file...")
        #If user did not choose to add this data to an existing NIDM file then create a new one for the CSV data
        #create empty project
        project=Project()

        #simply add name of file to project since we don't know anything about it
        project.add_attributes({Constants.NIDM_FILENAME:args.csv_file})


        #look at column_to_terms dictionary for NIDM URL for subject id  (Constants.NIDM_SUBJECTID)
        id_field=None
        for key, value in column_to_terms.items():
            # using skos:sameAs relationship to associate subject identifier variable from csv with a known term
            # for subject IDs
            if 'sameAs' in column_to_terms[key]:
                if Constants.NIDM_SUBJECTID.uri == column_to_terms[key]['sameAs']:
                    key_tuple = eval(key)
                    id_field=key_tuple.variable
                    #make sure id_field is a string for zero-padded subject ids
                    #re-read data file with constraint that key field is read as string
                    df = pd.read_csv(args.csv_file,dtype={id_field : str})
                    break

        #if we couldn't find a subject ID field in column_to_terms, ask user
        if id_field is None:
            option=1
            for column in df.columns:
                print("%d: %s" %(option,column))
                option=option+1
            selection=input("Please select the subject ID field from the list above: ")
            # Make sure the user selected one of the options; if not, present the prompt again
            while (not selection.isdigit()) or not (1 <= int(selection) < option):
                # Wait for user input
                selection = input("Please select the subject ID field from the list above: ")
            id_field=df.columns[int(selection)-1]
            #make sure id_field is a string for zero-padded subject ids
            #re-read data file with constraint that key field is read as string
            df = pd.read_csv(args.csv_file,dtype={id_field : str})


        #iterate over rows and store in NIDM file
        for csv_index, csv_row in df.iterrows():
            #create a session object
            session=Session(project)

            #create an acquisition activity and entity
            acq=AssessmentAcquisition(session)
            acq_entity=AssessmentObject(acq)

            #create prov:Agent for subject
            #acq.add_person(attributes=({Constants.NIDM_SUBJECTID:row['participant_id']}))

            # add git-annex info if exists
            num_sources = addGitAnnexSources(obj=acq_entity,filepath=args.csv_file,bids_root=os.path.dirname(args.csv_file))
            # if there aren't any git annex sources then just store the local directory information
            if num_sources == 0:
                # WIP: add absolute location of BIDS directory on disk for later finding of files
                acq_entity.add_attributes({Constants.PROV['Location']:"file:/" + args.csv_file})

            # store file to acq_entity
            acq_entity.add_attributes({Constants.NIDM_FILENAME : basename(args.csv_file)})


            #store other data from row with columns_to_term mappings
            for row_variable, row_data in csv_row.items():
                if not row_data:
                    continue

                #check if row_variable is subject id, if so skip it
                if row_variable==id_field:
                    ### WIP: Check if agent already exists with the same ID.  If so, use it else create a new agent

                    #add qualified association with person
                    acq.add_qualified_association(person= acq.add_person(attributes=({Constants.NIDM_SUBJECTID:str(row_data)})),role=Constants.NIDM_PARTICIPANT)

                    continue
                else:
                    add_attributes_with_cde(acq_entity, cde, row_variable, row_data)

                    #print(project.serializeTurtle())

        # convert to rdflib Graph and add CDEs
        rdf_graph = Graph()
        rdf_graph.parse(source=StringIO(project.serializeTurtle()),format='turtle')
        rdf_graph = rdf_graph + cde

        print("Writing NIDM file....")
        rdf_graph.serialize(destination=args.output_file,format='turtle')
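A hypothetical invocation, based on the argparse flags defined above (script name and paths invented for illustration):

#   create a new NIDM file from a CSV, with interactive concept association
#   python csv2nidm.py -csv /data/assessment.csv -out /data/nidm.ttl
#
#   append to an existing NIDM file, skipping concept association
#   python csv2nidm.py -csv /data/assessment.csv -nidm /data/existing.ttl -no_concepts -out /data/nidm.ttl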
Example #16
def main(argv):
    parser = ArgumentParser()

    parser.add_argument('-d',
                        dest='directory',
                        required=True,
                        help="Path to BIDS dataset directory")
    parser.add_argument('-o',
                        dest='outputfile',
                        default="nidm.ttl",
                        help="NIDM output turtle file")
    args = parser.parse_args()

    directory = args.directory
    outputfile = args.outputfile

    #Parse dataset_description.json file in BIDS directory
    with open(directory + '/' + 'dataset_description.json') as data_file:
        dataset = json.load(data_file)
    #print(dataset_data)

    #create project / nidm-exp doc
    project = Project()

    #add various attributes if they exist in BIDS dataset
    for key in dataset:
        #print(key)
        #if key from dataset_description file is mapped to term in BIDS_Constants.py then add to NIDM object
        if key in BIDS_Constants.dataset_description:
            if type(dataset[key]) is list:
                project.add_attributes({
                    BIDS_Constants.dataset_description[key]:
                    "".join(dataset[key])
                })
            else:
                project.add_attributes(
                    {BIDS_Constants.dataset_description[key]: dataset[key]})

    #create empty dictionary for sessions where key is subject id; used later to link scans to same session as demographics
    session = {}
    #Parse participants.tsv file in BIDS directory and create study and acquisition objects
    with open(directory + '/' + 'participants.tsv') as csvfile:
        participants_data = csv.DictReader(csvfile, delimiter='\t')
        #print(participants_data.fieldnames)
        for row in participants_data:
            #create session object for subject to be used for participant metadata and image data
            #parse subject id from "sub-XXXX" string
            subjid = row['participant_id'].split("-")
            session[subjid[1]] = Session(project)

            #add acquisition object
            acq = Acquisition(session=session[subjid[1]])
            acq_entity = DemographicsAcquisitionObject(acquisition=acq)
            participant = acq.add_person(role=Constants.NIDM_PARTICIPANT,
                                         attributes=({
                                             Constants.NIDM_SUBJECTID:
                                             row['participant_id']
                                         }))

            for key, value in row.items():
                #for now only convert variables in participants.tsv file that have term mappings in BIDS_Constants.py
                if key in BIDS_Constants.participants:
                    acq_entity.add_attributes(
                        {BIDS_Constants.participants[key]: value})

    #get BIDS layout
    bids_layout = BIDSLayout(directory)

    #create acquisition objects for each scan for each subject

    #loop through all subjects in dataset
    for subject_id in bids_layout.get_subjects():
        for file_tpl in bids_layout.get(subject=subject_id,
                                        extensions=['.nii', '.nii.gz']):
            #create an acquisition activity
            acq = Acquisition(session[subject_id])

            #print(file_tpl.type)
            if file_tpl.modality == 'anat':
                #do something with anatomicals
                acq_obj = MRAcquisitionObject(acq)
                acq_obj.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans[file_tpl.modality]})
                #add file link
                acq_obj.add_attributes(
                    {Constants.NFO["filename"]: file_tpl.filename})
                #get associated JSON file if exists
                for json_file in bids_layout.get(subject=subject_id,
                                                 extensions=['.json'],
                                                 modality=file_tpl.modality):
                    #open json file, grab key-value pairs, map them to terms and add to acquisition object
                    with open(json_file[0]) as data_file:
                        json_data = json.load(data_file)
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                project.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    "".join(json_data[key])
                                })
                            else:
                                project.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    json_data[key]
                                })
                #if we want to do something further if T1w or t2, etc
                #if file_tpl.type == 'T1w':
                #elif file_tpl.type == 'inplaneT2':
            elif file_tpl.modality == 'func':
                #do something with functionals
                acq_obj = MRAcquisitionObject(acq)
                acq_obj.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans[file_tpl.modality]})
                #add file link
                acq_obj.add_attributes({
                    Constants.NFO["filename"]:
                    file_tpl.filename,
                    BIDS_Constants.json_keys["run"]:
                    file_tpl.run
                })
                #add attributes for task description keys from task JSON file
                for task_desc in bids_layout.get(extensions=['.json'],
                                                 task=file_tpl.task):
                    with open(task_desc[0]) as data_file:
                        json_data = json.load(data_file)
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            acq_obj.add_attributes({
                                BIDS_Constants.json_keys[key]:
                                json_data[key]
                            })
                    #get associated events TSV file
                    events_file = bids_layout.get(subject=subject_id,
                                                  extensions=['.tsv'],
                                                  modality=file_tpl.modality,
                                                  task=file_tpl.task,
                                                  run=file_tpl.run)
                    #for now create acquisition object and link it to the associated scan
                    events_obj = AcquisitionObject(acq)
                    #add prov type, task name as prov:label, and link to filename of events file
                    events_obj.add_attributes({
                        PROV_TYPE:
                        Constants.NIDM_MRI_BOLD_EVENTS,
                        BIDS_Constants.json_keys["TaskName"]:
                        json_data["TaskName"],
                        Constants.NFO["filename"]:
                        events_file[0].filename
                    })
                    #link it to appropriate MR acquisition entity
                    events_obj.wasAttributedTo(acq_obj)

            elif file_tpl.modality == 'dwi':
                #do stuff with dwi scans...
                acq_obj = MRAcquisitionObject(acq)
                acq_obj.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans[file_tpl.modality]})
                #add file link
                acq_obj.add_attributes({
                    Constants.NFO["filename"]:
                    file_tpl.filename,
                    BIDS_Constants.json_keys["run"]:
                    file_tpl.run
                })
                #add attributes for task description keys from task JSON file
                for task_desc in bids_layout.get(extensions=['.json'],
                                                 task=file_tpl.task):
                    with open(task_desc[0]) as data_file:
                        json_data = json.load(data_file)
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            acq_obj.add_attributes({
                                BIDS_Constants.json_keys[key]:
                                json_data[key]
                            })
                #for bval and bvec files, what to do with those?
                #for now, create new generic acquisition objects, link the files, and associate with the one for the DWI scan?
                acq_obj = AcquisitionObject(acq)
                acq_obj.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bval"]})
                for bval in bids_layout.get(extensions=['.bval'],
                                            task=file_tpl.task):
                    #add file link
                    acq_obj.add_attributes(
                        {Constants.NFO["filename"]: bval.filename})
                for bvec in bids_layout.get(extensions=['.bvec'],
                                            task=file_tpl.task):
                    #add file link
                    acq_obj.add_attributes(
                        {Constants.NFO["filename"]: bvec.filename})

    #serialize graph
    #print(project.graph.get_provn())
    with open(outputfile, 'w') as f:
        f.write(project.serializeTurtle())
        #f.write(project.graph.get_provn())
    #save a DOT graph as PNG
    project.save_DotGraph(str(outputfile + ".png"), format="png")
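A hypothetical invocation of this BIDS-to-NIDM converter, based on the two argparse flags defined above (script name and dataset path invented for illustration):

#   python bidsmri2nidm.py -d /data/ds000001 -o ds000001_nidm.ttl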
Example #17
def main(argv):
    #create new nidm-experiment document with project
    kwargs = {
        Constants.NIDM_PROJECT_NAME: "FBIRN_PhaseII",
        Constants.NIDM_PROJECT_IDENTIFIER: 9610,
        Constants.NIDM_PROJECT_DESCRIPTION: "Test investigation"
    }
    project = Project(attributes=kwargs)

    
    #test add string attribute with existing namespace
    #nidm_doc.addLiteralAttribute("nidm","isFun","ForMe")
    project.add_attributes({Constants.NIDM["isFun"]:"ForMe"})

    #test adding string attribute with new namespace/term
    project.addLiteralAttribute("fred","notFound","in namespaces","www.fred.org/")

    #test add float attribute
    project.addLiteralAttribute("nidm", "float", float(2.34))

    #test adding attributes in bulk with mix of existing and new namespaces
    #nidm_doc.addAttributesWithNamespaces(nidm_doc.getProject(),[{"prefix":"nidm", "uri":nidm_doc.namespaces["nidm"], "term":"score", "value":int(15)}, \
        #                                              {"prefix":"dave", "uri":"http://www.davidkeator.com/", "term":"isAwesome", "value":"15"}, \
        #                                              {"prefix":"nidm", "uri":nidm_doc.namespaces["nidm"], "term":"value", "value":float(2.34)}])
    
    #nidm_doc.addAttributes(nidm_doc.getProject(),{"nidm:test":int(15), "ncit:isTerminology":"15","ncit:joker":float(1)})


    #test add PI to investigation
    project_PI = project.add_person(attributes={Constants.NIDM_FAMILY_NAME:"Keator", Constants.NIDM_GIVEN_NAME:"David"})

    #add qualified association of project PI to project activity
    project.add_qualified_association(person=project_PI,role=Constants.NIDM_PI)

    #test add session to graph and associate with project
    session = Session(project)
    session.add_attributes({Constants.NIDM:"test"})
    #project.add_sessions(session)

    #test add MR acquisition activity / entity to graph and associate with session
    acq_act = MRAcquisition(session=session)
    #test add acquisition object entity to graph associated with participant role NIDM_PARTICIPANT
    acq_entity = MRObject(acquisition=acq_act)

    #add person to graph
    person = acq_act.add_person(attributes={Constants.NIDM_GIVEN_NAME:"George"})
    #add qualified association of person with role NIDM_PARTICIPANT, associated with the acquisition activity
    acq_act.add_qualified_association(person=person, role=Constants.NIDM_PARTICIPANT)


    #test add Assessment acquisition activity / entity to graph and associate with session
    acq_act = AssessmentAcquisition(session=session)
    #test add acquisition object entity to graph associated with participant role NIDM_PARTICIPANT
    acq_entity = AssessmentObject(acquisition=acq_act)
    acq_entity.add_attributes({Constants.NIDM["Q1"]:"Q1 Answer",Constants.NIDM["Q2"]:"Q2 Answer" })
    #associate person as participant
    acq_act.add_qualified_association(person=person, role=Constants.NIDM_PARTICIPANT)


    #test add DemographicsAssessment acquisition activity / entity to graph and associate with session
    acq_act = AssessmentAcquisition(session=session)
    #test add acquisition object entity to graph associated with participant role NIDM_PARTICIPANT
    acq_entity = DemographicsObject(acquisition=acq_act)
    #add new person to graph
    person2 = acq_act.add_person(attributes={Constants.NIDM_FAMILY_NAME:"Doe", \
            Constants.NIDM_GIVEN_NAME:"John"})
    #associate person2 with assessment acquisition
    acq_act.add_qualified_association(person=person2, role=Constants.NIDM_PARTICIPANT)

    acq_entity.add_attributes({Constants.NIDM_AGE:60,Constants.NIDM_GENDER:"Male" })


    #save a turtle file
    with open("test.ttl",'w') as f:
        f.write(project.serializeTurtle())
Example #18
def main(argv):
    parser = ArgumentParser(
        description=
        'This program will convert a BIDS MRI dataset to a NIDM-Experiment \
        RDF document.  It will parse phenotype information and simply store variables/values \
        and link to the associated json data dictionary file.')

    parser.add_argument('-d',
                        dest='directory',
                        required=True,
                        help="Path to BIDS dataset directory")
    parser.add_argument('-o',
                        dest='outputfile',
                        default="nidm.ttl",
                        help="NIDM output turtle file")
    args = parser.parse_args()

    directory = args.directory
    outputfile = args.outputfile

    #importlib.reload(sys)
    #sys.setdefaultencoding('utf8')

    #Parse dataset_description.json file in BIDS directory
    with open(os.path.join(directory,
                           'dataset_description.json')) as data_file:
        dataset = json.load(data_file)
    #print(dataset_data)

    #create project / nidm-exp doc
    project = Project()

    #add various attributes if they exist in BIDS dataset
    for key in dataset:
        #print(key)
        #if key from dataset_description file is mapped to term in BIDS_Constants.py then add to NIDM object
        if key in BIDS_Constants.dataset_description:
            if type(dataset[key]) is list:
                project.add_attributes({
                    BIDS_Constants.dataset_description[key]:
                    "".join(dataset[key])
                })
            else:
                project.add_attributes(
                    {BIDS_Constants.dataset_description[key]: dataset[key]})

    #create empty dictionary for sessions where key is subject id; used later to link scans to same session as demographics
    session = {}
    #Parse participants.tsv file in BIDS directory and create study and acquisition objects
    with open(os.path.join(directory, 'participants.tsv')) as csvfile:
        participants_data = csv.DictReader(csvfile, delimiter='\t')
        #print(participants_data.fieldnames)
        for row in participants_data:
            #create session object for subject to be used for participant metadata and image data
            #parse subject id from "sub-XXXX" string
            subjid = row['participant_id'].split("-")
            session[subjid[1]] = Session(project)

            #add acquisition object
            acq = Acquisition(session=session[subjid[1]])
            acq_entity = DemographicsAcquisitionObject(acquisition=acq)
            participant = acq.add_person(role=Constants.NIDM_PARTICIPANT,
                                         attributes=({
                                             Constants.NIDM_SUBJECTID:
                                             row['participant_id']
                                         }))

            for key, value in row.items():
                #for now only convert variables in participants.tsv file that have term mappings in BIDS_Constants.py
                if key in BIDS_Constants.participants:
                    acq_entity.add_attributes(
                        {BIDS_Constants.participants[key]: value})

    #get BIDS layout
    bids_layout = BIDSLayout(directory)

    #create acquisition objects for each scan for each subject

    #loop through all subjects in dataset
    for subject_id in bids_layout.get_subjects():
        #skip .git directories...added to support datalad datasets
        if subject_id.startswith("."):
            continue
        for file_tpl in bids_layout.get(subject=subject_id,
                                        extensions=['.nii', '.nii.gz']):
            #create an acquisition activity
            acq = Acquisition(session[subject_id])

            #print(file_tpl.type)
            if file_tpl.modality == 'anat':
                #do something with anatomicals
                acq_obj = MRAcquisitionObject(acq)
                acq_obj.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans[file_tpl.modality]})
                #add file link
                #make relative link to
                acq_obj.add_attributes(
                    {Constants.NIDM_FILENAME: file_tpl.filename})
                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)
                if json_data:
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    json_data[key]
                                })
            elif file_tpl.modality == 'func':
                #do something with functionals
                acq_obj = MRAcquisitionObject(acq)
                acq_obj.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans[file_tpl.modality]})
                #add file link
                acq_obj.add_attributes(
                    {Constants.NIDM_FILENAME: file_tpl.filename})
                if 'run' in file_tpl._fields:
                    acq_obj.add_attributes(
                        {BIDS_Constants.json_keys["run"]: file_tpl.run})

                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)

                if json_data:
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    json_data[key]
                                })

                #get associated events TSV file
                if 'run' in file_tpl._fields:
                    events_file = bids_layout.get(subject=subject_id,
                                                  extensions=['.tsv'],
                                                  modality=file_tpl.modality,
                                                  task=file_tpl.task,
                                                  run=file_tpl.run)
                else:
                    events_file = bids_layout.get(subject=subject_id,
                                                  extensions=['.tsv'],
                                                  modality=file_tpl.modality,
                                                  task=file_tpl.task)
                #if there is an events file then this is task-based so create an acquisition object for the task file and link
                if events_file:
                    #for now create acquisition object and link it to the associated scan
                    events_obj = AcquisitionObject(acq)
                    #add prov type, task name as prov:label, and link to filename of events file
                    events_obj.add_attributes({
                        PROV_TYPE:
                        Constants.NIDM_MRI_BOLD_EVENTS,
                        BIDS_Constants.json_keys["TaskName"]:
                        json_data["TaskName"],
                        Constants.NFO["filename"]:
                        events_file[0].filename
                    })
                    #link it to appropriate MR acquisition entity
                    events_obj.wasAttributedTo(acq_obj)

            elif file_tpl.modality == 'dwi':
                #do stuff with dwi scans...
                acq_obj = MRAcquisitionObject(acq)
                acq_obj.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans[file_tpl.modality]})
                #add file link
                acq_obj.add_attributes(
                    {Constants.NIDM_FILENAME: file_tpl.filename})
                if 'run' in file_tpl._fields:
                    acq_obj.add_attributes(
                        {BIDS_Constants.json_keys["run"]: file_tpl.run})
                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)

                if json_data:
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    json_data[key]
                                })

                #for bval and bvec files, what to do with those?
                #for now, create new generic acquisition objects, link the files, and associate with the one for the DWI scan?
                acq_obj_bval = AcquisitionObject(acq)
                acq_obj_bval.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bval"]})
                #add file link to bval files
                acq_obj_bval.add_attributes({
                    Constants.NIDM_FILENAME:
                    bids_layout.get_bval(file_tpl.filename)
                })
                acq_obj_bvec = AcquisitionObject(acq)
                acq_obj_bvec.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bvec"]})
                #add file link to bvec files
                acq_obj_bvec.add_attributes({
                    Constants.NIDM_FILENAME:
                    bids_layout.get_bvec(file_tpl.filename)
                })
                #link bval and bvec acquisition object entities together or is their association with enclosing activity enough?

        #Added temporarily to support phenotype files
        #for each *.tsv / *.json file pair in the phenotypes directory
        for tsv_file in glob.glob(os.path.join(directory, "phenotype",
                                               "*.tsv")):
            #for now, open the TSV file, extract the row for this subject, store it in an acquisition object and link to
            #the associated JSON data dictionary file
            with open(tsv_file) as phenofile:
                pheno_data = csv.DictReader(phenofile, delimiter='\t')
                for row in pheno_data:
                    subjid = row['participant_id'].split("-")
                    if not subjid[1] == subject_id:
                        continue
                    else:
                        #add acquisition object
                        acq = Acquisition(session=session[subjid[1]])
                        acq_entity = AssessmentAcquisitionObject(
                            acquisition=acq)
                        participant = acq.add_person(
                            role=Constants.NIDM_PARTICIPANT,
                            attributes=({
                                Constants.NIDM_SUBJECTID:
                                row['participant_id']
                            }))

                        for key, value in row.items():
                            if not key == "participant_id":
                                #for now we're using a placeholder namespace for BIDS and simply the variable names as the concept IDs..
                                acq_entity.add_attributes(
                                    {Constants.BIDS[key]: value})

                        #link TSV file
                        acq_entity.add_attributes(
                            {Constants.NIDM_FILENAME: tsv_file})
                        #link associated JSON file if it exists
                        data_dict = os.path.join(
                            directory, "phenotype",
                            os.path.splitext(os.path.basename(tsv_file))[0] +
                            ".json")
                        if os.path.isfile(data_dict):
                            acq_entity.add_attributes(
                                {Constants.BIDS["data_dictionary"]: data_dict})

    #serialize graph
    #print(project.graph.get_provn())
    with open(outputfile, 'w') as f:
        f.write(project.serializeTurtle())
        #f.write(project.graph.get_provn())
    #save a DOT graph as PNG
    project.save_DotGraph(str(outputfile + ".png"), format="png")
Example #19
def main(argv):
    parser = ArgumentParser(description='This program will load in a CSV file and iterate over the header \
     variable names performing an elastic search of https://scicrunch.org/ for NIDM-ReproNim \
     tagged terms that fuzzy match the variable names.  The user will then interactively pick \
     a term to associate with the variable name.  The resulting annotated CSV data will \
     then be written to a NIDM data file.')

    parser.add_argument('-csv', dest='csv_file', required=True, help="Path to CSV file to convert")
    parser.add_argument('-ilxkey', dest='key', required=True, help="Interlex/SciCrunch API key to use for query")
    parser.add_argument('-json_map', dest='json_map',required=False,help="User-suppled JSON file containing variable-term mappings.")
    parser.add_argument('-nidm', dest='nidm_file', required=False, help="Optional NIDM file to add CSV->NIDM converted graph to")
    #parser.add_argument('-owl', action='store_true', required=False, help='Optionally searches NIDM OWL files...internet connection required')
    parser.add_argument('-png', action='store_true', required=False, help='Optional flag, when set a PNG image file of RDF graph will be produced')
    parser.add_argument('-jsonld', action='store_true', required=False, help='Optional flag, when set NIDM files are saved as JSON-LD instead of TURTLE')
    parser.add_argument('-out', dest='output_file', required=True, help="Filename to save NIDM file")
    args = parser.parse_args()

    #open CSV file and load into a dataframe
    df = pd.read_csv(args.csv_file)

    #maps variables in CSV file to terms
    #if args.owl is not False:
    #    column_to_terms = map_variables_to_terms(df=df, apikey=args.key, directory=dirname(args.output_file), output_file=args.output_file, json_file=args.json_map, owl_file=args.owl)
    #else:
    column_to_terms = map_variables_to_terms(df=df, apikey=args.key, directory=dirname(args.output_file), output_file=args.output_file, json_file=args.json_map)



    #If user has added an existing NIDM file as a command line parameter then add to existing file for subjects who exist in the NIDM file
    if args.nidm_file:
        print("Adding to NIDM file...")
        #read in NIDM file
        project = read_nidm(args.nidm_file)
        #get list of session objects
        session_objs=project.get_sessions()

        #look at column_to_terms dictionary for NIDM URL for subject id  (Constants.NIDM_SUBJECTID)
        id_field=None
        for key, value in column_to_terms.items():
            if Constants.NIDM_SUBJECTID._str == value['label']:
                id_field=key
                #make sure id_field is a string for zero-padded subject ids
                #re-read data file with constraint that key field is read as string
                #df = pd.read_csv(args.csv_file,dtype={id_field : str})

        #if we couldn't find a subject ID field in column_to_terms, ask user
        if id_field is None:
            option=1
            for column in df.columns:
                print("%d: %s" %(option,column))
                option=option+1
            selection=input("Please select the subject ID field from the list above: ")
            id_field=df.columns[int(selection)-1]
            #make sure id_field is a string for zero-padded subject ids
            #re-read data file with constraint that key field is read as string
            #df = pd.read_csv(args.csv_file,dtype={id_field : str})



        #use RDFLib here for temporary graph making query easier
        rdf_graph = Graph()
        rdf_graph_parse = rdf_graph.parse(source=StringIO(project.serializeTurtle()),format='turtle')

        #find subject ids and sessions in NIDM document
        query = """SELECT DISTINCT ?session ?nidm_subj_id ?agent
                    WHERE {
                        ?activity prov:wasAssociatedWith ?agent ;
                            dct:isPartOf ?session  .
                        ?agent rdf:type prov:Agent ;
                            ndar:src_subject_id ?nidm_subj_id .
                    }"""
        #print(query)
        qres = rdf_graph_parse.query(query)


        for row in qres:
            print('%s \t %s' %(row[0],row[1]))
            #find row in CSV file with subject id matching agent from NIDM file

            #csv_row = df.loc[df[id_field]==type(df[id_field][0])(row[1])]
            #find row in CSV file with matching subject id to the agent in the NIDM file
            #be careful about data types...simply type-change dataframe subject id column and query to strings.
            #here we're removing the leading 0's from IDs because pandas.read_csv strips those unless you know ahead of
            #time which column is the subject id....
            csv_row = df.loc[df[id_field].astype('str').str.contains(str(row[1]).lstrip("0"))]

            #if there was data about this subject in the NIDM file already (i.e. an agent already exists with this subject id)
            #then add this CSV assessment data to NIDM file, else skip it....
            if not csv_row.empty:

                #NIDM document session uuid
                session_uuid = row[0]

                #temporary list of string-based URIs of session objects from API
                temp = [o.identifier._uri for o in session_objs]
                #get session object from existing NIDM file that is associated with a specific subject id
                #nidm_session = (i for i,x in enumerate([o.identifier._uri for o in session_objs]) if x == str(session_uuid))
                nidm_session = session_objs[temp.index(str(session_uuid))]
                #for nidm_session in session_objs:
                #    if nidm_session.identifier._uri == str(session_uuid):
                #add an assessment acquisition for the phenotype data to session and associate with agent
                acq=AssessmentAcquisition(session=nidm_session)
                #add acquisition entity for assessment
                acq_entity = AssessmentObject(acquisition=acq)
                #add qualified association with existing agent
                acq.add_qualified_association(person=row[2],role=Constants.NIDM_PARTICIPANT)
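                # row[2] is the existing prov:Agent from the query above, so the new
                # acquisition is linked back to the same person rather than a new agent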

                #store other data from row with columns_to_term mappings
                for row_variable in csv_row:
                    #check if row_variable is subject id, if so skip it
                    if row_variable==id_field:
                        continue
                    else:
                        if not csv_row[row_variable].values[0]:
                            continue
                        #get column_to_term mapping uri and add as namespace in NIDM document
                        #provNamespace(Core.safe_string(None,string=str(row_variable)), column_to_terms[row_variable]["url"])
                        acq_entity.add_attributes({QualifiedName(provNamespace(Core.safe_string(None,string=str(row_variable)), column_to_terms[row_variable]["url"]), ""):csv_row[row_variable].values[0]})
                continue

        #serialize NIDM file
        with open(args.nidm_file,'w') as f:
            print("Writing NIDM file...")
            if args.jsonld:
                f.write(project.serializeJSONLD())
            else:
                f.write(project.serializeTurtle())

            if args.png:
                project.save_DotGraph(str(args.nidm_file + ".png"), format="png")



    else:
        print("Creating NIDM file...")
        #If user did not choose to add this data to an existing NIDM file then create a new one for the CSV data
        #create empty project
        project=Project()

        #simply add name of file to project since we don't know anything about it
        project.add_attributes({Constants.NIDM_FILENAME:args.csv_file})


        #look at column_to_terms dictionary for NIDM URL for subject id  (Constants.NIDM_SUBJECTID)
        id_field=None
        for key, value in column_to_terms.items():
            if Constants.NIDM_SUBJECTID._str == value['label']:
                id_field=key
                #make sure id_field is a string for zero-padded subject ids
                #re-read data file with constraint that key field is read as string
                #df = pd.read_csv(args.csv_file,dtype={id_field : str})

        #if we couldn't find a subject ID field in column_to_terms, ask user
        if id_field is None:
            option=1
            for column in df.columns:
                print("%d: %s" %(option,column))
                option=option+1
            selection=input("Please select the subject ID field from the list above: ")
            id_field=df.columns[int(selection)-1]


        #iterate over rows and store in NIDM file
        for csv_index, csv_row in df.iterrows():
            #create a session object
            session=Session(project)

            #create an acquisition activity and entity
            acq=AssessmentAcquisition(session)
            acq_entity=AssessmentObject(acq)



            #store other data from row with columns_to_term mappings
            for row_variable,row_data in csv_row.items():
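                # skip empty/falsy cells (note: this also skips 0 and empty-string values)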
                if not row_data:
                    continue
                #check if row_variable is subject id, if so skip it
                if row_variable==id_field:
                    #add qualified association with person
                    acq.add_qualified_association(person= acq.add_person(attributes=({Constants.NIDM_SUBJECTID:row_data})),role=Constants.NIDM_PARTICIPANT)

                    continue
                else:
                    #get column_to_term mapping uri and add as namespace in NIDM document
                    acq_entity.add_attributes({QualifiedName(provNamespace(Core.safe_string(None,string=str(row_variable)), column_to_terms[row_variable]["url"]),""):row_data})
                    #print(project.serializeTurtle())

        #serialize NIDM file
        with open(args.output_file,'w') as f:
            print("Writing NIDM file...")
            if args.jsonld:
                f.write(project.serializeJSONLD())
            else:
                f.write(project.serializeTurtle())
            if args.png:
                project.save_DotGraph(str(args.output_file + ".png"), format="png")