Example #1
def validateMatlabPython(subjectNum, subjectDay):
    dataPath = '/data/jag/cnds/amennen/rtAttenPenn/fmridata/behavdata/gonogo/'
    configFile = dataPath + 'subject' + str(
        subjectNum) + '/usedscripts/PennCfg_Day' + str(subjectDay) + '.toml'
    cfg = loadConfigFile(configFile)
    #subjectDayDir = getSubjectDayDir(cfg.session.subjectNum, cfg.session.subjectDay)
    subjectDayDir = '/data/jag/cnds/amennen/rtAttenPenn/fmridata/behavdata/gonogo/subject' + str(
        cfg.session.subjectNum) + '/day' + str(cfg.session.subjectDay)
    matDataDir = subjectDayDir  #os.path.join(cfg.session.dataDir, subjectDayDir)
    pyDataDir = matDataDir
    all_vals = np.zeros((200, 2, len(cfg.session.Runs)))
    for runId in cfg.session.Runs:
        print("EXECUTING ANALYSES FOR RUN {}".format(runId))
        #validatePatternsData(matDataDir, pyDataDir, runId)
        mat_cs, py_cs = crossvalidateModels(matDataDir, pyDataDir, runId)
        # 200 TRs for each run --> want to plot
        all_vals[:, 0, runId - 1] = mat_cs
        all_vals[:, 1, runId - 1] = py_cs
    all_mat_ev = np.reshape(all_vals[:, 0, :],
                            (len(cfg.session.Runs) * 200, 1))
    all_py_ev = np.reshape(all_vals[:, 1, :], (len(cfg.session.Runs) * 200, 1))
    fig, ax = plt.subplots(figsize=(12, 7))
    plt.plot(all_mat_ev, all_py_ev, '.')
    plt.plot([-5, 5], [-5, 5], '--k')
    plt.title('S%i MAT x PY CORR = %4.4f' %
              (cfg.session.subjectNum,
               scipy.stats.pearsonr(all_mat_ev, all_py_ev)[0][0]))
    plt.xlabel('MATLAB')
    plt.ylabel('PYTHON')
    plt.xlim([-1.5, 1.5])
    plt.ylim([-1.5, 1.5])
    plt.show()
Example #2
def WebMain(params):
    installLoggers(logging.INFO, logging.INFO, filename='logs/webServer.log')
    cfg = loadConfigFile(params.experiment)

    if cfg.experiment.model == 'rtAtten':
        # call starts web server thread and doesn't return
        rtAttenWeb = RtAttenWeb()
        rtAttenWeb.init(params, cfg)
    else:
        print('Model {}: unsupported or not specified'.format(
            cfg.experiment.model))
Example #3
def ClientMain(params):
    installLoggers(logging.INFO,
                   logging.INFO,
                   filename='logs/rtAttenClient.log')

    webpipes = None
    if params.webpipe is not None:
        # This process was opened by a webserver which will communicate using webpipes.
        # Open the in and out named pipes and pass to RtAttenClient for communication
        # with the webserver process. Pipe.Open() blocks until the other end opens
        # it as well. Therefore open the reader first here and the writer
        # first within the webserver.
        webpipes = StructDict()
        webpipes.name_in = params.webpipe + '.toclient'
        webpipes.name_out = params.webpipe + '.fromclient'
        webpipes.fd_in = open(webpipes.name_in, mode='r')
        webpipes.fd_out = open(webpipes.name_out, mode='w', buffering=1)
        # Create a thread which will detect if the parent process exited by
        #  reading from stdin, when stdin is closed exit this process
        exitThread = threading.Thread(name='exitThread',
                                      target=processShouldExitThread,
                                      args=(params, ))
        exitThread.setDaemon(True)
        exitThread.start()

    cfg = loadConfigFile(params.experiment)
    params = mergeParamsConfigs(params, cfg)

    # Start local server if requested
    if params.run_local is True:
        startLocalServer(params.port)

    # run based on config file and passed in options
    client: RtfMRIClient  # type annotation only; the concrete client is assigned below
    if params.cfg.experiment.model == 'base':
        client = BaseClient()
    elif params.cfg.experiment.model == 'rtAtten':
        client = RtAttenClient()
        if params.webpipe is not None:
            client.setWeb(webpipes, params.webfilesremote)
    else:
        raise InvocationError("Unsupported model %s" %
                              (params.cfg.experiment.model))
    try:
        client.runSession(params.addr, params.port, params.cfg)
    except Exception as err:
        print(err)
        traceback_str = ''.join(traceback.format_tb(err.__traceback__))
        print(traceback_str)

    if params.run_local is True:
        stopLocalServer(params)

    return True
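The comments above pin down the FIFO open-ordering protocol: opening a named pipe blocks until both ends are attached, so the client opens its reader first while the webserver opens its writer first. A minimal self-contained sketch of that handshake, assuming a POSIX system (the pipe names and messages are illustrative, not from the source):

import os
import threading

PIPE_IN = '/tmp/demo.toclient'     # illustrative, mirrors params.webpipe + '.toclient'
PIPE_OUT = '/tmp/demo.fromclient'  # illustrative, mirrors params.webpipe + '.fromclient'

for name in (PIPE_IN, PIPE_OUT):
    if not os.path.exists(name):
        os.mkfifo(name)

def webserver_side():
    # Writer first here, matching the client's reader-first order below.
    with open(PIPE_IN, 'w', buffering=1) as to_client, open(PIPE_OUT, 'r') as from_client:
        to_client.write('hello client\n')
        print('webserver got:', from_client.readline().strip())

t = threading.Thread(target=webserver_side, daemon=True)
t.start()

# Client side: reader first; each open() blocks until the peer end attaches.
with open(PIPE_IN, 'r') as fd_in, open(PIPE_OUT, 'w', buffering=1) as fd_out:
    print('client got:', fd_in.readline().strip())
    fd_out.write('hello server\n')
t.join()

Opening both pipes in the opposite order on one side would deadlock, which is why the comment insists the reader is opened first in the client and the writer first in the webserver.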
Example #4
def validateMatlabPython(configFile):
    cfg = loadConfigFile(configFile)
    matDataDir = getSubjectDataDir(cfg.session.dataDir, cfg.session.subjectNum,
                                   cfg.session.subjectDay)
    pyDataDir = matDataDir
    all_ROC = np.zeros((4, 2, len(cfg.session.Runs)))
    for runId in cfg.session.Runs:
        print("EXECUTING ANALYSES FOR RUN {}".format(runId))
        validatePatternsData(matDataDir, pyDataDir, runId)
        validateFileprocessingTxt(matDataDir, pyDataDir, runId)
        mat_roc, py_roc = crossvalidateModels(matDataDir, pyDataDir, runId)
        all_ROC[:, 0, runId - 1] = mat_roc
        all_ROC[:, 1, runId - 1] = py_roc
    fullfilename = matDataDir + '/' + 'xvalresults.npy'
    print("saving to %s\n" % fullfilename)
    np.save(fullfilename, all_ROC)
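Once saved, the cross-validation array can be reloaded for later plotting or aggregation; a minimal sketch (the path below is illustrative):

import numpy as np

all_ROC = np.load('/path/to/subjectN/dayM/xvalresults.npy')  # shape (4, 2, nRuns)
mat_roc, py_roc = all_ROC[:, 0, :], all_ROC[:, 1, :]
print('mean MATLAB ROC per fold:', mat_roc.mean(axis=1))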
Example #5
    def setup_class(cls):
        cfgFilePath = getCfgFileFullPath()
        print("## Init TestDeadlines ##")
        # generate data if needed
        print("## Generate Data if needed ##")
        gd.generate_data(cfgFilePath)

        # Start Server
        cls.server = threading.Thread(name='server',
                                      target=ServerMain,
                                      args=(cls.serverPort, 20))
        cls.server.setDaemon(True)
        cls.server.start()
        time.sleep(0.5)

        # Start client
        cls.cfg = loadConfigFile(cfgFilePath)
        cls.client = RtAttenClient()
        cls.client.connect('localhost', cls.serverPort)
        cls.client.initSession(cls.cfg)

        # Run Client Until first TR
        runId = cls.cfg.session.Runs[0]
        scanNum = cls.cfg.session.ScanNums[0]
        subjectDataDir = getSubjectDataDir(cls.cfg.session.dataDir,
                                           cls.cfg.session.subjectNum,
                                           cls.cfg.session.subjectDay)
        patterns, _ = getLocalPatternsFile(cls.cfg.session, subjectDataDir,
                                           runId)
        run = createRunConfig(cls.cfg.session, patterns, runId, scanNum)
        cls.client.id_fields.runId = run.runId
        blockGroup = run.blockGroups[0]
        cls.client.id_fields.blkGrpId = blockGroup.blkGrpId
        block = blockGroup.blocks[0]
        cls.client.id_fields.blockId = block.blockId
        runCfg = copy_toplevel(run)
        reply = cls.client.sendCmdExpectSuccess(MsgEvent.StartRun, runCfg)
        blockGroupCfg = copy_toplevel(blockGroup)
        reply = cls.client.sendCmdExpectSuccess(MsgEvent.StartBlockGroup,
                                                blockGroupCfg)
        blockCfg = copy_toplevel(block)
        reply = cls.client.sendCmdExpectSuccess(MsgEvent.StartBlock, blockCfg)
        assert reply.result == MsgResult.Success
        cls.run = run
        cls.block = block
Example #6
def ClientMain(config: str, rayremote: str):
    ray.init(redis_address=rayremote)
    RtAttenModel_Remote = ray.remote(RtAttenModel_Ray)

    rtatten = RtAttenModel_Remote.remote()
    client = LocalClient(rtatten)
    cfg = loadConfigFile(config)
    client.start_session(cfg)
    subjectDataDir = getSubjectDataDir(cfg.session.dataDir,
                                       cfg.session.subjectNum,
                                       cfg.session.subjectDay)
    for runId in cfg.session.Runs:
        patterns, _ = Pats.getLocalPatternsFile(cfg.session, subjectDataDir,
                                                runId)
        run = Pats.createRunConfig(cfg.session, patterns, runId)
        validateRunCfg(run)
        client.do_run(run)
    client.end_session()
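Here `ray.remote(RtAttenModel_Ray)` wraps the model class as a Ray actor so its methods execute in a separate worker process. A minimal self-contained sketch of that actor pattern, assuming only that ray is installed (the Counter class is illustrative, standing in for RtAttenModel_Ray):

import ray

ray.init()  # local cluster; the example above connects to an existing one instead

@ray.remote
class Counter:
    """Illustrative actor: state lives in the worker process."""
    def __init__(self):
        self.n = 0

    def incr(self):
        self.n += 1
        return self.n

counter = Counter.remote()             # like RtAttenModel_Remote.remote()
print(ray.get(counter.incr.remote()))  # -> 1
ray.shutdown()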
Example #7
def initializeGreenEyes(configFile):
    # load subject information
    # create directories for new niftis
    # randomize which category they'll be attending to and save that
    # purpose: load information and add to the configuration anything you won't
    # want to recompute each time a new file comes in
    # TO RUN AT THE START OF EACH RUN
    cfg = loadConfigFile(configFile)
    if cfg.sessionId in (None, '') or cfg.useSessionTimestamp is True:
        cfg.useSessionTimestamp = True
        cfg.sessionId = dateStr30(time.localtime())
    else:
        cfg.useSessionTimestamp = False
    cfg.bids_id = 'sub-{0:03d}'.format(cfg.subjectNum)
    cfg.ses_id = 'ses-{0:02d}'.format(cfg.subjectDay)
    cfg.dataDir = cfg.codeDir + 'data'
    cfg.subjectDcmDir = glob.glob(cfg.imgDir.format(cfg.subjectName))[0]
    cfg.classifierDir = cfg.codeDir + cfg.classifierDir
    cfg.subject_full_day_path = '{0}/{1}/{2}'.format(cfg.dataDir, cfg.bids_id,
                                                     cfg.ses_id)
    cfg.temp_nifti_dir = '{0}/converted_niftis/'.format(
        cfg.subject_full_day_path)
    cfg.subject_reg_dir = '{0}/registration_outputs/'.format(
        cfg.subject_full_day_path)
    cfg.nStations, cfg.stationsDict, cfg.last_tr_in_station = getStationInformation(
        cfg)

    # REGISTRATION THINGS
    cfg.wf_dir = '{0}/derivatives/work/fmriprep_wf/single_subject_{1:03d}_wf'.format(
        cfg.bidsDir, cfg.subjectNum)
    cfg.BOLD_to_T1 = cfg.wf_dir + '/func_preproc_ses_01_task_story_run_01_wf/bold_reg_wf/bbreg_wf/fsl2itk_fwd/affine.txt'
    cfg.T1_to_MNI = cfg.wf_dir + '/anat_preproc_wf/t1_2_mni/ants_t1_to_mniComposite.h5'
    cfg.ref_BOLD = glob.glob(
        cfg.wf_dir +
        '/func_preproc_ses_01_task_story_run_01_wf/bold_reference_wf/gen_ref/ref_image.nii.gz'
    )[0]

    ###### BUILD SUBJECT FOLDERS #######
    buildSubjectFolders(cfg)
    ###### REGISTRATION PARAMETERS #######
    return cfg
Example #8
def generate_data(cfgFile):
    cfg = loadConfigFile(cfgFile)
    frame = inspect.currentframe()
    moduleFile = typing.cast(str, frame.f_code.co_filename)  # type: ignore
    moduleDir = os.path.dirname(moduleFile)
    cfgDate = parser.parse(cfg.session.date).strftime("%Y%m%d")
    dataDir = os.path.join(
        cfg.session.dataDir, "subject{}/day{}".format(cfg.session.subjectNum,
                                                      cfg.session.subjectDay))
    imgDir = os.path.join(
        cfg.session.imgDir, "{}.{}.{}".format(cfgDate, cfg.session.subjectName,
                                              cfg.session.subjectName))
    if os.path.exists(dataDir) and os.path.exists(imgDir):
        print("output data and image directories already exist, skipping data "
              "generation")
        return
    runPatterns = [
        'patternsdesign_1_20180101T000000.mat',
        'patternsdesign_2_20180101T000000.mat',
        'patternsdesign_3_20180101T000000.mat'
    ]
    template_filename = os.path.join(moduleDir, 'sub_template.nii.gz')
    noise_dict_filename = os.path.join(moduleDir, 'sub_noise_dict.txt')
    roiA_filename = os.path.join(moduleDir, 'ROI_A.nii.gz')
    roiB_filename = os.path.join(moduleDir, 'ROI_B.nii.gz')
    output_file_pattern = '001_0000{}_000{}.mat'
    if not os.path.exists(imgDir):
        os.makedirs(imgDir)
    if not os.path.exists(dataDir):
        os.makedirs(dataDir)

    print('Load data')
    template_nii = nibabel.load(template_filename)
    template = template_nii.get_data()
    # dimsize = template_nii.header.get_zooms()

    roiA_nii = nibabel.load(roiA_filename)
    roiB_nii = nibabel.load(roiB_filename)
    roiA = roiA_nii.get_data()
    roiB = roiB_nii.get_data()

    dimensions = np.array(template.shape[0:3])  # What is the size of the brain

    print('Create mask')
    # Generate the continuous mask from the voxels
    mask, template = sim.mask_brain(
        volume=template,
        mask_self=True,
    )
    # Write out the mask as matlab
    mask_uint8 = mask.astype(np.uint8)
    maskfilename = os.path.join(
        dataDir, 'mask_{}_{}.mat'.format(cfg.session.subjectNum,
                                         cfg.session.subjectDay))
    sio.savemat(maskfilename, {'mask': mask_uint8})

    # Load the noise dictionary
    with open(noise_dict_filename, 'r') as f:
        noise_dict = f.read()

    print('Loading ' + noise_dict_filename)
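    # Note: eval() executes arbitrary code from the file; if the file holds a plain
    # dict literal, ast.literal_eval would be a safer way to parse it.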
    noise_dict = eval(noise_dict)
    noise_dict['matched'] = 0

    runNum = 1
    scanNum = 0
    for patfile in runPatterns:
        fullPatfile = os.path.join(moduleDir, patfile)
        # make dataDir run directory
        runDir = os.path.join(dataDir, "run{}".format(runNum))
        if not os.path.exists(runDir):
            os.makedirs(runDir)
        shutil.copy(fullPatfile, runDir)
        runNum += 1

        pat = sio.loadmat(fullPatfile)
        scanNum += 1
        # shifted labels are in regressor field
        shiftedLabels = pat['patterns']['regressor'][0][0]
        # non-shifted labels are in attCateg field and whether stimulus applied in the stim field
        nsLabels = pat['patterns']['attCateg'][0][0] * pat['patterns']['stim'][
            0][0]
        labels_A = (nsLabels == 1).astype(int)
        labels_B = (nsLabels == 2).astype(int)

        # trialType = pat['patterns']['type'][0][0]
        tr_duration = pat['TR'][0][0]
        disdaqs = pat['disdaqs'][0][0]
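        # 'disdaqs' (discarded acquisitions) appears to be given in seconds, so
        # integer-dividing by the TR duration yields the number of leading TRs to skip.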
        begTrOffset = disdaqs // tr_duration
        nTRs = pat['nTRs'][0][0]
        # nTestTRs = np.count_nonzero(trialType == 2)

        # Preset some of the parameters
        total_trs = nTRs + begTrOffset  # How many time points are there?

        print('Generating data')
        start = time.time()
        noiseVols = sim.generate_noise(
            dimensions=dimensions,
            stimfunction_tr=np.zeros((total_trs, 1)),
            tr_duration=int(tr_duration),
            template=template,
            mask=mask,
            noise_dict=noise_dict,
        )
        print("Time: generate noise vols {} sec".format(time.time() - start))

        nVoxelsA = int(roiA.sum())
        nVoxelsB = int(roiB.sum())
        # Multiply each pattern by each voxel time course
        weights_A = np.tile(labels_A.reshape(-1, 1), nVoxelsA)
        weights_B = np.tile(labels_B.reshape(-1, 1), nVoxelsB)

        print('Creating signal time course')
        signal_func_A = sim.convolve_hrf(
            stimfunction=weights_A,
            tr_duration=tr_duration,
            temporal_resolution=(1 / tr_duration),
            scale_function=1,
        )

        signal_func_B = sim.convolve_hrf(
            stimfunction=weights_B,
            tr_duration=tr_duration,
            temporal_resolution=(1 / tr_duration),
            scale_function=1,
        )

        max_activity = noise_dict['max_activity']
        signal_change = 10  # .01 * max_activity
        signal_func_A *= signal_change
        signal_func_B *= signal_change

        # Combine the signal time course with the signal volume
        print('Creating signal volumes')
        signal_A = sim.apply_signal(
            signal_func_A,
            roiA,
        )

        signal_B = sim.apply_signal(
            signal_func_B,
            roiB,
        )
        # Combine the two signal timecourses
        signal = signal_A + signal_B

        # testTrId = 0
        numVols = noiseVols.shape[3]
        for idx in range(numVols):
            start = time.time()
            brain = noiseVols[:, :, :, idx]
            if idx >= begTrOffset:
                # some initial scans are skipped as only instructions and not stimulus are shown
                signalIdx = idx - begTrOffset
                brain += signal[:, :, :, signalIdx]

            # TODO: how to create a varying combined percentage of A and B signals
            #     if trialType[0][idx] == 1:
            #         # training TR, so create pure A or B signal
            #         if labels_A[idx] != 0:
            #             brain = brain + roiA
            #         elif labels_B[idx] != 0:
            #             brain = brain + roiB
            #     elif trialType[0][idx] == 2:
            #         # testing TR, so create a mixture of A and B signal
            #         testTrId += 1
            #         testPercent = testTrId / nTestTRs
            #         brain = brain + testPercent * roiA + (1-testPercent) * roiB

            # Save the volume as a matlab file
            filenum = idx + 1
            filename = output_file_pattern.format(
                str(scanNum).zfill(2),
                str(filenum).zfill(3))
            outputfile = os.path.join(imgDir, filename)
            brain_float32 = brain.astype(np.float32)
            sio.savemat(outputfile, {'vol': brain_float32})
            print("Time: generate vol {}: {} sec".format(
                filenum,
                time.time() - start))
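Example #8 uses several modules without showing its imports. A plausible import block, assuming `sim` is `brainiak.utils.fmrisim` (which provides the `mask_brain`, `generate_noise`, `convolve_hrf`, and `apply_signal` calls used above); the exact module paths are assumptions:

# Hypothetical imports for generate_data above.
import inspect
import os
import shutil
import time
import typing

import nibabel
import numpy as np
import scipy.io as sio
from dateutil import parser

from brainiak.utils import fmrisim as sim       # assumed source of `sim`
from rtfMRI.RtfMRIClient import loadConfigFile  # as imported in Example #17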
Example #9
def validateModelsMatlabPython(subjectNum, subjectDay, usesamedata):

    dataPath = '/data/jag/cnds/amennen/rtAttenPenn/fmridata/behavdata/gonogo/'
    configFile = dataPath + 'subject' + str(
        subjectNum) + '/usedscripts/PennCfg_Day' + str(subjectDay) + '.toml'
    cfg = loadConfigFile(configFile)
    #subjectDayDir = getSubjectDayDir(cfg.session.subjectNum, cfg.session.subjectDay)
    subjectDayDir = '/data/jag/cnds/amennen/rtAttenPenn/fmridata/behavdata/gonogo/subject' + str(
        cfg.session.subjectNum) + '/day' + str(cfg.session.subjectDay)
    matDataDir = subjectDayDir  #os.path.join(cfg.session.dataDir, subjectDayDir)
    pyDataDir = matDataDir
    all_vals = np.zeros((100, 2, cfg.session.Runs[-1] - 1))
    usenewmodel = 1
    #usesamedata = 1 #whether or not to use same data as with matlab
    for runId in np.arange(1, cfg.session.Runs[-1]):
        runDir = 'run' + str(runId) + '/'
        matModelFn = utils.findNewestFile(
            matDataDir, runDir + 'trainedModel_' + str(runId) + '*.mat')
        pyModelFn = utils.findNewestFile(
            pyDataDir, 'trainedModel_r' + str(runId) + '*_py.mat')
        matModel_train = utils.loadMatFile(matModelFn)
        # to find what matModel includes use matModel.keys() --> trainedModel, trainPats, trainLabels
        # for each model we have W [ nVoxel x 2 classes], biases [ 1 x 2 classes]
        # we can't apply this model to any of the examples in this run, but let's apply it to the first 4 blocks of the next run
        # now load testing data from the next run to test it on
        pyModel_train = utils.loadMatFile(pyModelFn)
        # INSTEAD MAKE NEW MODEL
        print(runId)
        if usenewmodel:
            lrc1 = LogisticRegression(penalty='l2', solver='sag', max_iter=300)
            lrc2 = LogisticRegression(penalty='l2', solver='sag', max_iter=300)
            if usesamedata:
                lrc1.fit(matModel_train.trainPats,
                         pyModel_train.trainLabels[:, 0])
                lrc2.fit(matModel_train.trainPats,
                         pyModel_train.trainLabels[:, 1])
            else:
                lrc1.fit(pyModel_train.trainPats, pyModel_train.trainLabels[:, 0])
                lrc2.fit(pyModel_train.trainPats, pyModel_train.trainLabels[:, 1])
            newTrainedModel = utils.MatlabStructDict({}, 'trainedModel')
            newTrainedModel.trainedModel = StructDict({})
            newTrainedModel.trainedModel.weights = np.concatenate(
                (lrc1.coef_.T, lrc2.coef_.T), axis=1)
            newTrainedModel.trainedModel.biases = np.concatenate(
                (lrc1.intercept_, lrc2.intercept_)).reshape(1, 2)
            newTrainedModel.trainPats = pyModel_train.trainPats
            newTrainedModel.trainLabels = pyModel_train.trainLabels
        # now load the models to test on
        matModelFn = utils.findNewestFile(
            matDataDir, 'run' + str(runId + 1) + '/' + 'trainedModel_' +
            str(runId + 1) + '*.mat')
        pyModelFn = utils.findNewestFile(
            pyDataDir, 'trainedModel_r' + str(runId + 1) + '*_py.mat')
        matModel_test = utils.loadMatFile(matModelFn)
        pyModel_test = utils.loadMatFile(pyModelFn)
        nTRTest = 100
        mat_test_data = matModel_test.trainPats[nTRTest:, :]
        py_test_data = pyModel_test.trainPats[nTRTest:, :]
        test_labels = matModel_test.trainLabels[nTRTest:, :]
        mat_cs = np.zeros((nTRTest, 1))
        py_cs = np.zeros((nTRTest, 1))
        for t in np.arange(nTRTest):
            categ = np.flatnonzero(test_labels[t, :])
            otherCateg = (categ + 1) % 2
            _, _, _, activations_mat = Test_L2_RLR_realtime(
                matModel_train, mat_test_data[t, :], test_labels[t, :])
            mat_cs[t] = activations_mat[categ] - activations_mat[otherCateg]
            if not usenewmodel:
                if not usesamedata:
                    _, _, _, activations_py = Test_L2_RLR_realtime(
                        pyModel_train, py_test_data[t, :], test_labels[t, :])
                else:
                    _, _, _, activations_py = Test_L2_RLR_realtime(
                        pyModel_train, mat_test_data[t, :], test_labels[t, :])
            else:
                if not usesamedata:
                    _, _, _, activations_py = Test_L2_RLR_realtime(
                        newTrainedModel, py_test_data[t, :], test_labels[t, :])
                else:
                    _, _, _, activations_py = Test_L2_RLR_realtime(
                        newTrainedModel, mat_test_data[t, :],
                        test_labels[t, :])
            py_cs[t] = activations_py[categ] - activations_py[otherCateg]
        all_vals[:, 0, runId - 1] = mat_cs[:, 0]
        all_vals[:, 1, runId - 1] = py_cs[:, 0]
        #plt.figure()
        #if usenewmodel:
        #	plt.plot(matModel_train.weights[:,0],newTrainedModel.weights[:,0], '.')
        #else:
        #	plt.plot(matModel_train.weights[:,0],pyModel_train.weights[:,0], '.')
        #plt.xlim([-.02 ,.02])
        #plt.ylim([-.02 ,.02])
        #plt.xlabel('MATLAB')
        #plt.ylabel('PYTHON')
        #plt.show()
    all_mat_ev = np.reshape(all_vals[:, 0, :],
                            ((cfg.session.Runs[-1] - 1) * 100, 1))
    all_py_ev = np.reshape(all_vals[:, 1, :],
                           ((cfg.session.Runs[-1] - 1) * 100, 1))
    fig, ax = plt.subplots(figsize=(12, 7))
    plt.plot(all_mat_ev, all_py_ev, '.')
    plt.plot([-5, 5], [-5, 5], '--k')
    plt.title('S%i MAT x PY CORR = %4.4f' %
              (cfg.session.subjectNum,
               scipy.stats.pearsonr(all_mat_ev, all_py_ev)[0][0]))
    plt.xlabel('MATLAB')
    plt.ylabel('PYTHON')
    plt.xlim([-1.5, 1.5])
    plt.ylim([-1.5, 1.5])
    plt.show()

    plt.figure()
    plt.hist(all_mat_ev, alpha=0.6, label='matlab')
    plt.hist(all_py_ev, alpha=0.6, label='python')
    plt.xlabel('Correct - Incorrect Activation')
    plt.ylabel('Frequency')
    plt.title('S%i MAT x PY CORR = %4.4f' %
              (cfg.session.subjectNum,
               scipy.stats.pearsonr(all_mat_ev, all_py_ev)[0][0]))
    plt.legend()
    plt.show()
Example #10
subjectNum = int(sys.argv[1])

ndays = 3
auc_score = np.zeros((8,ndays)) # save larger to fit all days in
RT_cs = np.zeros((8,ndays))
nTRTest = 100
RT_cs_timecourse = np.zeros((8,nTRTest,ndays))
dataPath = '/data/jux/cnds/amennen/rtAttenPenn/fmridata/behavdata/gonogo/'
subjectDir = dataPath + '/' + 'subject' + str(subjectNum)
print(subjectNum)
for d in np.arange(ndays):
    print(d)
    subjectDay = d + 1
    configFile = dataPath + 'subject' + str(subjectNum) + '/usedscripts/PennCfg_Day' + str(subjectDay) + '.toml'
    cfg = loadConfigFile(configFile)
    subjectDayDir = '/data/jux/cnds/amennen/rtAttenPenn/fmridata/behavdata/gonogo/subject' + str(cfg.session.subjectNum) + '/day' + str(cfg.session.subjectDay)
    pyDataDir = subjectDayDir
    if subjectDay == 1:
        nRuns = 7
        if subjectNum == 106:
            nRuns = 6
    elif subjectDay == 2:
        nRuns = 9
    elif subjectDay == 3:
        nRuns = 8
    #nruns = len(cfg.session.Runs) - 1
    for runId in np.arange(1,nRuns):
        print(runId)
        runDir = 'run'+str(runId)+'/'
        pyModelFn = utils.findNewestFile(pyDataDir, 'trainedModel_r'+str(runId)+'*_py.mat')
Example #11
    argParser.add_argument('--port',
                           '-p',
                           default=5200,
                           type=int,
                           help='server port')
    argParser.add_argument('--experiment',
                           '-e',
                           default='conf/example.toml',
                           type=str,
                           help='experiment file (.json or .toml)')
    argParser.add_argument('--runs',
                           '-r',
                           default=None,
                           type=str,
                           help='Comma separated list of run numbers')
    args = argParser.parse_args()

    cfg = loadConfigFile(args.experiment)

    client = RtAttenClient()
    client.connect(args.addr, args.port)
    client.initSession(cfg)
    if args.runs is None:
        argParser.error('--runs must be specified, e.g. "1,2,3"')
    for runId in args.runs.split(','):
        print('retrieve files for run {}'.format(runId))
        client.retrieveRunFiles(runId)

    client.endSession()
    client.disconnect()
    print('Done')
    sys.exit(0)
Example #12
def transferdicoms(subject,day):
	print('transferring dicom files for subject %i, day %i' % (subject,day))

	from rtfMRI.RtfMRIClient import RtfMRIClient, loadConfigFile
	from shutil import copyfile
	import numpy as np
	import datetime
	import glob
	import os
	import shutil
	import sys

	exp_dir = "/data/jag/cnds/amennen/rtAttenPenn/fmridata/behavdata/gonogo/"
	exp = exp_dir + 'subject' + str(subject) + '/usedscripts/PennCfg_Day' + str(day) + '.toml'
	#exp="/data/jag/cnds/amennen/rtAttenPenn/fmridata/behavdata/gonogo/subject1/usedscripts/PennCfg_day1.toml"
	cfg=loadConfigFile(exp)
	# add check here that subject name matches!! ###
	subjectName=cfg.session.subjectName
	subjectNum=cfg.session.subjectNum
	if subjectNum != subject:
		raise ValueError('Subject number does not agree with cfg file')
	subjectDay=cfg.session.subjectDay
	if subjectDay != day:
		raise ValueError('Day number does not agree with cfg file')
	if subjectDay==1:
		allRuns=np.array(cfg.session.Runs1)
		allScans=np.array(cfg.session.ScanNums1)
	elif subjectDay==2:
		allRuns=np.array(cfg.session.Runs2)
		allScans=np.array(cfg.session.ScanNums2)
	elif subjectDay==3:
		allRuns=np.array(cfg.session.Runs3)
		allScans=np.array(cfg.session.ScanNums3)
	dicom_dir="/data/jag/cnds/amennen/rtAttenPenn/fmridata/transferredImages"
	#dt = datetime.datetime.strptime(cfg.session.date,"%m/%d/%Y")
	dt = cfg.session.sessionId[0:8]
	#dicom_folder=dicom_dir + "/" +  datetime.datetime.strftime(dt,"%Y%m%d") + "." + subjectName + "." + subjectName
	dicom_folder = dicom_dir + '/' + dt + '.' + subjectName + '.' + subjectName
	print('loading from %s' % dicom_folder)
	# we know what the numbers were for the functional runs onwards
	SCAN_NUMBERS= {}
	nRuns = len(allRuns)
	for r in np.arange(nRuns):
		runNumber = r + 1
		SCAN_NUMBERS['gonogo%i' % runNumber] = allScans[r]-1 # important: both moco and non-moco versions
	#dayorder={}
	#day1order=['scout', 'T1w', 'faces1', 'faces2', 'gonogo1', 'gonogo2', 'gonogo3', 'gonogo4', 'gonogo5', 'gonogo6', 'gonogo7', 'fmap1', 'fmap2']
	allfunc={}
	allfunc[1]=['faces1', 'faces2', 'gonogo1', 'gonogo2', 'gonogo3', 'gonogo4', 'gonogo5', 'gonogo6', 'gonogo7']
	allfunc[2]=['exfunc', 'gonogo1', 'gonogo2', 'gonogo3', 'gonogo4', 'gonogo5', 'gonogo6', 'gonogo7', 'gonogo8', 'gonogo9']
	allfunc[3]=['exfunc','gonogo1', 'gonogo2', 'gonogo3', 'gonogo4', 'gonogo5', 'gonogo6', 'gonogo7', 'gonogo8','faces1', 'faces2' ]
	anat=['scout', 'T1w']
	fmap=['fmap1', 'fmap2']
	last_scan = glob.glob(dicom_folder + "/*.dcm")[-1]
	last_run = int(last_scan[-17:-11])
	DICOM_DICT = {}
	NDICOM = {}
	for s_ind in np.arange(last_run):
		this_runs_dicom = glob.glob(dicom_folder + "/001_%6.6i*.dcm" % (s_ind+1))
		DICOM_DICT[s_ind+1] = this_runs_dicom
		NDICOM[s_ind+1] = len(this_runs_dicom)
		print("FOUND: scanning run %i contains %i dicom files" %(s_ind+1,len(this_runs_dicom)))

	scout_begin_TR=128
	faces_TR=147
	gonogo_TR=242
	fmap1_TR=120
	fmap2_TR=60
	exfunc_TR=10
	anat_TR=176
	anat = [key for key,val in NDICOM.items() if val==anat_TR]
	scout1 = [key for key,val in NDICOM.items() if val==scout_begin_TR][0]
	faces = [key for key,val in NDICOM.items() if val==faces_TR]
	fmap1 = [key for key,val in NDICOM.items() if val==fmap1_TR]
	fmap2 = [key for key,val in NDICOM.items() if val==fmap2_TR]
	exfunc = [key for key,val in NDICOM.items() if val==exfunc_TR]
	SCAN_NUMBERS['scout'] = np.arange(scout1,scout1+4)
	if len(anat)>0:
		SCAN_NUMBERS['T1w'] = anat[0]
		ANAT_TAKEN = True
	else:
		ANAT_TAKEN = False
	if len(faces) > 0:
		SCAN_NUMBERS['faces1'] = faces[0]
		SCAN_NUMBERS['faces2'] = faces[2]
	if len(exfunc) > 0:
		SCAN_NUMBERS['exfunc'] = exfunc[0]
	SCAN_NUMBERS['fmap1'] = fmap1[0]
	SCAN_NUMBERS['fmap2'] = fmap2[0]
	############################################################################################
	# CHECK ALL THE SCAN NUMBERS ARE CORRECT
	############################################################################################
	print('*******************************')
	print('scan numbers are')
	for k, v in SCAN_NUMBERS.items():
		print(k, v)
	correct_scans = input('Are the scan numbers correct? y/n\n')
	if correct_scans == 'n':
		sys.exit('wrong scan numbers!\n MODIFY THEM IN %s' % exp )
	############################################################################################
	# CHECK ALL THE SCAN NUMBERS ARE CORRECT
	############################################################################################

	# now we can iterate over the whole folder and copy the dicom files over into new directory
	dicom_out="/data/jag/cnds/amennen/rtAttenPenn/fmridata/Dicom"
	bids_id = 'sub-{0:03d}'.format(subjectNum)
	ses_id = 'ses-{0:02d}'.format(subjectDay)
	day_path=os.path.join(dicom_out,bids_id,ses_id)
	# now make each directory for type of scan
	if ANAT_TAKEN:
		scantypes=['anat', 'fmap', 'func']
	else:
		scantypes=['fmap', 'func']
	for s in scantypes:
		full_path=os.path.join(day_path,s)
		if not os.path.exists(full_path):
			os.makedirs(full_path)

	# start with scout
	#src_files = DICOM_DICT[SCAN_NUMBERS['scout']]
	#dest_path = os.path.join(day_path,'anat','scout')
	#if not os.path.exists(dest_path):
	#	os.makedirs(dest_path)
	#for file_name in src_files:
	#    fn=os.path.split(file_name)[-1]
	#    dest=os.path.join(dest_path,fn)
	#    shutil.copyfile(file_name, dest)

	# first transfer anat
	if ANAT_TAKEN:
		src_files = DICOM_DICT[SCAN_NUMBERS['T1w']]
		dest_path = os.path.join(day_path,'anat', 'T1w')
		exfile=os.path.split(src_files[0])[-1]
		print('copying run from %s into %s' % (exfile,dest_path))
		if not os.path.exists(dest_path):
			os.makedirs(dest_path)
		for file_name in src_files:
			fn=os.path.split(file_name)[-1]
			dest=os.path.join(dest_path,fn)
			shutil.copyfile(file_name, dest)

	dest_path = os.path.join(day_path, 'func')
	n_func_scans = len(allfunc[subjectDay])
	scan_names=allfunc[subjectDay]
	nrunsperfunc = 2
	faces_counter=0
	gonogo_counter=0
	for f_ind in np.arange(n_func_scans):
		this_scan=scan_names[f_ind]
		if 'faces' in this_scan:
			task='faces'
			faces_counter+=1
			run=faces_counter
		elif 'gonogo' in this_scan:
			task='gonogo'
			gonogo_counter+=1
			run=gonogo_counter
		elif 'exfunc' in this_scan:
			task='exfunc'
			run=1
		
		full_name= 'task-' + task + '_' + 'rec-' + 'uncorrected' + '_' + 'run-{0:02d}'.format(run) + '_bold'
		dicom_out=os.path.join(dest_path,full_name)
		if not os.path.exists(dicom_out):
			os.makedirs(dicom_out)
		src_files = DICOM_DICT[SCAN_NUMBERS[this_scan]]
		exfile=os.path.split(src_files[0])[-1]
		print('copying run from %s into %s' % (exfile,dicom_out))
		for file_name in src_files:
			fn=os.path.split(file_name)[-1]
			dest = os.path.join(dicom_out,fn)
			shutil.copyfile(file_name, dest)
	
		full_name= 'task-' + task + '_' + 'rec-' + 'corrected' + '_' + 'run-{0:02d}'.format(run) + '_bold'
		dicom_out=os.path.join(dest_path,full_name)
		if not os.path.exists(dicom_out):
			os.makedirs(dicom_out)
		src_files = DICOM_DICT[SCAN_NUMBERS[this_scan] + 1]
		exfile=os.path.split(src_files[0])[-1]
		print('copying run from %s into %s' % (exfile,dicom_out))
		for file_name in src_files:
			fn=os.path.split(file_name)[-1]
			dest = os.path.join(dicom_out,fn)
			shutil.copyfile(file_name, dest)

	# now do same for fmap
	this_scan='fmap1'
	# first scan is magnitude
	dest_path = os.path.join(day_path,'fmap')
	full_name='magnitude1'
	dicom_out=os.path.join(dest_path,full_name)
	if not os.path.exists(dicom_out):
		os.makedirs(dicom_out)
	src_files = DICOM_DICT[SCAN_NUMBERS[this_scan]]
	exfile=os.path.split(src_files[0])[-1]
	print('copying run from %s into %s' % (exfile,dicom_out))
	for file_name in src_files:
		fn=os.path.split(file_name)[-1]
		dest = os.path.join(dicom_out,fn)
		shutil.copyfile(file_name, dest)
	
	this_scan='fmap2'
	dest_path = os.path.join(day_path,'fmap')
	full_name='phasediff'
	dicom_out=os.path.join(dest_path,full_name)
	if not os.path.exists(dicom_out):
		os.makedirs(dicom_out)
	src_files = DICOM_DICT[SCAN_NUMBERS[this_scan]]
	exfile=os.path.split(src_files[0])[-1]
	print('copying run from %s into %s' % (exfile,dicom_out))
	for file_name in src_files:
		fn=os.path.split(file_name)[-1]
		dest = os.path.join(dicom_out,fn)
		shutil.copyfile(file_name, dest)
Example #13
def initializeGreenEyes(configFile,params):
    # load subject information
    # create directories for new niftis
    # randomize which category they'll be attending to and save that
    # purpose: load information and add to the configuration anything you won't
    # want to recompute each time a new file comes in
    # TO RUN AT THE START OF EACH RUN

    cfg = loadConfigFile(configFile)
    if cfg.sessionId in (None, '') or cfg.useSessionTimestamp is True:
        cfg.useSessionTimestamp = True
        cfg.sessionId = dateStr30(time.localtime())
    else:
        cfg.useSessionTimestamp = False
    # MERGE WITH PARAMS
    if params.runs is not None:
        if params.scans is None:
            raise InvocationError(
            "Scan numbers must be specified when run numbers are specified.\n"
            "Use -s to input scan numbers that correspond to the runs entered.")
        cfg.runs = [int(x) for x in params.runs.split(',')]
        cfg.scanNums = [int(x) for x in params.scans.split(',')]
    # GET DICOM DIRECTORY
    if cfg.mode != 'debug':
        if cfg.buildImgPath:
            imgDirDate = datetime.now()
            dateStr = cfg.date.lower()
            if dateStr != 'now' and dateStr != 'today':
                try:
                    imgDirDate = parser.parse(cfg.date)
                except ValueError as err:
                    raise RequestError('Unable to parse date string {} {}'.format(cfg.date, err))
            datestr = imgDirDate.strftime("%Y%m%d")
            imgDirName = "{}.{}.{}".format(datestr, cfg.subjectName, cfg.subjectName)
            cfg.dicomDir = os.path.join(cfg.intelrt.imgDir, imgDirName)
        else:
            cfg.dicomDir = cfg.intelrt.imgDir # then the whole path was supplied
    else:
        cfg.dicomDir = glob.glob(cfg.cluster.imgDir.format(cfg.subjectName))[0]
    cfg.webpipe = params.webpipe
    cfg.webfilesremote = params.webfilesremote # FLAG FOR REMOTE OR LOCAL
    ########
    cfg.bids_id = 'sub-{0:03d}'.format(cfg.subjectNum)
    cfg.ses_id = 'ses-{0:02d}'.format(cfg.subjectDay)
    if cfg.mode == 'local':
        # then all processing is happening on linux too
        cfg.dataDir = cfg.intelrt.codeDir + 'data'
        cfg.classifierDir = cfg.intelrt.classifierDir
        cfg.mask_filename = cfg.intelrt.maskDir + cfg.MASK
        cfg.MNI_ref_filename = cfg.intelrt.maskDir + cfg.MNI_ref_BOLD
    elif cfg.mode == 'cloud':
        cfg.dataDir = cfg.cloud.codeDir + 'data'
        cfg.classifierDir = cfg.cloud.classifierDir
        cfg.mask_filename = cfg.cloud.maskDir + cfg.MASK
        cfg.MNI_ref_filename = cfg.cloud.maskDir + cfg.MNI_ref_BOLD
        cfg.intelrt.subject_full_day_path = '{0}/data/{1}/{2}'.format(
            cfg.intelrt.codeDir, cfg.bids_id, cfg.ses_id)
    elif cfg.mode == 'debug':
        cfg.dataDir = cfg.cluster.codeDir + 'data'
        cfg.classifierDir = cfg.cluster.classifierDir
        cfg.mask_filename = cfg.cluster.maskDir + cfg.MASK
        cfg.MNI_ref_filename = cfg.cluster.maskDir + cfg.MNI_ref_BOLD

    cfg.subject_full_day_path = '{0}/{1}/{2}'.format(cfg.dataDir,cfg.bids_id,cfg.ses_id)
    cfg.temp_nifti_dir = '{0}/converted_niftis/'.format(cfg.subject_full_day_path)
    cfg.subject_reg_dir = '{0}/registration_outputs/'.format(cfg.subject_full_day_path)
    cfg.nStations, cfg.stationsDict, cfg.last_tr_in_station = getStationInformation(cfg)

    # REGISTRATION THINGS
    cfg.wf_dir = '{0}/{1}/ses-{2:02d}/registration/'.format(cfg.dataDir,cfg.bids_id,1)
    cfg.BOLD_to_T1= cfg.wf_dir + 'affine.txt'
    cfg.T1_to_MNI= cfg.wf_dir + 'ants_t1_to_mniComposite.h5'
    cfg.ref_BOLD=cfg.wf_dir + 'ref_image.nii.gz'

    # GET CONVERSION FOR HOW TO FLIP MATRICES
    cfg.axesTransform = getTransform()
    ###### BUILD SUBJECT FOLDERS #######
    return cfg
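A hedged sketch of how this initializer might be invoked; the StructDict import path and the parameter values are assumptions, while the attribute names follow the accesses in the function above:

from rtfMRI.StructDict import StructDict  # import path is a guess

params = StructDict({'runs': '1,2', 'scans': '11,13',
                     'webpipe': None, 'webfilesremote': False})
cfg = initializeGreenEyes('conf/greenEyes_organized.toml', params)
print(cfg.bids_id, cfg.ses_id, cfg.nStations)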
Example #14
def validateModelsMatlabPython(subjectNum, subjectDay):

    dataPath = '/data/jag/cnds/amennen/rtAttenPenn/fmridata/behavdata/gonogo/'
    configFile = dataPath + 'subject' + str(
        subjectNum) + '/usedscripts/PennCfg_Day' + str(subjectDay) + '.toml'
    cfg = loadConfigFile(configFile)
    #subjectDayDir = getSubjectDayDir(cfg.session.subjectNum, cfg.session.subjectDay)
    subjectDayDir = '/data/jag/cnds/amennen/rtAttenPenn/fmridata/behavdata/gonogo/subject' + str(
        cfg.session.subjectNum) + '/day' + str(cfg.session.subjectDay)
    matDataDir = subjectDayDir  #os.path.join(cfg.session.dataDir, subjectDayDir)
    pyDataDir = matDataDir
    all_vals = np.zeros((100, 2, cfg.session.Runs[-1] - 1))
    for runId in np.arange(1, cfg.session.Runs[-1]):
        runDir = 'run' + str(runId) + '/'
        matModelFn = utils.findNewestFile(
            matDataDir, runDir + 'trainedModel_' + str(runId) + '*.mat')
        pyModelFn = utils.findNewestFile(
            pyDataDir, 'trainedModel_r' + str(runId) + '*_py.mat')
        matModel_train = utils.loadMatFile(matModelFn)
        # to find what matModel includes use matModel.keys() --> trainedModel, trainPats, trainLabels
        # for each model we have W [ nVoxel x 2 classes], biases [ 1 x 2 classes]
        # we can't apply this model to any of the examples in this run, but let's apply it to the first 4 blocks of the next run
        # now load testing data from the next run to test it on
        pyModel_train = utils.loadMatFile(pyModelFn)
        # now load the models to test on
        matModelFn = utils.findNewestFile(
            matDataDir, 'run' + str(runId + 1) + '/' + 'trainedModel_' +
            str(runId + 1) + '*.mat')
        pyModelFn = utils.findNewestFile(
            pyDataDir, 'trainedModel_r' + str(runId + 1) + '*_py.mat')
        matModel_test = utils.loadMatFile(matModelFn)
        pyModel_test = utils.loadMatFile(pyModelFn)
        nTRTest = 100
        mat_test_data = matModel_test.trainPats[nTRTest:, :]
        py_test_data = pyModel_test.trainPats[nTRTest:, :]
        test_labels = matModel_test.trainLabels[nTRTest:, :]
        mat_cs = np.zeros((nTRTest, 1))
        py_cs = np.zeros((nTRTest, 1))
        for t in np.arange(nTRTest):
            categ = np.flatnonzero(test_labels[t, :])
            otherCateg = (categ + 1) % 2
            _, _, _, activations_mat = Test_L2_RLR_realtime(
                matModel_train, mat_test_data[t, :], test_labels[t, :])
            mat_cs[t] = activations_mat[categ] - activations_mat[otherCateg]
            _, _, _, activations_py = Test_L2_RLR_realtime(
                pyModel_train, py_test_data[t, :], test_labels[t, :])
            py_cs[t] = activations_py[categ] - activations_py[otherCateg]
        all_vals[:, 0, runId - 1] = mat_cs[:, 0]
        all_vals[:, 1, runId - 1] = py_cs[:, 0]
    all_mat_ev = np.reshape(all_vals[:, 0, :],
                            ((cfg.session.Runs[-1] - 1) * 100, 1))
    all_py_ev = np.reshape(all_vals[:, 1, :],
                           ((cfg.session.Runs[-1] - 1) * 100, 1))
    #fix,ax = plt.subplots(figsize=(12,7))
    #plt.plot(all_mat_ev,all_py_ev, '.')
    #plt.plot([-5,5],[-5,5], '--k')
    #plt.title('S%i MAT x PY CORR = %4.4f' % (cfg.session.subjectNum, scipy.stats.pearsonr(all_mat_ev,all_py_ev)[0][0]))
    #plt.xlabel('MATLAB')
    #plt.ylabel('PYTHON')
    #plt.xlim([-1.5,1.5])
    #plt.ylim([-1.5,1.5])
    #plt.show()

    plt.figure()
    plt.hist(all_mat_ev, alpha=0.6, label='matlab')
    plt.hist(all_py_ev, alpha=0.6, label='python')
    plt.xlabel('Correct - Incorrect Activation')
    plt.ylabel('Frequency')
    plt.title('S%i MAT x PY CORR = %4.4f' %
              (cfg.session.subjectNum,
               scipy.stats.pearsonr(all_mat_ev, all_py_ev)[0][0]))
    plt.legend()
    plt.show()
Example #15
def train_test_python_classifier(subjectNum):
    ndays = 3
    auc_score = np.zeros((8, ndays))  # save larger to fit all days in
    RT_cs = np.zeros((8, ndays))
    dataPath = '/data/jag/cnds/amennen/rtAttenPenn/fmridata/behavdata/gonogo/'
    subjectDir = dataPath + '/' + 'subject' + str(subjectNum)
    print(subjectNum)
    all_python_evidence = np.zeros(
        (9, 100, 3))  # time course of classifier evidence
    for d in np.arange(ndays):
        print(d)
        subjectDay = d + 1
        configFile = dataPath + 'subject' + str(
            subjectNum) + '/usedscripts/PennCfg_Day' + str(
                subjectDay) + '.toml'
        cfg = loadConfigFile(configFile)
        subjectDayDir = '/data/jag/cnds/amennen/rtAttenPenn/fmridata/behavdata/gonogo/subject' + str(
            cfg.session.subjectNum) + '/day' + str(cfg.session.subjectDay)
        pyDataDir = subjectDayDir
        if subjectDay == 1:
            nRuns = 7
            print('here')
            if str(subjectNum) == '106':
                nRuns = 6
                print('here')
            else:
                print(subjectNum)
                if subjectNum == 106:
                    print('finding it here')
                print('nothere')
        elif subjectDay == 2:
            nRuns = 9
        elif subjectDay == 3:
            nRuns = 8
        print('total number of runs: %i' % nRuns)
        print(subjectNum)
        print(subjectDay)
        print(nRuns)
        #nruns = len(cfg.session.Runs) - 1
        #nruns = len(cfg.session.Runs) - 1
        for r in np.arange(0, nRuns - 1):
            runId = r + 1  # now it goes from 0 : n Runs - 1
            print(runId)
            runDir = 'run' + str(runId) + '/'
            pyModelFn = utils.findNewestFile(
                pyDataDir, 'trainedModel_r' + str(runId) + '*_py.mat')
            # to find what matModel includes use matModel.keys() --> trainedModel, trainPats, trainLabels
            # for each model we have W [ nVoxel x 2 classes], biases [ 1 x 2 classes]
            # we can't apply this model to any of the examples in this run, but let's apply it to the first 4 blocks of the next run
            # now load testing data from the next run to test it on
            pyModel_train = utils.loadMatFile(pyModelFn)
            # INSTEAD MAKE NEW MODEL
            lrc1 = LogisticRegression(penalty='l2',
                                      solver='saga',
                                      max_iter=300)
            lrc2 = LogisticRegression(penalty='l2',
                                      solver='saga',
                                      max_iter=300)

            lrc1.fit(pyModel_train.trainPats, pyModel_train.trainLabels[:, 0])
            lrc2.fit(pyModel_train.trainPats, pyModel_train.trainLabels[:, 1])
            newTrainedModel = utils.MatlabStructDict({}, 'trainedModel')
            newTrainedModel.trainedModel = StructDict({})
            newTrainedModel.trainedModel.weights = np.concatenate(
                (lrc1.coef_.T, lrc2.coef_.T), axis=1)
            newTrainedModel.trainedModel.biases = np.concatenate(
                (lrc1.intercept_, lrc2.intercept_)).reshape(1, 2)
            newTrainedModel.trainPats = pyModel_train.trainPats
            newTrainedModel.trainLabels = pyModel_train.trainLabels

            # now load testing data for CV
            pyModelFn = utils.findNewestFile(
                pyDataDir, 'trainedModel_r' + str(runId + 1) + '*_py.mat')
            pyModel_test = utils.loadMatFile(pyModelFn)
            nTRTest = 100
            py_test_data = pyModel_test.trainPats[nTRTest:, :]
            test_labels = pyModel_test.trainLabels[nTRTest:, :]
            py_cs = np.zeros((nTRTest, 1))
            activations = np.zeros((nTRTest, 2))
            for t in np.arange(nTRTest):
                _, _, _, activations_py = Test_L2_RLR_realtime(
                    newTrainedModel, py_test_data[t, :], test_labels[t, :])
                activations[t, :] = activations_py

            fpr2, tpr2, thresholds2 = metrics.roc_curve(test_labels[:, 1],
                                                        activations[:, 1] -
                                                        activations[:, 0],
                                                        pos_label=1)
            auc_score[r, d] = metrics.auc(
                fpr2, tpr2
            )  # auc of this data applied to the first half of the next run
            # now apply to block data-- realtime values
            pyDataFn = utils.findNewestFile(
                pyDataDir, 'blkGroup_r' + str(runId + 1) + '_p2_*_py.mat')
            pyData_test = utils.loadMatFile(pyDataFn)
            regressor = pyData_test.regressor
            TRs_to_test = np.argwhere(np.sum(regressor, axis=0))
            RT_data = pyData_test.raw_sm_filt_z[TRs_to_test, :]
            RT_regressor = regressor[:, TRs_to_test].T.reshape(nTRTest, 2)
            # now do the same thing and test for every TR --> get category separation
            cs = np.zeros((nTRTest, 1))
            for t in np.arange(nTRTest):
                categ = np.flatnonzero(RT_regressor[t, :])
                otherCateg = (categ + 1) % 2
                _, _, _, activations_py = Test_L2_RLR_realtime(
                    newTrainedModel, RT_data[t, :].flatten(),
                    RT_regressor[t, :])
                cs[t] = activations_py[categ] - activations_py[otherCateg]

            # take average for this run
            RT_cs[r, d] = np.mean(cs)
            all_python_evidence[r, :, d] = cs[:, 0]
    outfile = subjectDir + '/' 'offlineAUC_RTCS'
    np.savez(outfile, auc=auc_score, cs=RT_cs, all_ev=all_python_evidence)
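np.savez appends the .npz extension and stores each keyword as a named array, so the results can be reloaded like this (path illustrative):

import numpy as np

results = np.load('/path/to/subjectN/offlineAUC_RTCS.npz')
auc_score, RT_cs, all_ev = results['auc'], results['cs'], results['all_ev']
print(auc_score.shape)  # (8, ndays)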
Example #16
        cfg.intelrt.BOLD_to_T1 = cfg.intelrt.wf_dir + 'affine.txt'
        cfg.intelrt.T1_to_MNI = cfg.intelrt.wf_dir + 'ants_t1_to_mniComposite.h5'
        cfg.intelrt.ref_BOLD = cfg.intelrt.wf_dir + 'ref_image.nii.gz'

def main():
    # MAKES STRUCT WITH ALL PARAMETERS IN IT
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config', '-c', default='greenEyes_organized.toml', type=str,
                           help='experiment config file (.json or .toml)')
    argParser.add_argument('--machine', '-m', default='intelrt', type=str,
                           help='which machine is running this script (intelrt) or (cloud)')
    args = argParser.parse_args()
    params = StructDict({'config': args.config, 'machine': args.machine})

    cfg = loadConfigFile(params.config)
    cfg.bids_id = 'sub-{0:03d}'.format(cfg.subjectNum)
    cfg.ses_id = 'ses-{0:02d}'.format(1)
    # get subject

    if params.machine == 'intelrt':
        # get the intel computer ready
        cfg = buildSubjectFoldersIntelrt(cfg)
        if cfg.subjectDay == 2:
            cluster_wf_dir = '{0}/derivatives/work/fmriprep_wf/single_subject_{1:03d}_wf'.format(
                cfg.clusterBidsDir, cfg.subjectNum)
            cluster_BOLD_to_T1 = cluster_wf_dir + '/func_preproc_ses_01_task_story_run_01_wf/bold_reg_wf/bbreg_wf/fsl2itk_fwd/affine.txt'
            cluster_T1_to_MNI = cluster_wf_dir + '/anat_preproc_wf/t1_2_mni/ants_t1_to_mniComposite.h5'
            cluster_ref_BOLD = glob.glob(cluster_wf_dir + '/func_preproc_ses_01_task_story_run_01_wf/bold_reference_wf/gen_ref/ref_image.nii.gz')[0]
            copyClusterFileToIntel(cluster_BOLD_to_T1, cfg.subject_offline_registration_path)
            copyClusterFileToIntel(cluster_T1_to_MNI, cfg.subject_offline_registration_path)
            copyClusterFileToIntel(cluster_ref_BOLD, cfg.subject_offline_registration_path)
Example #17
import os
import shutil
# Add current working dir so main can be run from the top level rtAttenPenn directory
import sys
sys.path.append(os.getcwd())
from rtfMRI.RtfMRIClient import loadConfigFile, validateRunCfg
from rtAtten.RtAttenModel import getSubjectDataDir
from rtAtten.PatternsDesign2Config import createRunConfig, getLocalPatternsFile

# parse experiment file
# copy run images every 2 seconds

expFile = 'conf/example.toml'
srcDir = '/scratch/amennen/newdataforgrant/20180122.0122182_rtAttenPenn.0122182_rtAttenPenn'
timeDelay = 2  # seconds

cfg = loadConfigFile(expFile)
dstDir = cfg.session.imgDir
print("Destination Dir: {}".format(dstDir))
if not os.path.exists(dstDir):
    os.makedirs(dstDir)

subjectDataDir = getSubjectDataDir(cfg.session.dataDir, cfg.session.subjectNum,
                                   cfg.session.subjectDay)
for runId in cfg.session.Runs:
    patterns, _ = getLocalPatternsFile(cfg.session, subjectDataDir, runId)
    run = createRunConfig(cfg.session, patterns, runId)
    validateRunCfg(run)
    scanNumStr = str(run.scanNum).zfill(2)
    for blockGroup in run.blockGroups:
        for block in blockGroup.blocks:
            for TR in block.TRs:
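                # The original listing is truncated here. A hypothetical body, given
                # the script's stated purpose of copying run images every timeDelay
                # seconds; the filename pattern (modeled on the synthetic-data
                # generator in Example #8) and the TR.vol attribute are guesses, and
                # "import time" would also be needed at the top of the script.
                fname = '001_0000{}_000{}.dcm'.format(scanNumStr, str(TR.vol).zfill(3))
                shutil.copy(os.path.join(srcDir, fname), dstDir)
                time.sleep(timeDelay)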
Example #18
def configData():
    currentDir = os.path.dirname(os.path.realpath(__file__))
    cfg = loadConfigFile(
        os.path.join(currentDir, '../rtfMRI/syntheticDataCfg.toml'))
    return cfg
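configData reads like a test helper; a minimal sketch of using it as a pytest fixture (assuming pytest is the test runner; the test body is illustrative):

import pytest

@pytest.fixture(scope='module')
def cfg():
    return configData()

def test_session_present(cfg):
    assert cfg.session is not None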