Example #1
def main(argv=None):
    """
    This is the main function that is called when you run 'initialize.py'.
    
    Here, you will load the configuration settings specified in the toml configuration 
    file, initiate the class dataInterface, and set up some directories and other 
    important things through 'initialize()'
    """

    # define the parameters that will be recognized later on to set up fileInterface
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config',
                           '-c',
                           default=defaultConfig,
                           type=str,
                           help='experiment config file (.json or .toml)')
    args = argParser.parse_args(argv)

    # load the experiment configuration file
    cfg = utils.loadConfigFile(args.config)

    # establish the RPC connection to the projectInterface
    clientInterface = ClientInterface()

    # now that we have the necessary variables, call the function 'initialize' in
    #   order to actually start reading dicoms and doing your analyses of interest!
    #   INPUT:
    #       [1] cfg (configuration file with important variables)
    #       [2] dataInterface (this will allow a script from the cloud to access files
    #               from the stimulus computer)
    initialize(cfg, clientInterface.dataInterface)
    return 0
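
Because main() takes an explicit argv parameter, the same entry point can be driven from the command line, from another script, or from a test. A minimal sketch (the config path below is hypothetical):

if __name__ == "__main__":
    # equivalent to: python initialize.py --config conf/myStudy.toml
    main(['--config', 'conf/myStudy.toml'])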
Example #2
 def on_getDefaultConfig(self):
     """Return default configuration settings for the project"""
     # TODO - may need to remove certain fields that can't be jsonified
     if self.configFilename is not None and self.configFilename != '':
         cfg = loadConfigFile(self.configFilename)
     else:
         cfg = self.cfg
     self.webUI.sendConfig(cfg, filename=self.configFilename)
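
The pattern above -- re-read the config from disk when a filename is known, so edits made between requests are picked up, and fall back to the cached in-memory object otherwise -- can be factored into a small helper. A hypothetical sketch (freshestConfig is not part of the framework):

def freshestConfig(configFilename, cachedCfg):
    # prefer the on-disk file so recent edits are reflected; else use the cache
    if configFilename is not None and configFilename != '':
        return loadConfigFile(configFilename)
    return cachedCfg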
Example #3
def main():
    random.seed(datetime.now().timestamp())  # seed() expects a number/str; datetime objects are rejected in newer Python
    # MAKES STRUCT WITH ALL PARAMETERS IN IT
    defaultConfig = os.path.join(currPath, 'conf/greenEyes_organized.toml')
    #defaultConfig = 'conf/greenEyes_organized.toml'
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config', '-c', default=defaultConfig, type=str,
                           help='experiment config file (.json or .toml)')
    argParser.add_argument('--addr', '-a', default='localhost', type=str,
                           help='server ip address')
    args = argParser.parse_args()
    params = StructDict({'config': args.config})

    cfg = loadConfigFile(params.config)
    #cfg = loadConfigFile(defaultConfig)
    # TESTING
    cfg.bids_id = 'sub-{0:03d}'.format(cfg.subjectNum)
    cfg.ses_id = 'ses-{0:02d}'.format(cfg.subjectDay)
    # get subj
    if cfg.machine == 'intel':
        # get intel computer ready
        cfg = buildSubjectFoldersIntelrt(cfg)
        if cfg.subjectDay == 2:
            cluster_wf_dir = '{0}/derivatives/work/fmriprep_wf/single_subject_{1:03d}_wf'.format(cfg.cluster.clusterBidsDir, cfg.subjectNum)
            cluster_BOLD_to_T1 = cluster_wf_dir + '/func_preproc_ses_01_task_examplefunc_run_01_wf/bold_reg_wf/bbreg_wf/fsl2itk_fwd/affine.txt'
            cluster_T1_to_MNI = cluster_wf_dir + '/anat_preproc_wf/t1_2_mni/ants_t1_to_mniComposite.h5'
            cluster_ref_BOLD = cluster_wf_dir + '/func_preproc_ses_01_task_examplefunc_run_01_wf/bold_reference_wf/gen_ref/ref_image.nii.gz'
            copyClusterFileToIntel(cluster_BOLD_to_T1, cfg.subject_offline_registration_path)
            copyClusterFileToIntel(cluster_T1_to_MNI, cfg.subject_offline_registration_path)
            copyClusterFileToIntel(cluster_ref_BOLD, cfg.subject_offline_registration_path)
            # now see if you need to randomly draw the interpretation
            makeSubjectInterpretation(cfg)
            if cfg.mode == 'cloud':  # also copy files to the cloud computer -- easier here to just copy the entire folder
                cfg.subject_full_path = '{0}/data/{1}'.format(cfg.intelrt.codeDir, cfg.bids_id)
                locationToSend = '{0}/data/'.format(cfg.cloud.codeDir)
                if args.addr != 'localhost':
                    copyIntelFolderToCloud(cfg.subject_full_path, locationToSend, args.addr)
                else:
                    logging.warning('YOU NEED TO INPUT CLOUD IP ADDR!!')
                    print('YOU NEED TO INPUT CLOUD IP ADDR!!')
    # elif cfg.machine == 'cloud':
    #     # get cloud computer ready
    #     cfg = buildSubjectFoldersCloud(cfg)
    #     fileInterface = FileInterface()
    #     retrieveIntelFileAndSaveToCloud(cfg.intelrt.BOLD_to_T1,cfg.subject_offline_registration_path,fileInterface)
    #     retrieveIntelFileAndSaveToCloud(cfg.intelrt.T1_to_MNI,cfg.subject_offline_registration_path,fileInterface)
    #     retrieveIntelFileAndSaveToCloud(cfg.intelrt.ref_BOLD,cfg.subject_offline_registration_path,fileInterface)
    #     retrieveIntelFileAndSaveToCloud(cfg.intelrt.interpretationFile,cfg.subject_full_day_path,fileInterface)
    elif cfg.machine == 'cluster': # running on cluster computer
        cluster_wf_dir = '{0}/derivatives/work/fmriprep_wf/single_subject_{1:03d}_wf'.format(cfg.cluster.clusterBidsDir, cfg.subjectNum)
        cluster_BOLD_to_T1 = cluster_wf_dir + '/func_preproc_ses_01_task_story_run_01_wf/bold_reg_wf/bbreg_wf/fsl2itk_fwd/affine.txt'
        cluster_T1_to_MNI = cluster_wf_dir + '/anat_preproc_wf/t1_2_mni/ants_t1_to_mniComposite.h5'
        cluster_ref_BOLD = glob.glob(cluster_wf_dir + '/func_preproc_ses_01_task_story_run_01_wf/bold_reference_wf/gen_ref/ref_image.nii.gz')[0]
        cfg = buildSubjectFoldersCluster(cfg)
        copyClusterFileToCluster(cluster_BOLD_to_T1, cfg.subject_offline_registration_path)
        copyClusterFileToCluster(cluster_T1_to_MNI, cfg.subject_offline_registration_path)
        copyClusterFileToCluster(cluster_ref_BOLD, cfg.subject_offline_registration_path)
        makeSubjectInterpretation(cfg)
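
The branches above only run if the TOML file defines the fields they dereference. A sketch of the minimal shape, written as the nested StructDict the loader would produce (all values are placeholders, not taken from the original project):

from rtCommon.structDict import StructDict

exampleCfg = StructDict({
    'machine': 'intel',      # 'intel' or 'cluster' selects the branch
    'mode': 'cloud',         # 'cloud' additionally pushes the subject folder out
    'subjectNum': 1,
    'subjectDay': 2,
    'cluster': StructDict({'clusterBidsDir': '/path/to/bids'}),
    'intelrt': StructDict({'codeDir': '/path/to/intelrt/code'}),
    'cloud': StructDict({'codeDir': '/path/to/cloud/code'}),
})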
Example #4
def main(argv=None):
    """
    This is the main function that is called when you run 'initialize.py'.
    
    Here, you will load the configuration settings specified in the toml configuration 
    file, initiate the class dataInterface, and set up some directories and other 
    important things through 'initialize()'
    """

    # define the parameters that will be recognized later on to set up fileInterface
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config',
                           '-c',
                           default=defaultConfig,
                           type=str,
                           help='experiment config file (.json or .toml)')
    argParser.add_argument('--runs',
                           '-r',
                           default='',
                           type=str,
                           help='Comma separated list of run numbers')
    argParser.add_argument('--scans',
                           '-s',
                           default='',
                           type=str,
                           help='Comma separated list of scan numbers')
    args = argParser.parse_args(argv)

    print('Initializing directories and configurations')

    # establish the RPC connection to the projectInterface
    clientInterface = ClientInterface()
    dataInterface = clientInterface.dataInterface
    args.dataRemote = clientInterface.isDataRemote()

    # load the experiment configuration file
    cfg = utils.loadConfigFile(args.config)
    cfg = initialize(cfg, args)

    # build subject folders on server
    if args.dataRemote:
        print('Files Remote Case')

        buildSubjectFoldersOnServer(cfg)

        # next, transfer transformation files from local --> server for online processing
        uploadFolderToCloud(dataInterface, cfg.local.wf_dir, cfg.server.wf_dir)

        # upload ROI folder to cloud server - we would need to do this if we were using
        # a standard mask, but we're not in this case
        #uploadFolderToCloud(dataInterface, cfg.local.maskDir, cfg.server.maskDir)

        # upload all transformed masks to the cloud
        uploadFilesFromList(dataInterface, cfg.local_MASK_transformed,
                            cfg.subject_reg_dir)

    print('Initialization Complete!')
    return 0
Example #5
def main(argv=None):
    """
    This is the main function that is called when you run 'initialize.py'.
    
    Here, you will load the configuration settings specified in the toml configuration 
    file, initiate the class fileInterface, and set up some directories and other 
    important things through 'initialize()'
    """

    # define the parameters that will be recognized later on to set up fileInterface
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config', '-c', default=defaultConfig, type=str,
                           help='experiment config file (.json or .toml)')
    # This parameter is used for projectInterface
    argParser.add_argument('--commpipe', '-q', default=None, type=str,
                           help='Named pipe to communicate with projectInterface')
    argParser.add_argument('--filesremote', '-x', default=False, action='store_true',
                           help='retrieve files from the remote server')
    argParser.add_argument('--addr', '-a', default='localhost', type=str,
                           help='server ip address')
    argParser.add_argument('--runs', '-r', default='', type=str,
                           help='Comma separated list of run numbers')
    argParser.add_argument('--scans', '-s', default='', type=str,
                           help='Comma separated list of scan numbers')
    args = argParser.parse_args(argv)

    # load the experiment configuration file
    cfg = utils.loadConfigFile(args.config)
    cfg = initialize(cfg, args)

    # build subject folders on server
    if args.filesremote:
        buildSubjectFoldersOnServer(cfg)

        # open up the communication pipe using 'projectInterface'
        projectComm = projUtils.initProjectComm(args.commpipe, args.filesremote)

        # initiate the 'fileInterface' class, which will allow you to read and write 
        #   files and many other things using functions found in 'fileClient.py'
        #   INPUT:
        #       [1] args.filesremote (to retrieve dicom files from the remote server)
        #       [2] projectComm (communication pipe that is set up above)
        fileInterface = FileInterface(filesremote=args.filesremote, commPipes=projectComm)

        # next, transfer transformation files from local --> server for online processing
        projUtils.uploadFolderToCloud(fileInterface, cfg.local.wf_dir, cfg.server.wf_dir)

        # upload ROI folder to cloud server - we would need to do this if we were using
        # a standard mask, but we're not in this case
        #projUtils.uploadFolderToCloud(fileInterface,cfg.local.maskDir,cfg.server.maskDir)

        # upload all transformed masks to the cloud
        projUtils.uploadFilesFromList(fileInterface, cfg.local_MASK_transformed, cfg.subject_reg_dir)
    return 0
Example #6
def main(argv=None):
    """
    This is the main function that is called when you run 'finalize.py'.
    
    Here, you will load the configuration settings specified in the toml configuration 
    file, initiate the class fileInterface, and set up some directories and other 
    important things through 'finalize()'
    """

    # define the parameters that will be recognized later on to set up fileInterface
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config',
                           '-c',
                           default=defaultConfig,
                           type=str,
                           help='experiment config file (.json or .toml)')
    # This parameter is used for projectInterface
    argParser.add_argument(
        '--commpipe',
        '-q',
        default=None,
        type=str,
        help='Named pipe to communicate with projectInterface')
    argParser.add_argument('--filesremote',
                           '-x',
                           default=False,
                           action='store_true',
                           help='retrieve files from the remote server')
    args = argParser.parse_args(argv)

    # load the experiment configuration file
    cfg = utils.loadConfigFile(args.config)

    # open up the communication pipe using 'projectInterface'
    projectComm = projUtils.initProjectComm(args.commpipe, args.filesremote)

    # initiate the 'fileInterface' class, which will allow you to read and write
    #   files and many other things using functions found in 'fileClient.py'
    #   INPUT:
    #       [1] args.filesremote (to retrieve dicom files from the remote server)
    #       [2] projectComm (communication pipe that is set up above)
    fileInterface = FileInterface(filesremote=args.filesremote,
                                  commPipes=projectComm)

    # now that we have the necessary variables, call the function 'finalize' in
    #   order to actually start reading dicoms and doing your analyses of interest!
    #   INPUT:
    #       [1] cfg (configuration file with important variables)
    #       [2] fileInterface (this will allow a script from the cloud to access files
    #               from the stimulus computer)
    #       [3] projectComm (communication pipe to talk with projectInterface)
    finalize(cfg, fileInterface, projectComm)
    return 0
Example #7
def getStationInformation(config='conf/greenEyes_cluster.toml'):
    allinfo = {}
    cfg = loadConfigFile(config)
    station_FN = cfg.cluster.classifierDir + '/' + cfg.stationDict
    stationDict = np.load(station_FN, allow_pickle=True).item()
    nStations = len(stationDict)
    last_tr_in_station = np.zeros((nStations, ))
    allTR = list(stationDict.values())
    all_station_TRs = [item for sublist in allTR for item in sublist]
    for st in np.arange(nStations):
        last_tr_in_station[st] = stationDict[st][-1]
    return nStations, stationDict, last_tr_in_station, all_station_TRs
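
The station file is a pickled dict mapping each station index to the list of TRs belonging to that station; the function reduces it to a few per-station summaries. A toy illustration of the same reduction (the TR values are made up):

import numpy as np

stationDict = {0: [10, 11, 12], 1: [25, 26], 2: [40, 41, 42, 43]}
nStations = len(stationDict)                                              # 3
last_tr_in_station = np.array([trs[-1] for trs in stationDict.values()])  # array([12, 26, 43])
all_station_TRs = [tr for trs in stationDict.values() for tr in trs]      # flat list of every station TR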
Example #8
def getStationInformation(config='conf/greenEyes_cluster.toml'):
    allinfo = {}
    cfg = loadConfigFile(config)
    # make it so it automatically uses the 9 station version
    #station_FN = cfg.cluster.classifierDir + '/' + cfg.stationDict
    station_FN = cfg.cluster.classifierDir + '/' + 'upper_right_winners_nofilter.npy'
    stationDict = np.load(station_FN, allow_pickle=True).item()
    n_stations = len(stationDict)
    last_tr_in_station = np.zeros((n_stations, ))
    allTR = list(stationDict.values())
    all_station_TRs = [item for sublist in allTR for item in sublist]
    for st in np.arange(n_stations):
        last_tr_in_station[st] = stationDict[st][-1]
    return n_stations, stationDict, last_tr_in_station, all_station_TRs
Example #9
 def __init__(self, webDisplayInterface, params, cfg):
     self.webUI = webDisplayInterface
     self.runInfo = StructDict({'threadId': None, 'stopRun': False})
     self.confDir = params.confDir
     self.configFilename = None
     if not os.path.exists(self.confDir):
         os.makedirs(self.confDir)
     if type(cfg) is str:
         self.configFilename = cfg
         cfg = loadConfigFile(self.configFilename)
     self.cfg = cfg
     self.scripts = {}
     self._addScript('mainScript', params.mainScript, 'run')
     self._addScript('initScript', params.initScript, 'init')
     self._addScript('finalizeScript', params.finalizeScript, 'finalize')
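
For this constructor to work, params must carry the fields it dereferences. A hypothetical construction (the class name RunManager, the webUI object, and all paths are placeholders for whatever this __init__ belongs to):

from rtCommon.structDict import StructDict

params = StructDict({
    'confDir': '/tmp/conf',                           # created if missing
    'mainScript': 'projects/myStudy/myStudy.py',      # registered under tag 'run'
    'initScript': 'projects/myStudy/initialize.py',   # tag 'init'
    'finalizeScript': 'projects/myStudy/finalize.py', # tag 'finalize'
})
handler = RunManager(webUI, params, 'conf/myStudy.toml')  # cfg may be a path or an already-loaded config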
Example #10
def main(argv=None):
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config',
                           '-c',
                           default=defaultConfig,
                           type=str,
                           help='experiment config file (.json or .toml)')
    argParser.add_argument('--runs',
                           '-r',
                           default=None,
                           type=str,
                           help='Comma separated list of run numbers')
    argParser.add_argument('--yesToPrompts',
                           '-y',
                           default=False,
                           action='store_true',
                           help='automatically answer yes to any prompts')
    argParser.add_argument(
        '--archive',
        '-a',
        default=False,
        action='store_true',
        help='Create a Bids Archive from the incoming Bids Incrementals.')
    args = argParser.parse_args(argv)

    # load the experiment configuration file
    cfg = loadConfigFile(args.config)

    # override config file run and scan values if specified
    if args.runs is not None:
        print("runs: ", args.runs)
        cfg.runNum = [int(x) for x in args.runs.split(',')]

    if args.archive is True:
        cfg.writeBidsArchive = True

    # Initialize the RPC connection to the projectInterface
    # This will give us a dataInterface for retrieving files and
    # a subjectInterface for giving feedback
    clientInterfaces = ClientInterface(yesToPrompts=args.yesToPrompts)
    bidsInterface = clientInterfaces.bidsInterface
    subjInterface = clientInterfaces.subjInterface
    webInterface = clientInterfaces.webInterface

    doRuns(cfg, bidsInterface, subjInterface, webInterface)
    return 0
Example #11
def main(argv=None):
    """
    This is the main function that is called when you run 'sample.py'.

    Here, you will load the configuration settings specified in the toml configuration
    file, initiate the clientInterface for communication with the projectServer (via
    its sub-interfaces: dataInterface, subjInterface, and webInterface), and then call
    the function 'doRuns' to actually start doing the experiment.
    """
    global verbose, useInitWatch

    # Some generally recommended arguments to parse for all experiment scripts
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config', '-c', default=defaultConfig, type=str,
                           help='experiment config file (.json or .toml)')
    argParser.add_argument('--runs', '-r', default=None, type=str,
                           help='Comma separated list of run numbers')
    argParser.add_argument('--scans', '-s', default=None, type=str,
                           help='Comma separated list of scan numbers')
    argParser.add_argument('--yesToPrompts', '-y', default=False, action='store_true',
                           help='automatically answer yes to any prompts')

    # Some additional parameters only used for this sample project
    argParser.add_argument('--useInitWatch', '-w', default=False, action='store_true',
                           help='use initWatch() functions instead of stream functions')
    argParser.add_argument('--noVerbose', '-nv', default=False, action='store_true',
                           help='do not print verbose output')

    args = argParser.parse_args(argv)

    useInitWatch = args.useInitWatch
    verbose = not args.noVerbose

    # load the experiment configuration file
    cfg = loadConfigFile(args.config)

    # override config file run and scan values if specified
    if args.runs is not None:
        print("runs: ", args.runs)
        cfg.runNum = [int(x) for x in args.runs.split(',')]
    if args.scans is not None:
        print("scans: ", args.scans)
        cfg.ScanNum = [int(x) for x in args.scans.split(',')]

    # Initialize the RPC connection to the projectInterface.
    # This will give us a dataInterface for retrieving files,
    # a subjectInterface for giving feedback, and a webInterface
    # for updating what is displayed on the experimenter's webpage.
    clientInterfaces = ClientInterface(yesToPrompts=args.yesToPrompts)
    dataInterface = clientInterfaces.dataInterface
    subjInterface = clientInterfaces.subjInterface
    webInterface  = clientInterfaces.webInterface

    # Also try the placeholder for bidsInterface (an upcoming feature)
    bidsInterface = clientInterfaces.bidsInterface
    res = bidsInterface.echo("test")
    print(res)

    # obtain paths for important directories (e.g. location of dicom files)
    if cfg.imgDir is None:
        cfg.imgDir = os.path.join(currPath, 'dicomDir')
    cfg.codeDir = currPath

    # now that we have the necessary variables, call the function 'doRuns' in order
    #   to actually start reading dicoms and doing your analyses of interest!
    #   INPUT:
    #       [1] cfg (configuration file with important variables)
    #       [2] dataInterface (this will allow a script from the cloud to access files
    #            from the stimulus computer that receives dicoms from the Siemens
    #            console computer)
    #       [3] subjInterface - this allows sending feedback (e.g. classification results)
    #            to a subjectService running on the presentation computer to provide
    #            feedback to the subject (and optionally get their response).
    #       [4] webInterface - this allows updating information on the experimenter webpage.
    #            For example to plot data points, or update status messages.
    doRuns(cfg, dataInterface, subjInterface, webInterface)
    return 0
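
The comment block above spells out the contract that doRuns must honor. A skeletal stub with that signature (the body is placeholder logic, not the sample project's implementation):

def doRuns(cfg, dataInterface, subjInterface, webInterface):
    # for each configured run: stream DICOMs in, analyze, send feedback, plot
    for runNum in cfg.runNum:
        print(f'starting run {runNum}')
        # ... retrieve images via dataInterface and compute a result per TR ...
        # subjInterface.setResult(runNum, trId, value)     # feedback to the subject
        # webInterface.plotDataPoint(runNum, trId, value)  # point on the experimenter page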
Example #12
def defaultBrowserMainCallback(client, message):
    request = json.loads(message)
    if 'config' in request:
        # Common code for any command that sends config information - retrieve the config info
        cfgData = request['config']
        newCfg = recurseCreateStructDict(cfgData)
        if newCfg is not None:
            Web.cfg = newCfg
        else:
            if cfgData is None:
                errStr = 'browserMainCallback: Config field is None'
            elif type(cfgData) not in (dict, list):
                errStr = 'browserMainCallback: Config field wrong type {}'.format(type(cfgData))
            else:
                errStr = 'browserMainCallback: Error parsing config field {}'.format(cfgData)
            Web.setUserError(errStr)
            return

    cmd = request['cmd']
    logging.log(DebugLevels.L3, "WEB USER CMD: %s", cmd)
    if cmd == "getDefaultConfig":
        # TODO - may need to remove certain fields that can't be jsonified
        if Web.configFilename is not None and Web.configFilename != '':
            cfg = loadConfigFile(Web.configFilename)
        else:
            cfg = Web.cfg
        Web.sendUserConfig(cfg, filename=Web.configFilename)
    elif cmd == "getDataPoints":
        Web.sendUserDataVals(Web.resultVals)
    elif cmd == "clearDataPoints":
        Web.resultVals = [[{'x': 0, 'y': 0}]]
    elif cmd == "run" or cmd == "initSession" or cmd == "finalizeSession":
        if Web.runInfo.threadId is not None:
            Web.runInfo.threadId.join(timeout=1)
            if Web.runInfo.threadId.is_alive():
                Web.setUserError("Client thread already runnning, skipping new request")
                return
            Web.runInfo.threadId = None
        Web.runInfo.stopRun = False
        if cmd == 'run':
            sessionScript = Web.fmriPyScript
            tag = 'running'
            logType = 'run'
        elif cmd == 'initSession':
            sessionScript = Web.initScript
            tag = 'initializing'
            logType = 'prep'
        elif cmd == "finalizeSession":
            sessionScript = Web.finalizeScript
            tag = 'finalizing'
            logType = 'prep'
        if sessionScript is None or sessionScript == '':
            Web.setUserError("{} script not set".format(cmd))
            return
        Web.runInfo.threadId = threading.Thread(name='sessionThread', target=runSession,
                                                args=(Web.cfg, sessionScript,
                                                      Web.filesremote, tag, logType))
        Web.runInfo.threadId.daemon = True
        Web.runInfo.threadId.start()
    elif cmd == "stop":
        if Web.runInfo.threadId is not None:
            Web.runInfo.stopRun = True
            Web.runInfo.threadId.join(timeout=1)
            if not Web.runInfo.threadId.is_alive():
                Web.runInfo.threadId = None
                Web.runInfo.stopRun = False
    elif cmd == "uploadFiles":
        if Web.runInfo.uploadThread is not None:
            Web.runInfo.uploadThread.join(timeout=1)
            if Web.runInfo.uploadThread.is_alive():
                Web.setUserError("Upload thread already runnning, skipping new request")
                return
        Web.runInfo.uploadThread = threading.Thread(name='uploadFiles',
                                                    target=uploadFiles,
                                                    args=(request,))
        Web.runInfo.uploadThread.daemon = True
        Web.runInfo.uploadThread.start()
    else:
        Web.setUserError("unknown command " + cmd)
Example #13
    def start(params, cfg, testMode=False):
        if Web.app is not None:
            raise RuntimeError("Web Server already running.")
        Web.testMode = testMode
        # Set default value before checking for param overrides
        Web.browserMainCallback = defaultBrowserMainCallback
        Web.browserBiofeedCallback = defaultBrowserBiofeedCallback
        Web.eventCallback = defaultEventCallback
        if params.browserMainCallback:
            Web.browserMainCallback = params.browserMainCallback
        if params.browserBiofeedCallback:
            Web.browserBiofeedCallback = params.browserBiofeedCallback
        if params.eventCallback:
            Web.eventCallback = params.eventCallback
        if params.htmlDir:
            Web.htmlDir = params.htmlDir
            Web.webDir = os.path.dirname(Web.htmlDir)
        if params.port:
            Web.httpPort = params.port
        Web.fmriPyScript = params.fmriPyScript
        Web.initScript = params.initScript
        Web.finalizeScript = params.finalizeScript
        Web.filesremote = params.filesremote
        if type(cfg) is str:
            Web.configFilename = cfg
            cfg = loadConfigFile(Web.configFilename)
        Web.cfg = cfg
        if not os.path.exists(Web.confDir):
            os.makedirs(Web.confDir)
        src_root = os.path.join(Web.webDir, 'src')
        css_root = os.path.join(Web.webDir, 'css')
        img_root = os.path.join(Web.webDir, 'img')
        build_root = os.path.join(Web.webDir, 'build')
        cookieSecret = getCookieSecret(certsDir)
        settings = {
            "cookie_secret": cookieSecret,
            "login_url": "/login",
            "xsrf_cookies": True,
            "websocket_max_message_size": 16*1024*1024,
            # "max_message_size": 1024*1024*256,
            # "max_buffer_size": 1024*1024*256,
        }
        Web.app = tornado.web.Application([
            (r'/', Web.UserHttp),
            (r'/login', Web.LoginHandler),
            (r'/logout', Web.LogoutHandler),
            (r'/feedback', Web.BiofeedbackHttp),  # shows image
            (r'/wsUser', Web.UserWebSocket),
            (r'/wsSubject', Web.BiofeedbackWebSocket),
            (r'/wsData', Web.DataWebSocket),
            (r'/wsEvents', Web.EventWebSocket),  # gets signal to change image
            (r'/src/(.*)', tornado.web.StaticFileHandler, {'path': src_root}),
            (r'/css/(.*)', tornado.web.StaticFileHandler, {'path': css_root}),
            (r'/img/(.*)', tornado.web.StaticFileHandler, {'path': img_root}),
            (r'/build/(.*)', tornado.web.StaticFileHandler, {'path': build_root}),
        ], **settings)
        # start event loop if needed
        try:
            asyncio.get_event_loop()
        except RuntimeError as err:
            # RuntimeError thrown if no current event loop
            # Start the event loop
            asyncio.set_event_loop(asyncio.new_event_loop())

        # start thread listening for remote file requests on a default named pipe
        commPipes = makeFifo(pipename=defaultPipeName)
        fifoThread = threading.Thread(name='defaultPipeThread', target=repeatPipeRequestHandler, args=(commPipes,))
        fifoThread.daemon = True
        fifoThread.start()

        if Web.testMode is True:
            print("Listening on: http://localhost:{}".format(Web.httpPort))
            ssl_ctx = None
        else:
            ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ssl_ctx.load_cert_chain(getCertPath(certsDir, sslCertFile),
                                    getKeyPath(certsDir, sslPrivateKey))
            print("Listening on: https://localhost:{}".format(Web.httpPort))

        Web.httpServer = tornado.httpserver.HTTPServer(Web.app, ssl_options=ssl_ctx)
        Web.httpServer.listen(Web.httpPort)
        Web.ioLoopInst = tornado.ioloop.IOLoop.current()
        Web.ioLoopInst.start()
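
Note that start() blocks inside the tornado IO loop, so it should be the last call on its thread. A hypothetical invocation, mirroring Example #14, that shows the params fields the method dereferences (paths and port are placeholders):

from rtCommon.structDict import StructDict

params = StructDict({
    'fmriPyScript': 'projects/myStudy/myStudy.py',
    'initScript': 'projects/myStudy/initialize.py',
    'finalizeScript': 'projects/myStudy/finalize.py',
    'filesremote': False,
    'port': 8888,                  # overrides Web.httpPort
    'htmlDir': None,               # falsy entries keep the class defaults
    'browserMainCallback': None,
    'browserBiofeedCallback': None,
    'eventCallback': None,
})
web = Web()
web.start(params, 'conf/myStudy.toml', testMode=True)  # testMode=True serves plain http, no SSL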
Example #14
defaultConfig = os.path.join(currPath, 'conf/faceMatching_organized.toml')

if __name__ == "__main__":
    installLoggers(logging.INFO,
                   logging.INFO,
                   filename=os.path.join(currPath, 'logs/webServer.log'))

    argParser = argparse.ArgumentParser()
    argParser.add_argument('--filesremote',
                           '-x',
                           default=False,
                           action='store_true',
                           help='dicom files retrieved from remote server')
    argParser.add_argument('--config',
                           '-c',
                           default=defaultConfig,
                           type=str,
                           help='experiment file (.json or .toml)')
    args = argParser.parse_args()
    # Set the path to the fMRI Python script to run
    params = StructDict({
        'fmriPyScript': 'projects/faceMatching/faceMatching.py',
        'filesremote': args.filesremote,
    })

    cfg = loadConfigFile(args.config)

    web = Web()
    web.start(params, cfg)
Example #15
def main(argv=None):
    """
	This is the main function that is called when you run 'finialize.py'.

	Here, you will load the configuration settings specified in the toml configuration 
	file, initiate the class fileInterface, and set up some directories and other 
	important things through 'finalize()'
	"""

    # define the parameters that will be recognized later on to set up fileInterface
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config',
                           '-c',
                           default=defaultConfig,
                           type=str,
                           help='experiment config file (.json or .toml)')
    # This parameter is used for projectInterface
    argParser.add_argument(
        '--commpipe',
        '-q',
        default=None,
        type=str,
        help='Named pipe to communicate with projectInterface')
    argParser.add_argument('--filesremote',
                           '-x',
                           default=False,
                           action='store_true',
                           help='retrieve files from the remote server')
    argParser.add_argument('--addr',
                           '-a',
                           default='localhost',
                           type=str,
                           help='server ip address')
    argParser.add_argument('--runs',
                           '-r',
                           default='',
                           type=str,
                           help='Comma separated list of run numbers')
    argParser.add_argument('--scans',
                           '-s',
                           default='',
                           type=str,
                           help='Comma separated list of scan numbers')
    args = argParser.parse_args(argv)

    # load the experiment configuration file
    cfg = utils.loadConfigFile(args.config)
    cfg = initialize(cfg, args)
    print(args.config)
    nRunsCompleted = finalize(cfg, args)
    # copy subject folders from server to local
    # subject-specific folder
    # everything in temp/convertedNiftis
    if args.filesremote:

        # open up the communication pipe using 'projectInterface'
        projectComm = projUtils.initProjectComm(args.commpipe,
                                                args.filesremote)

        # initiate the 'fileInterface' class, which will allow you to read and write
        #   files and many other things using functions found in 'fileClient.py'
        #   INPUT:
        #       [1] args.filesremote (to retrieve dicom files from the remote server)
        #       [2] projectComm (communication pipe that is set up above)
        fileInterface = FileInterface(filesremote=args.filesremote,
                                      commPipes=projectComm)

        # we don't need the tmp/convertedNiftis so first remove those
        tempNiftiDir = os.path.join(cfg.server.dataDir, 'tmp/convertedNiftis/')
        if os.path.exists(tempNiftiDir):
            projUtils.deleteFolder(tempNiftiDir)
            print(
                '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
            )
            print('deleting temporary convertedNifti folder: ', tempNiftiDir)
            print(
                '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
            )
        # next, go through each run and put each run data into local run folder
        for r in np.arange(nRunsCompleted):
            runNum = r + 1  # run numbers start at 1
            runId = 'run-{0:02d}'.format(runNum)
            runFolder = os.path.join(cfg.server.subject_full_day_path, runId,
                                     '*')
            listOfFiles = glob.glob(runFolder)
            runFolder_local = os.path.join(cfg.local.subject_full_day_path,
                                           runId)
            projUtils.downloadFilesFromList(fileInterface, listOfFiles,
                                            runFolder_local)
            print(
                '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
            )
            print('downloading data to local computer: ', runFolder)
        # next delete the entire subject folder on the cloud
        # MAKE SURE THIS IS CORRECT FOR YOUR EXPERIMENT BEFORE YOU RUN
        subject_dir = os.path.join(cfg.server.dataDir, cfg.bids_id)
        print('FOLDER TO DELETE ON CLOUD SERVER: ', subject_dir)
        print(
            'IF THIS IS CORRECT, GO BACK TO THE CONFIG FILE USED ON THE WEB SERVER COMPUTER AND CHANGE THE FLAG FROM false --> true IN [server] deleteAfter'
        )
        if cfg.server.deleteAfter:
            print(
                '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
            )
            print('DELETING SUBJECT FOLDER ON CLOUD SERVER: ', subject_dir)
            print(
                '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
            )
            if os.path.exists(subject_dir):
                projUtils.deleteFolder(subject_dir)

    return 0
Example #16
import os
import matplotlib.pyplot as plt
from rtCommon.fileClient import FileInterface
from rtCommon.structDict import StructDict
from rtCommon.utils import loadConfigFile
import rtCommon.dicomNiftiHandler as dnh
import greenEyes
params = {
    'legend.fontsize': 'large',
    'figure.figsize': (5, 3),
    'axes.labelsize': 'x-large',
    'axes.titlesize': 'x-large',
    'xtick.labelsize': 'x-large',
    'ytick.labelsize': 'x-large'
}
font = {'weight': 'bold', 'size': 22}
plt.rc('font', **font)
defaultConfig = os.path.join(os.getcwd(), 'conf/greenEyes_cluster.toml')
cfg = loadConfigFile(defaultConfig)
params = StructDict({
    'config': defaultConfig,
    'runs': '1',
    'scans': '9',
    'webpipe': 'None',
    'webfilesremote': False
})
cfg = greenEyes.initializeGreenEyes(defaultConfig, params)
# date doesn't have to be right, but just make sure subject number, session number, computers are correct


def getClassificationAndScore(data):
    correct_prob = data['correct_prob'][0, :]
    max_ratio = data['max_ratio'][0, :]
    return correct_prob, max_ratio
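
getClassificationAndScore assumes data holds arrays of shape (1, nStations) under 'correct_prob' and 'max_ratio' and returns the first row of each. A synthetic call (the numbers are made up):

import numpy as np

data = {'correct_prob': np.array([[0.61, 0.55, 0.72]]),
        'max_ratio': np.array([[0.10, 0.04, 0.22]])}
correct_prob, max_ratio = getClassificationAndScore(data)
print(correct_prob)  # [0.61 0.55 0.72]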
Example #17
def initializeGreenEyes(configFile, args):
    # load subject information
    # create directories for new niftis
    # randomize which category they'll be attending to and save that
    # purpose: load information and add to configuration things that you won't want to do each time a new file comes in
    # TO RUN AT THE START OF EACH RUN

    cfg = loadConfigFile(configFile)
    if cfg.sessionId in (None, '') or cfg.useSessionTimestamp is True:
        cfg.useSessionTimestamp = True
        cfg.sessionId = dateStr30(time.localtime())
    else:
        cfg.useSessionTimestamp = False
    # MERGE WITH PARAMS
    if args.runs != '' and args.scans != '':
        # use the run and scan numbers passed in as parameters
        cfg.runNum = [int(x) for x in args.runs.split(',')]
        cfg.scanNum = [int(x) for x in args.scans.split(',')]
    else: # when you're not specifying on the command line it's already in a list
        cfg.runNum = [int(x) for x in cfg.runNum]
        cfg.scanNum = [int(x) for x in cfg.scanNum]
    # GET DICOM DIRECTORY
    if cfg.mode != 'debug':
        if cfg.buildImgPath:
            imgDirDate = datetime.datetime.now()
            dateStr = cfg.date.lower()
            if dateStr != 'now' and dateStr != 'today':
                try:
                    imgDirDate = parser.parse(cfg.date)
                except ValueError as err:
                    raise RequestError('Unable to parse date string {} {}'.format(cfg.date, err))
            datestr = imgDirDate.strftime("%Y%m%d")
            imgDirName = "{}.{}.{}".format(datestr, cfg.subjectName, cfg.subjectName)
            cfg.dicomDir = os.path.join(cfg.intelrt.imgDir, imgDirName)
        else:
            cfg.dicomDir = cfg.intelrt.imgDir # then the whole path was supplied
        cfg.dicomNamePattern = cfg.intelrt.dicomNamePattern
    else:
        cfg.dicomDir = glob.glob(cfg.cluster.imgDir.format(cfg.subjectName))[0]
        cfg.dicomNamePattern = cfg.cluster.dicomNamePattern
    #cfg.commPipe = args.commPipe
    #cfg.webfilesremote = args.filesremote # FLAG FOR REMOTE OR LOCAL
    ########
    cfg.bids_id = 'sub-{0:03d}'.format(cfg.subjectNum)
    cfg.ses_id = 'ses-{0:02d}'.format(cfg.subjectDay)
    if cfg.mode == 'local':
        # then all processing is happening on linux too
        cfg.dataDir = cfg.intelrt.codeDir + 'data'
        cfg.classifierDir = cfg.intelrt.classifierDir
        cfg.mask_filename = os.path.join(cfg.intelrt.maskDir, cfg.MASK)
        cfg.MNI_ref_filename = os.path.join(cfg.intelrt.maskDir, cfg.MNI_ref_BOLD)
    elif cfg.mode == 'cloud':
        cfg.dataDir = cfg.cloud.codeDir + 'data'
        cfg.classifierDir = cfg.cloud.classifierDir
        cfg.mask_filename = os.path.join(cfg.cloud.maskDir, cfg.MASK)
        cfg.MNI_ref_filename = os.path.join(cfg.cloud.maskDir, cfg.MNI_ref_BOLD)
        cfg.intelrt.subject_full_day_path = '{0}/data/{1}/{2}'.format(cfg.intelrt.codeDir, cfg.bids_id, cfg.ses_id)
    elif cfg.mode == 'debug':
        cfg.dataDir = cfg.cluster.codeDir + 'data'
        cfg.classifierDir = cfg.cluster.classifierDir
        cfg.mask_filename = cfg.cluster.maskDir + cfg.MASK
        cfg.MNI_ref_filename = cfg.cluster.maskDir + cfg.MNI_ref_BOLD
    cfg.station_stats = cfg.classifierDir + 'station_stats.npz'
    cfg.subject_full_day_path = '{0}/{1}/{2}'.format(cfg.dataDir, cfg.bids_id, cfg.ses_id)
    cfg.temp_nifti_dir = '{0}/converted_niftis/'.format(cfg.subject_full_day_path)
    cfg.subject_reg_dir = '{0}/registration_outputs/'.format(cfg.subject_full_day_path)
    cfg.nStations, cfg.stationsDict, cfg.last_tr_in_station, cfg.all_station_TRs = getStationInformation(cfg)

    # REGISTRATION THINGS
    cfg.wf_dir = '{0}/{1}/ses-{2:02d}/registration/'.format(cfg.dataDir, cfg.bids_id, 1)
    cfg.BOLD_to_T1 = cfg.wf_dir + 'affine.txt'
    cfg.T1_to_MNI = cfg.wf_dir + 'ants_t1_to_mniComposite.h5'
    cfg.ref_BOLD = cfg.wf_dir + 'ref_image.nii.gz'

    # GET CONVERSION FOR HOW TO FLIP MATRICES
    cfg.axesTransform = getTransform()
    ###### BUILD SUBJECT FOLDERS #######
    return cfg
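
The BIDS identifiers built above are plain zero-padded string formatting, e.g.:

print('sub-{0:03d}'.format(3))  # sub-003
print('ses-{0:02d}'.format(2))  # ses-02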
Example #18
def main(argv=None):
    """
    This is the main function that is called when you run 'sample.py'.

    Here, you will load the configuration settings specified in the toml configuration
    file, initiate the class fileInterface, and then call the function 'doRuns' to
    actually start doing the experiment.
    """

    # define the parameters that will be recognized later on to set up fileInterface
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config',
                           '-c',
                           default=defaultConfig,
                           type=str,
                           help='experiment config file (.json or .toml)')
    argParser.add_argument('--runs',
                           '-r',
                           default='',
                           type=str,
                           help='Comma separated list of run numbers')
    argParser.add_argument('--scans',
                           '-s',
                           default='',
                           type=str,
                           help='Comma separated list of scan numbers')
    # This parameter is used for projectInterface
    argParser.add_argument(
        '--commpipe',
        '-q',
        default=None,
        type=str,
        help='Named pipe to communicate with projectInterface')
    argParser.add_argument('--filesremote',
                           '-x',
                           default=False,
                           action='store_true',
                           help='retrieve dicom files from the remote server')
    args = argParser.parse_args(argv)

    # load the experiment configuration file
    cfg = loadConfigFile(args.config)

    # obtain paths for important directories (e.g. location of dicom files)
    if cfg.imgDir is None:
        cfg.imgDir = os.path.join(currPath, 'dicomDir')
    cfg.codeDir = currPath

    # open up the communication pipe using 'projectInterface'
    projectComm = projUtils.initProjectComm(args.commpipe, args.filesremote)

    # initiate the 'fileInterface' class, which will allow you to read and write
    #   files and many other things using functions found in 'fileClient.py'
    #   INPUT:
    #       [1] args.filesremote (to retrieve dicom files from the remote server)
    #       [2] projectComm (communication pipe that is set up above)
    fileInterface = FileInterface(filesremote=args.filesremote,
                                  commPipes=projectComm)

    # now that we have the necessary variables, call the function 'doRuns' in order
    #   to actually start reading dicoms and doing your analyses of interest!
    #   INPUT:
    #       [1] cfg (configuration file with important variables)
    #       [2] fileInterface (this will allow a script from the cloud to access files
    #               from the stimulus computer that receives dicoms from the Siemens
    #               console computer)
    #       [3] projectComm (communication pipe to talk with projectInterface)
    doRuns(cfg, fileInterface, projectComm)

    return 0
Example #19
def initializeFaceMatching(configFile, args):
    # load subject information
    # create directories for new niftis
    # purpose: load information and add to configuration things that you won't want to do each time a new file comes in
    # TO RUN AT THE START OF EACH RUN

    cfg = loadConfigFile(configFile)
    if cfg.sessionId in (None, '') or cfg.useSessionTimestamp is True:
        cfg.useSessionTimestamp = True
        cfg.sessionId = dateStr30(time.localtime())
    else:
        cfg.useSessionTimestamp = False
    # MERGE WITH PARAMS
    if args.runs != '' and args.scans != '':
        # use the run and scan numbers passed in as parameters
        cfg.runNum = [int(x) for x in args.runs.split(',')]
        cfg.scanNum = [int(x) for x in args.scans.split(',')]
    else:  # when you're not specifying on the command line it's already in a list
        cfg.runNum = [int(x) for x in cfg.runNum]
        cfg.scanNum = [int(x) for x in cfg.scanNum]
    # GET DICOM DIRECTORY

    if cfg.buildImgPath:
        imgDirDate = datetime.datetime.now()
        dateStr = cfg.date.lower()
        if dateStr != 'now' and dateStr != 'today':
            try:
                imgDirDate = parser.parse(cfg.date)
            except ValueError as err:
                raise RequestError('Unable to parse date string {} {}'.format(
                    cfg.date, err))
        datestr = imgDirDate.strftime("%Y%m%d")
        imgDirName = "{}.{}.{}".format(datestr, cfg.subjectName,
                                       cfg.subjectName)
        if cfg.mode != 'debug':
            cfg.dicomDir = os.path.join(cfg.intelrt.imgDir, imgDirName)
            cfg.dicomNamePattern = cfg.intelrt.dicomNamePattern
        else:
            cfg.dicomDir = os.path.join(cfg.cluster.imgDir, imgDirName)
            cfg.dicomNamePattern = cfg.cluster.dicomNamePattern
    else:  # if you're naming the full folder directly
        if cfg.mode != 'debug':
            cfg.dicomDir = cfg.intelrt.imgDir  # then the whole path was supplied
            cfg.dicomNamePattern = cfg.intelrt.dicomNamePattern
        else:
            cfg.dicomDir = cfg.cluster.imgDir
            cfg.dicomNamePattern = cfg.cluster.dicomNamePattern

    ########
    cfg.bids_id = 'sub-{0:03d}'.format(cfg.subjectNum)
    cfg.ses_id = 'ses-{0:02d}'.format(cfg.subjectDay)
    if cfg.mode == 'local':
        # then all processing is happening on linux too
        cfg.dataDir = cfg.intelrt.codeDir + 'data'
        cfg.mask_filename = os.path.join(cfg.intelrt.maskDir, cfg.MASK)
        cfg.MNI_ref_filename = os.path.join(cfg.intelrt.maskDir,
                                            cfg.MNI_ref_BOLD)
    elif cfg.mode == 'cloud':
        cfg.dataDir = cfg.cloud.codeDir + 'data'
        cfg.mask_filename = os.path.join(cfg.cloud.maskDir, cfg.MASK)
        cfg.MNI_ref_filename = os.path.join(cfg.cloud.maskDir,
                                            cfg.MNI_ref_BOLD)
        cfg.intelrt.subject_full_day_path = '{0}/data/{1}/{2}'.format(
            cfg.intelrt.codeDir, cfg.bids_id, cfg.ses_id)
    elif cfg.mode == 'debug':
        cfg.dataDir = cfg.cluster.codeDir + '/data'
        cfg.mask_filename = os.path.join(cfg.cluster.maskDir, cfg.MASK)
        cfg.MNI_ref_filename = os.path.join(cfg.cluster.maskDir,
                                            cfg.MNI_ref_BOLD)

    cfg.subject_full_day_path = '{0}/{1}/{2}'.format(cfg.dataDir, cfg.bids_id,
                                                     cfg.ses_id)
    cfg.subject_reg_dir = '{0}/registration_outputs/'.format(
        cfg.subject_full_day_path)
    # check that this directory exists
    if not os.path.exists(cfg.subject_reg_dir):
        os.mkdir(cfg.subject_reg_dir)
        print('CREATING REGISTRATION DIRECTORY %s' % cfg.subject_reg_dir)

    # REGISTRATION THINGS
    cfg.wf_dir = '{0}/{1}/ses-{2:02d}/registration/'.format(
        cfg.dataDir, cfg.bids_id, 1)
    cfg.BOLD_to_T1 = cfg.wf_dir + 'affine.txt'
    cfg.T1_to_MNI = cfg.wf_dir + 'ants_t1_to_mniComposite.h5'
    cfg.ref_BOLD = cfg.wf_dir + 'ref_image.nii.gz'

    # GET CONVERSION FOR HOW TO FLIP MATRICES
    cfg.axesTransform = getTransform()
    ###### BUILD SUBJECT FOLDERS #######
    return cfg
Example #20
def main(argv=None):
    """
    This is the main function that is called when you run 'sample.py'.

    Here, you will load the configuration settings specified in the toml configuration
    file. It will initiate the class clientInterface which automatically connects
    to the projectServer and allows making requests, such as to get DICOM or NifTi
    data using the dataInterface contained in clientInterface.
    It will then call the function 'doRuns' to actually start doing the experiment.
    """

    # define the parameters that will be recognized later on to set up fileInterface
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config',
                           '-c',
                           default=None,
                           type=str,
                           help='experiment config file (.json or .toml)')
    argParser.add_argument('--runs',
                           '-r',
                           default='',
                           type=str,
                           help='Comma separated list of run numbers')
    argParser.add_argument('--scans',
                           '-s',
                           default='',
                           type=str,
                           help='Comma separated list of scan numbers')
    args = argParser.parse_args(argv)

    # load the experiment configuration file
    print(f'using config file {args.config}')
    cfg = loadConfigFile(args.config)

    # obtain paths for important directories (e.g. location of dicom files)
    if cfg.imgDir is None:
        cfg.imgDir = os.path.join(currPath, 'dicomDir')
    cfg.codeDir = currPath

    # Start a thread generating the synthetic data
    if not os.path.exists(cfg.imgDir):
        os.makedirs(cfg.imgDir)
    generateSyntheticData(cfg)

    # Make an RPC connection to the projectServer
    # The 'dataInterface' class allows you to read and write files from the
    #   control room computer to this script running in the cloud.
    # The 'subjInterface' class will allow us to send classification results
    #   as feedback to the subject at the presentation computer.
    clientRPC = ClientInterface()
    dataInterface = clientRPC.dataInterface
    subjInterface = clientRPC.subjInterface
    webInterface = clientRPC.webInterface

    # now that we have the necessary variables, call the function 'doRuns' in order
    #   to actually start reading dicoms and doing your analyses of interest!
    #   INPUT:
    #       [1] cfg (configuration file with important variables)
    #       [2] dataInterface (this will allow a script from the cloud to access files
    #               from the stimulus computer that receives dicoms from the Siemens
    #               console computer)
    #       [3] subjInterface - this allows sending feedback (e.g. classification results)
    #            to a subjectService running on the presentation computer to provide
    #            feedback to the subject (and optionally get their response).
    #       [4] webInterface - this allows updating information on the experimenter webpage.
    #            For example to plot data points, or update status messages.
    doRuns(cfg, dataInterface, subjInterface, webInterface)

    return 0
Example #21
def main():
    logger = logging.getLogger()
    logger.setLevel(logLevel)
    logging.info('amygActivation: first log message!')
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config',
                           '-c',
                           default=defaultConfig,
                           type=str,
                           help='experiment config file (.json or .toml)')
    argParser.add_argument('--runs',
                           '-r',
                           default='',
                           type=str,
                           help='Comma separated list of run numbers')
    argParser.add_argument('--scans',
                           '-s',
                           default='',
                           type=str,
                           help='Comma separated list of scan numbers')
    argParser.add_argument(
        '--deleteTmpNifti',
        '-d',
        default='1',
        type=str,
        help='Set to 0 if rerunning during a single scanning session after an error')

    args = argParser.parse_args()

    # Initialize the RPC connection to the projectInterface
    # This will give us a dataInterface for retrieving files and
    # a subjectInterface for giving feedback
    clientInterface = ClientInterface()
    dataInterface = clientInterface.dataInterface
    subjInterface = clientInterface.subjInterface
    webInterface = clientInterface.webInterface
    args.dataRemote = dataInterface.isRunningRemote()

    cfg = utils.loadConfigFile(args.config)
    cfg = initialize(cfg, args)

    # DELETE ALL FILES IF FLAGGED (DEFAULT) #
    if args.deleteTmpNifti == '1':
        deleteTmpFiles(cfg, args)
    else:
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        print('NOT DELETING NIFTIS IN tmp/convertedNiftis')
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')

    createTmpFolder(cfg, args)

    #### MAIN PROCESSING ###
    nRuns = len(cfg.runNum)
    for runIndex in np.arange(nRuns):
        # Steps that we have to do:
        # 1. load run regressor X - ** make run regressor that has TRs -
        # 2. find the happy face trials (happy) X
        # 3. find the rest TRs right before each one  X
        # At every TR --> register to MNI, mask, etc
        # 4. zscore previous rest data (convert + register like before)
        # 5. calculate percent signal change over ROI
        # 6. save as a text file (Every TR-- display can smooth it)

        # this will be 1-based now!! it will be the actual run number in case it's out of order
        runNum = cfg.runNum[runIndex]
        runId = makeRunHeader(cfg, args, runIndex)
        run = cfg.runNum[runIndex]
        # create run folder
        runFolder = createRunFolder(cfg, args, runNum)
        scanNum = cfg.scanNum[runIndex]
        regressor = makeRunReg(cfg,
                               args,
                               dataInterface,
                               runNum,
                               runFolder,
                               saveMat=1)

        # initialize data stream
        dicomScanNamePattern = utils.stringPartialFormat(
            cfg.dicomNamePattern, 'SCAN', scanNum)
        streamId = dataInterface.initScannerStream(cfg.dicomDir,
                                                   dicomScanNamePattern,
                                                   cfg.minExpectedDicomSize)

        happy_TRs = findConditionTR(regressor, int(cfg.HAPPY))
        happy_TRs_shifted = happy_TRs + cfg.nTR_shift
        happy_TRs_shifted_filenum = happy_TRs_shifted + cfg.nTR_skip  # to account for first 10 files that we're skipping
        happy_blocks = list(split_tol(happy_TRs_shifted, 1))
        TR_per_block = cfg.nTR_block

        fixation_TRs = findConditionTR(regressor, int(cfg.REST))
        fixation_TRs_shifted = fixation_TRs + cfg.nTR_shift
        fixation_blocks = list(split_tol(fixation_TRs_shifted, 1))

        runData = StructDict()
        runData.all_data = np.zeros(
            (cfg.nVox[cfg.useMask], cfg.nTR_run - cfg.nTR_skip))
        runData.percent_change = np.zeros((cfg.nTR_run - cfg.nTR_skip, ))
        runData.percent_change[:] = np.nan
        runData.badVoxels = np.array([])

        TRindex = 0
        for TRFilenum in np.arange(cfg.nTR_skip + 1,
                                   cfg.nTR_run + 1):  # iterate through all TRs
            if TRFilenum == cfg.nTR_skip + 1:  # wait until run starts
                timeout_file = 180
            else:
                timeout_file = 5
            A = time.time()
            dicomFilename = dicomScanNamePattern.format(TR=TRFilenum)
            print(f'Get Dicom: {dicomFilename}')
            dicomData = dataInterface.getImageData(streamId, int(TRFilenum),
                                                   timeout_file)
            if dicomData is None:
                print('Error: getImageData returned None')
                return
            full_nifti_name = convertToNifti(cfg, args, TRFilenum, scanNum,
                                             dicomData)
            print(full_nifti_name)
            print(cfg.MASK_transformed[cfg.useMask])
            maskedData = apply_mask(full_nifti_name,
                                    cfg.MASK_transformed[cfg.useMask])
            runData.all_data[:, TRindex] = maskedData
            B = time.time()
            print('read to mask time: {:5f}'.format(B - A))

            if TRindex in happy_TRs_shifted:  # we're at a happy block
                # now take previous fixation block for z scoring
                this_block = [
                    b for b in np.arange(4) if TRindex in happy_blocks[b]
                ][0]
                fixation_this_block = fixation_blocks[this_block]
                avg_activity, runData = getAvgSignal(fixation_this_block,
                                                     runData, TRindex, cfg)
                runData.percent_change[TRindex] = calculatePercentChange(
                    avg_activity, runData.all_data[:, TRindex])

                text_to_save = '{0:05f}'.format(
                    runData.percent_change[TRindex])
                file_name_to_save = getOutputFilename(
                    run,
                    TRFilenum)  # save as the actual file number, not index
                # now we want to always send this back to the local computer running the display
                full_file_name_to_save = os.path.join(
                    cfg.local.subject_full_day_path, runId, file_name_to_save)
                # Send classification result back to the console computer
                try:
                    dataInterface.putFile(full_file_name_to_save, text_to_save)
                except Exception as err:
                    print('Error putFile: ' + str(err))
                    return
                # JUST TO PLOT ON WEB SERVER
                subjInterface.setResult(run, int(TRFilenum),
                                        float(runData.percent_change[TRindex]))
                webInterface.plotDataPoint(
                    run, int(TRFilenum),
                    float(runData.percent_change[TRindex]))
            TRheader = makeTRHeader(cfg, runIndex, TRFilenum, TRindex,
                                    runData.percent_change[TRindex])
            TRindex += 1

        # SAVE OVER RUN
        runData.scanNum = scanNum  # save scanning number
        runData.subjectName = cfg.subjectName
        runData.dicomDir = cfg.dicomDir
        run_filename = getRunFilename(cfg.sessionId, run)
        full_run_filename_to_save = os.path.join(runFolder, run_filename)
        sio.savemat(full_run_filename_to_save, runData, appendmat=False)

    sys.exit(0)
tmp_folder = f'/gpfs/milgram/scratch60/turk-browne/kp578/{YYYYMMDD}.{LASTNAME}.{PATIENTID}/'
# if os.path.isdir(tmp_folder):
#   shutil.rmtree(tmp_folder)
if not os.path.isdir(tmp_folder):
    os.mkdir(tmp_folder)

tomlFile = f"/gpfs/milgram/project/turk-browne/users/kp578/realtime/rt-cloud/projects/tProject/conf/tProject.toml"
argParser = argparse.ArgumentParser()
argParser.add_argument('--config',
                       '-c',
                       default=tomlFile,
                       type=str,
                       help='experiment file (.json or .toml)')
args = argParser.parse_args()
cfg = utils.loadConfigFile(args.config)

subjectFolder = f"{Top_directory}{YYYYMMDD}.{LASTNAME}.{PATIENTID}/"  # e.g. 20190820.RTtest001.RTtest001; a new folder is generated for each patient

dicomFiles = glob(f"{subjectFolder}*")
day2templateVolume_dicom = dicomFiles[int(len(dicomFiles) / 2)]
day1templateFunctionalVolume = '/gpfs/milgram/project/turk-browne/projects/rtcloud_kp/subjects/pilot_sub001/ses1_recognition/run1/nifti/templateFunctionalVolume.nii.gz'
day2templateVolume_fileName = '/gpfs/milgram/project/turk-browne/projects/rtcloud_kp/subjects/pilot_sub001/ses2_recognition/templateFunctionalVolume.nii.gz'
day2templateVolume_nii = dicom2nii(
    day2templateVolume_dicom, day2templateVolume_fileName,
    day1templateFunctionalVolume)  # convert dicom to nifti
# templateVolume=dicom2nii(templateVolume)

main_folder = '/gpfs/milgram/project/turk-browne/projects/rtcloud_kp/'
day2functionalTemplate = f"{main_folder}subjects/{sub}/ses2_recognition/functionalTemplate.nii.gz"
call(f"cp {day2templateVolume_nii} {day2functionalTemplate}", shell=True)