Example 1
    def setup_class(cls):
        utils.installLoggers(logging.DEBUG, logging.DEBUG, filename='logs/tests.log')
        # Start a projectInterface thread running
        params = StructDict({'fmriPyScript': 'projects/sample/sample.py',
                             'filesremote': True,
                             'port': 8921,
                            })
        cfg = StructDict({'sessionId': "test",
                          'subjectName': "test_sample",
                          'subjectNum': 1,
                          'subjectDay': 1,
                          'sessionNum': 1})
        cls.webThread = threading.Thread(name='webThread', target=Web.start, args=(params, cfg, True))
        cls.webThread.daemon = True
        cls.webThread.start()
        time.sleep(.1)

        # Start a fileWatcher thread running
        cls.fileThread = threading.Thread(
            name='fileThread',
            target=WsFileWatcher.runFileWatcher,
            args=('localhost:8921',),
            kwargs={
                'retryInterval': 0.1,
                'allowedDirs': ['/tmp', testDir, samplePath],
                'allowedTypes': fileTypeList,
                'username': '******',
                'password': '******',
                'testMode': True
            }
        )
        cls.fileThread.daemon = True
        cls.fileThread.start()
        time.sleep(1)
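
The tests never shut these threads down explicitly. A minimal teardown sketch, assuming the Web.stop() method shown in Example 15 is available:

    def teardown_class(cls):
        # Ask the tornado ioloop to stop (see Web.stop in Example 15),
        # then reap the web thread; the fileWatcher thread is a daemon
        # and exits with the process.
        Web.stop()
        cls.webThread.join(timeout=2)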
Example 2
    def startServers(self,
                     allowedDirs=defaultAllowedDirs,
                     allowedFileTypes=defaultAllowedFileTypes,
                     dataRemote=True, subjectRemote=True):
        # Start the projectServer running
        cfg = StructDict({'sessionId': "test",
                          'subjectName': "test_sample",
                          'subjectNum': 1,
                          'subjectDay': 1,
                          'sessionNum': 1})
        args = StructDict({'config': cfg,
                           'mainScript': 'projects/sample/sample.py',
                           'dataRemote': dataRemote,
                           'subjectRemote': subjectRemote,
                           'port': 8921, 
                           'test': True})
        isRunningEvent = multiprocessing.Event()
        self.projectProc = multiprocessing.Process(target=runProjectServer, args=(args, isRunningEvent))
        self.projectProc.start()
        isRunningEvent.wait()

        if dataRemote is True:
            # Start the dataService running
            args = StructDict({'server': 'localhost:8921',
                               'interval': 0.1,
                               'allowedDirs': allowedDirs,
                               'allowedFileTypes': allowedFileTypes,
                               'username': '******',
                               'password': '******',
                               'test': True,
                               })
            isRunningEvent = multiprocessing.Event()
            self.dataProc = multiprocessing.Process(target=runDataService, args=(args, isRunningEvent))
            self.dataProc.start()
            isRunningEvent.wait()
        else:
            self.dataProc = None

        if subjectRemote is True:
            # Start the subjectService running
            args = StructDict({'server': 'localhost:8921',
                               'interval': 0.1,
                               'username': '******',
                               'password': '******',
                               'test': True,
                               })
            isRunningEvent = multiprocessing.Event()
            self.subjectProc = multiprocessing.Process(target=runSubjectService, args=(args, isRunningEvent))
            self.subjectProc.start()
            isRunningEvent.wait()
            # time.sleep(5)
        else:
            self.subjectProc = None

        return True
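
A matching stopServers helper is not shown here; a minimal sketch using only the standard multiprocessing API might be:

    def stopServers(self):
        # Terminate and reap whichever service processes were started.
        for proc in (self.projectProc, self.dataProc, self.subjectProc):
            if proc is not None:
                proc.terminate()
                proc.join()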
Example 3
 def sendDataMsgFromThreadAsync(msg):
     if Web.wsDataConn is None:
         raise StateError("ProjectInterface: FileServer not connected. Please run the fileServer.")
     callId = msg.get('callId')
     if not callId:
         callbackStruct = StructDict()
         callbackStruct.dataConn = Web.wsDataConn
         callbackStruct.numResponses = 0
         callbackStruct.responses = []
         callbackStruct.semaphore = threading.Semaphore(value=0)
         callbackStruct.timeStamp = time.time()
         callbackStruct.msg = msg.copy()
         if 'data' in callbackStruct.msg:
             del callbackStruct.msg['data']
         Web.callbackLock.acquire()
         try:
             Web.dataSequenceNum += 1
             callId = Web.dataSequenceNum
             callbackStruct.callId = callId
             msg['callId'] = callId
             Web.dataCallbacks[callId] = callbackStruct
         finally:
             Web.callbackLock.release()
         Web.ioLoopInst.add_callback(Web.sendDataMessage, msg)
     return callId
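
The Semaphore(value=0) is the handshake between the sending thread and the websocket callback that later delivers the response (see getDataMsgResponse in Example 15). A standalone sketch of that pattern, independent of this codebase:

import threading

sem = threading.Semaphore(value=0)
responses = []

def callback(resp):
    responses.append(resp)   # runs on the receiving thread
    sem.release()            # wake one waiter

threading.Timer(0.1, callback, args=({'status': 200},)).start()
if not sem.acquire(timeout=5):   # sender blocks until the callback fires
    raise TimeoutError('no response')
print(responses.pop(0))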
Example 4
def main():
    # example below of how to use this code
    # make sure the required files are referenced in the config file

    # configFile = 'greenEyes.toml'
    # cfg = initializeGreenEyes(configFile)
    # scanNum = 9
    # TRnum = 11
    # expected_dicom_name = cfg.dicomNamePattern.format(scanNum,TRnum)
    # full_dicom_name = '{0}{1}'.format(cfg.subjectDcmDir,expected_dicom_name)

    # dicomImg = anonymizeDicom(full_dicom_name)
    # saveAsNiftiImage(dicomImg,expected_dicom_name,cfg)

    # # TEST EXACTLY THE SAME
    # f1 = '/jukebox/norman/amennen/github/brainiak/rtAttenPenn/greenEyes/tmp/convertedNiftis/9-11-1.nii.gz'
    # f2 = '/jukebox/norman/amennen/github/brainiak/rtAttenPenn/greenEyes/data/sub-102/ses-02/converted_niftis/9-11-1.nii.gz'

    # obj_1 = nib.load(f1)
    # obj_2 = nib.load(f2)
    # d_1 = obj_1.get_fdata()
    # d_2 = obj_2.get_fdata()

    # np.argwhere(d_1!=d_2)

    cfg = StructDict()
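
The commented-out equality check can be made runnable; a minimal sketch, assuming f1 and f2 are paths to existing NIfTI files:

import nibabel as nib
import numpy as np

def niftisMatch(f1, f2):
    # Compare the voxel data of the two images exactly.
    d1 = nib.load(f1).get_fdata()
    d2 = nib.load(f2).get_fdata()
    return np.argwhere(d1 != d2).size == 0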
Example 5
def main():
    random.seed(datetime.now())
    # MAKES STRUCT WITH ALL PARAMETERS IN IT
    defaultConfig = os.path.join(currPath , 'conf/greenEyes_organized.toml')
    #defaultConfig = 'conf/greenEyes_organized.toml'
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config', '-c', default=defaultConfig,type=str,
                   help='experiment config file (.json or .toml)')
    argParser.add_argument('--addr', '-a', default='localhost', type=str, 
                   help='server ip address')
    args = argParser.parse_args()
    params = StructDict({'config': args.config})

    cfg = loadConfigFile(params.config)
    #cfg = loadConfigFile(defaultConfig)
    # TESTING
    cfg.bids_id = 'sub-{0:03d}'.format(cfg.subjectNum)
    cfg.ses_id = 'ses-{0:02d}'.format(cfg.subjectDay)
    # get subj
    if cfg.machine == 'intel':
        # get intel computer ready
        cfg = buildSubjectFoldersIntelrt(cfg)
        if cfg.subjectDay == 2:
            cluster_wf_dir = '{0}/derivatives/work/fmriprep_wf/single_subject_{1:03d}_wf'.format(cfg.cluster.clusterBidsDir,cfg.subjectNum)
            cluster_BOLD_to_T1 = cluster_wf_dir + '/func_preproc_ses_01_task_examplefunc_run_01_wf/bold_reg_wf/bbreg_wf/fsl2itk_fwd/affine.txt'
            cluster_T1_to_MNI = cluster_wf_dir + '/anat_preproc_wf/t1_2_mni/ants_t1_to_mniComposite.h5'
            cluster_ref_BOLD = cluster_wf_dir + '/func_preproc_ses_01_task_examplefunc_run_01_wf/bold_reference_wf/gen_ref/ref_image.nii.gz'
            copyClusterFileToIntel(cluster_BOLD_to_T1,cfg.subject_offline_registration_path)
            copyClusterFileToIntel(cluster_T1_to_MNI,cfg.subject_offline_registration_path)
            copyClusterFileToIntel(cluster_ref_BOLD,cfg.subject_offline_registration_path)
            # now see if you need to randomly draw the interpretation
            makeSubjectInterpretation(cfg)
            if cfg.mode == 'cloud': # also copy files to the cloud computer -- easier here to just copy entire folder
                cfg.subject_full_path = '{0}/data/{1}'.format(cfg.intelrt.codeDir,cfg.bids_id)
                locationToSend = '{0}/data/'.format(cfg.cloud.codeDir)
                if args.addr != 'localhost':
                    copyIntelFolderToCloud(cfg.subject_full_path,locationToSend,args.addr)
                else:
                    logging.warning('YOU NEED TO INPUT CLOUD IP ADDR!!')
                    print('YOU NEED TO INPUT CLOUD IP ADDR!!')
    # elif cfg.machine == 'cloud':
    #     # get cloud computer ready
    #     cfg = buildSubjectFoldersCloud(cfg)
    #     fileInterface = FileInterface()
    #     retrieveIntelFileAndSaveToCloud(cfg.intelrt.BOLD_to_T1,cfg.subject_offline_registration_path,fileInterface)
    #     retrieveIntelFileAndSaveToCloud(cfg.intelrt.T1_to_MNI,cfg.subject_offline_registration_path,fileInterface)
    #     retrieveIntelFileAndSaveToCloud(cfg.intelrt.ref_BOLD,cfg.subject_offline_registration_path,fileInterface)
    #     retrieveIntelFileAndSaveToCloud(cfg.intelrt.interpretationFile,cfg.subject_full_day_path,fileInterface)
    elif cfg.machine == 'cluster': # running on cluster computer
        cluster_wf_dir='{0}/derivatives/work/fmriprep_wf/single_subject_{1:03d}_wf'.format(cfg.cluster.clusterBidsDir,cfg.subjectNum)
        cluster_BOLD_to_T1= cluster_wf_dir + '/func_preproc_ses_01_task_story_run_01_wf/bold_reg_wf/bbreg_wf/fsl2itk_fwd/affine.txt'
        cluster_T1_to_MNI= cluster_wf_dir + '/anat_preproc_wf/t1_2_mni/ants_t1_to_mniComposite.h5'
        cluster_ref_BOLD=glob.glob(cluster_wf_dir + '/func_preproc_ses_01_task_story_run_01_wf/bold_reference_wf/gen_ref/ref_image.nii.gz')[0]
        cfg = buildSubjectFoldersCluster(cfg)
        copyClusterFileToCluster(cluster_BOLD_to_T1,cfg.subject_offline_registration_path)
        copyClusterFileToCluster(cluster_T1_to_MNI,cfg.subject_offline_registration_path)
        copyClusterFileToCluster(cluster_ref_BOLD,cfg.subject_offline_registration_path)
        makeSubjectInterpretation(cfg)
Example 6
def processPyScriptRequest(request):
    if 'cmd' not in request:
        raise StateError('handleFifoRequests: cmd field not in request: {}'.format(request))
    cmd = request['cmd']
    route = request.get('route')
    localtimeout = request.get('timeout', 10) + 5
    response = StructDict({'status': 200})
    if route == 'dataserver':
        try:
            response = Web.sendDataMsgFromThread(request, timeout=localtimeout)
            if response is None:
                raise StateError('handleFifoRequests: Response None from sendDataMessage')
            if 'status' not in response:
                raise StateError('handleFifoRequests: status field missing from response: {}'.format(response))
            if response['status'] not in (200, 408):
                if 'error' not in response:
                    raise StateError('handleFifoRequests: error field missing from response: {}'.format(response))
                Web.setUserError(response['error'])
                logging.error('handleFifo status {}: {}'.format(response['status'], response['error']))
        except Exception as err:
            errStr = 'SendDataMessage Exception type {}: error {}:'.format(type(err), str(err))
            response = {'status': 400, 'error': errStr}
            Web.setUserError(errStr)
            logging.error('handleFifo Exception: {}'.format(errStr))
            raise err
    else:
        if cmd == 'webCommonDir':
            response.filename = CommonOutputDir
        elif cmd == 'resultValue':
            try:
                # forward to bioFeedback Display
                Web.sendBiofeedbackMsgFromThread(json.dumps(request))
                # forward to main browser window
                Web.sendUserMsgFromThread(json.dumps(request))
                # Accumulate results locally to resend to browser as needed
                Web.addResultValue(request)
            except Exception as err:
                errStr = 'SendClassification Exception type {}: error {}:'.format(type(err), str(err))
                response = {'status': 400, 'error': errStr}
                Web.setUserError(errStr)
                logging.error('handleFifo Exception: {}'.format(errStr))
                raise err
        elif cmd == 'subjectDisplay':
            logging.info('subjectDisplay projectInterface Callback')
    return response
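
For reference, a sketch of a resultValue request this dispatcher accepts, with field values illustrative and the Web service assumed to be running (the fields match what addResultValue in Example 15 reads):

request = {'cmd': 'resultValue', 'runId': 1, 'trId': 5, 'value': 0.72}
response = processPyScriptRequest(request)   # forwards to the browser windows
assert response['status'] == 200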
Example 7
def makeFifo(pipename=None, isServer=True):
    fifodir = '/tmp/pipes/'
    if not os.path.exists(fifodir):
        os.makedirs(fifodir)
    # create new pipe
    if pipename is None:
        fifoname = os.path.join(fifodir,
                                'comm_pipe_{}'.format(int(time.time())))
        if isServer:
            # remove previous temporary named pipes
            for p in Path(fifodir).glob("comm_pipe_*"):
                p.unlink()
    else:
        fifoname = os.path.join(fifodir, pipename)
    # fifo struct
    commPipes = StructDict()
    if isServer:
        commPipes.name_out = fifoname + '.toclient'
        commPipes.name_in = fifoname + '.fromclient'
    else:
        commPipes.name_out = fifoname + '.fromclient'
        commPipes.name_in = fifoname + '.toclient'

    if not os.path.exists(commPipes.name_out):
        os.mkfifo(commPipes.name_out)
    if not os.path.exists(commPipes.name_in):
        os.mkfifo(commPipes.name_in)
    commPipes.fifoname = fifoname
    return commPipes
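
A minimal client-side usage sketch, assuming the caller then attaches the opened file objects as the fd_out/fd_in fields that clientSendCmd (Example 12) expects; note that opening a FIFO blocks until the other end opens it too:

commPipes = makeFifo(pipename='comm_pipe_demo', isServer=False)
commPipes.fd_out = open(commPipes.name_out, mode='w', buffering=1)  # line-buffered writes
commPipes.fd_in = open(commPipes.name_in, mode='r')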
Example 8
def makeRunReg(cfg, args, dataInterface, runNum, runFolder, saveMat=1):
    """ make regression for neurofeedback to use """
    # format the run id from runNum (1-based, the actual run name)
    # get the block structure (nReps, TRs per block) from the config file
    runId = 'run-{0:02d}'.format(runNum)
    nReps = int(cfg.nReps)
    nTR_block = int(cfg.nTR_block)
    total_n_blocks = nReps * 3 + 1
    total_n_TRs = total_n_blocks * nTR_block
    regressor = np.zeros((total_n_TRs, ))
    # condition codes (REST, HAPPY, MATH) are defined in the config file
    val_dict = {}
    val_dict['REST'] = int(cfg.REST)
    val_dict['HAPPY'] = int(cfg.HAPPY)
    val_dict['MATH'] = int(cfg.MATH)
    for r in np.arange(nReps):
        first_start = r * (nTR_block * 3)
        first_end = first_start + nTR_block
        regressor[first_start:first_end] = val_dict[cfg.order_block[0]]
        regressor[first_end:first_end +
                  nTR_block] = val_dict[cfg.order_block[1]]
        regressor[first_end + nTR_block:first_end +
                  (2 * nTR_block)] = val_dict[cfg.order_block[2]]
    # save regressor as .mat to load with display
    if saveMat:
        filename = getRegressorName(runNum)
        full_name = "{0}/{1}".format(runFolder, filename)
        regData = StructDict()
        regData.regressor = regressor
        sio.savemat(full_name, regData, appendmat=False)
        if args.dataRemote:
            # save this back to local machine
            # make it into a list to use in the function
            fileList = [full_name]
            local_run_folder = os.path.join(cfg.local.subject_full_day_path,
                                            runId)
            downloadFilesFromList(dataInterface, fileList, local_run_folder)
    # TO DO: put command here to download data to local!
    return regressor
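
For concreteness, with nReps=2, nTR_block=3 and order_block = ['REST', 'HAPPY', 'MATH'] mapped to 0/1/2, the loop fills the regressor as below; this standalone sketch reproduces the same arithmetic:

import numpy as np

nReps, nTR_block = 2, 3
vals = {'REST': 0, 'HAPPY': 1, 'MATH': 2}
order = ['REST', 'HAPPY', 'MATH']
regressor = np.zeros(((nReps * 3 + 1) * nTR_block,))
for r in range(nReps):
    start = r * nTR_block * 3
    for i, name in enumerate(order):
        regressor[start + i * nTR_block:start + (i + 1) * nTR_block] = vals[name]
# yields 0,0,0, 1,1,1, 2,2,2, 0,0,0, 1,1,1, 2,2,2, 0,0,0
# (the one extra block at the end stays at the REST value)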
Example 9
 def test_structDict(self):
     print("Test StructDict:")
     a = StructDict()
     a.top = 1
     a.bottom = 3
     a.sub = StructDict()
     a.sub.left = 'corner'
     assert a.top == 1 and a.bottom == 3 and a.sub.left == 'corner'
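
StructDict is a dict with attribute access; absent fields appear to read as None rather than raising, which Example 12 relies on when it checks retVals.filename is None. A small sketch of those semantics:

s = StructDict({'alpha': 1})
assert s.alpha == 1 and s['alpha'] == 1   # attribute and key access agree
s.beta = 2                                # attribute writes add keys
assert s['beta'] == 2
assert s.missing is None                  # absent fields read as None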
Example 10
 def __init__(self, webDisplayInterface, params, cfg):
     self.webUI = webDisplayInterface
     self.runInfo = StructDict({'threadId': None, 'stopRun': False})
     self.confDir = params.confDir
     self.configFilename = None
     if not os.path.exists(self.confDir):
         os.makedirs(self.confDir)
     if type(cfg) is str:
         self.configFilename = cfg
         cfg = loadConfigFile(self.configFilename)
     self.cfg = cfg
     self.scripts = {}
     self._addScript('mainScript', params.mainScript, 'run')
     self._addScript('initScript', params.initScript, 'init')
     self._addScript('finalizeScript', params.finalizeScript, 'finalize')
Example 11
 def prepare_request(self, msg):
     """Prepate a request to be sent, including creating a callback structure and unique ID."""
     # Get data server connection the request will be sent on
     websocketState.wsConnLock.acquire()
     try:
         wsConnections = websocketState.wsConnectionLists.get(self.name)
         if wsConnections is None or len(wsConnections) == 0:
             serviceName = 'DataService'
             if self.name == 'wsSubject':
                 serviceName = 'SubjectService'
             raise StateError(
                 f"RemoteService: {serviceName} not connected. Please start the remote service."
             )
         reqConn = wsConnections[-1]  # always use most recent connection
     finally:
         websocketState.wsConnLock.release()
     callId = msg.get('callId')
     if not callId:
         callbackStruct = StructDict()
         callbackStruct.dataConn = reqConn
         callbackStruct.numResponses = 0
         callbackStruct.responses = []
         callbackStruct.semaphore = threading.Semaphore(value=0)
         callbackStruct.timeStamp = time.time()
         callbackStruct.msg = msg.copy()
         if 'data' in callbackStruct.msg:
             del callbackStruct.msg['data']
         self.callbackLock.acquire()
         try:
             self.dataSequenceNum += 1
             callId = self.dataSequenceNum
             callbackStruct.callId = callId
             msg['callId'] = callId
             self.dataCallbacks[callId] = callbackStruct
         finally:
             self.callbackLock.release()
         # self.ioLoopInst.add_callback(Web.sendDataMessage, msg)
     return callId, reqConn
Example 12
def clientSendCmd(commPipes, cmd):
    '''Send a request using named pipes to the projectInterface for handling.
    This allows a separate client process to make requests of the projectInterface process.
    It writes the request on fd_out and receives the reply on fd_in.
    '''
    data = None
    savedError = None
    incomplete = True
    while incomplete:
        commPipes.fd_out.write(json.dumps(cmd) + os.linesep)
        msg = commPipes.fd_in.readline()
        if len(msg) == 0:
            # fifo closed
            raise StateError('commPipe closed')
        response = json.loads(msg)
        status = response.get('status', -1)
        if status != 200:
            raise RequestError(
                'clientSendCmd: Cmd: {} status {}: error {}'.format(
                    cmd.get('cmd'), status, response.get('error')))
        if 'data' in response:
            try:
                data = unpackDataMessage(response)
            except Exception as err:
                # The call may be incomplete, save the error and keep receiving as needed
                logging.error('clientSendCmd: {}'.format(err))
                if savedError is None:
                    savedError = err
            cmd['callId'] = response.get('callId', -1)
        # Check if need to continue to get more parts
        incomplete = response.get('incomplete', False)
    if savedError:
        raise RequestError('clientSendCmd: {}'.format(savedError))
    retVals = StructDict()
    retVals.statusCode = response.get('status', -1)
    if 'filename' in response:
        retVals.filename = response['filename']
    if 'fileList' in response:
        retVals.fileList = response['fileList']
    if 'fileTypes' in response:
        retVals.fileTypes = response['fileTypes']
    if data:
        retVals.data = data
        if retVals.filename is None:
            raise StateError('clientSendCmd: filename field is None')
    return retVals
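
A usage sketch, assuming the pipes were created and opened as in the makeFifo sketch after Example 7; the command name and fields here are hypothetical:

cmd = {'cmd': 'getFile', 'route': 'dataserver', 'filename': '/tmp/test.txt'}
retVals = clientSendCmd(commPipes, cmd)
print(retVals.statusCode, retVals.filename)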
Example 13
    def initScannerStream(self,
                          imgDir: str,
                          filePattern: str,
                          minFileSize: int,
                          demoStep: int = 0) -> int:
        """
        Initialize a data stream context with image directory and file pattern.
        Once the stream is initialized call getImageData() to retrieve image data.
        NOTE: currently only one stream at a time is supported.

        Args:
            imgDir: the directory where the images are or will be written from the MRI scanner.
            filePattern: a pattern of the image file names that has a TR tag which will be used
                to index the images, for example 'scan01_{TR:03d}.dcm'. In this example a call to
                getImageData(imgIndex=6) would look for dicom file 'scan01_006.dcm'.

        Returns:
            streamId: An identifier used when calling getImageData()
        """
        self._checkAllowedDirs(imgDir)
        self._checkAllowedFileTypes(filePattern)

        # check that filePattern has {TR} in it
        if not re.match(r'.*{TR.*', filePattern):
            raise InvocationError(
                r"initScannerStream filePattern must have a {TR} pattern")
        self.currentStreamId = self.currentStreamId + 1
        self.streamInfo = StructDict({
            'streamId': self.currentStreamId,
            'type': 'scanner',
            'imgDir': imgDir,
            'filePattern': filePattern,
            'minFileSize': minFileSize,
            'demoStep': demoStep,
            'imgIndex': 0,
        })
        _, file_ext = os.path.splitext(filePattern)
        self.initWatch(imgDir, '*' + file_ext, minFileSize, demoStep)
        return self.currentStreamId
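
A usage sketch based on the docstring; the exact getImageData signature is an assumption:

streamId = dataInterface.initScannerStream('/tmp/scans',
                                           'scan01_{TR:03d}.dcm',
                                           minFileSize=100000)
# per the docstring, this would look for dicom file 'scan01_006.dcm'
imgData = dataInterface.getImageData(streamId, imgIndex=6)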
Example 14
 def __init__(self, args):
     self.started = False
     if args is None:
         args = argparse.Namespace()
     if not hasattr(args, 'test') or args.test is None:
         args.test = False
     if not hasattr(args, 'dataRemote') or args.dataRemote is None:
         args.dataRemote = False
     if not hasattr(args, 'subjectRemote') or args.subjectRemote is None:
         args.subjectRemote = False
     if not hasattr(args, 'port') or args.port is None:
         args.port = 8888
     self.args = args
     self.params = StructDict({
         'mainScript': args.mainScript,
         'initScript': args.initScript,
         'finalizeScript': args.finalizeScript,
         'port': args.port,
     })
     self.web = None
     print(
         f'## Settings: dataRemote:{self.args.dataRemote}, subjectRemote:{self.args.subjectRemote}'
     )
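
Because the constructor backfills missing attributes, callers can pass a sparse Namespace; a sketch, assuming this __init__ belongs to a class here called ProjectServer (the class name is an assumption):

import argparse

args = argparse.Namespace(mainScript='projects/sample/sample.py',
                          initScript=None, finalizeScript=None)
server = ProjectServer(args)  # test/dataRemote/subjectRemote default to False, port to 8888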
Example 15
class Web():
    ''' Cloud service web-interface that is the front-end to the data processing. '''
    app = None
    httpServer = None
    httpPort = 8888
    # Arrays of WebSocket connections that have been established from client windows
    wsBrowserMainConns = []  # type: ignore
    wsBiofeedbackConns = []  # type: ignore
    wsEventConns = []  # type: ignore
    wsDataConn = None  # type: ignore  # Only one data connection
    # Callback functions to invoke when message received from client window connection
    browserMainCallback = None
    browserBiofeedCallback = None
    eventCallback = None
    # Main html page to load
    webDir = os.path.join(rootDir, 'web/')
    confDir = os.path.join(webDir, 'conf/')
    htmlDir = os.path.join(webDir, 'html')
    webIndexPage = 'index.html'
    webLoginPage = 'login.html'
    webBiofeedPage = 'biofeedback.html'
    dataCallbacks = {}
    dataSequenceNum = 0
    cbPruneTime = 0
    # Synchronizing across threads
    callbackLock = threading.Lock()
    wsConnLock = threading.Lock()
    httpLock = threading.Lock()
    ioLoopInst = None
    filesremote = False
    fmriPyScript = None
    initScript = None
    finalizeScript = None
    configFilename = None
    cfg = None
    testMode = False
    runInfo = StructDict({'threadId': None, 'stopRun': False})
    resultVals = [[{'x': 0, 'y': 0}]]

    @staticmethod
    def start(params, cfg, testMode=False):
        if Web.app is not None:
            raise RuntimeError("Web Server already running.")
        Web.testMode = testMode
        # Set default value before checking for param overrides
        Web.browserMainCallback = defaultBrowserMainCallback
        Web.browserBiofeedCallback = defaultBrowserBiofeedCallback
        Web.eventCallback = defaultEventCallback
        if params.browserMainCallback:
            Web.browserMainCallback = params.browserMainCallback
        if params.browserBiofeedCallback:
            Web.browserBiofeedCallback = params.browserBiofeedCallback
        if params.eventCallback:
            Web.eventCallback = params.eventCallback
        if params.htmlDir:
            Web.htmlDir = params.htmlDir
            Web.webDir = os.path.dirname(Web.htmlDir)
        if params.port:
            Web.httpPort = params.port
        Web.fmriPyScript = params.fmriPyScript
        Web.initScript = params.initScript
        Web.finalizeScript = params.finalizeScript
        Web.filesremote = params.filesremote
        if type(cfg) is str:
            Web.configFilename = cfg
            cfg = loadConfigFile(Web.configFilename)
        Web.cfg = cfg
        if not os.path.exists(Web.confDir):
            os.makedirs(Web.confDir)
        src_root = os.path.join(Web.webDir, 'src')
        css_root = os.path.join(Web.webDir, 'css')
        img_root = os.path.join(Web.webDir, 'img')
        build_root = os.path.join(Web.webDir, 'build')
        cookieSecret = getCookieSecret(certsDir)
        settings = {
            "cookie_secret": cookieSecret,
            "login_url": "/login",
            "xsrf_cookies": True,
            "websocket_max_message_size": 16*1024*1024,
            # "max_message_size": 1024*1024*256,
            # "max_buffer_size": 1024*1024*256,
        }
        Web.app = tornado.web.Application([
            (r'/', Web.UserHttp),
            (r'/login', Web.LoginHandler),
            (r'/logout', Web.LogoutHandler),
            (r'/feedback', Web.BiofeedbackHttp),  # shows image
            (r'/wsUser', Web.UserWebSocket),
            (r'/wsSubject', Web.BiofeedbackWebSocket),
            (r'/wsData', Web.DataWebSocket),
            (r'/wsEvents', Web.EventWebSocket),  # gets signal to change image
            (r'/src/(.*)', tornado.web.StaticFileHandler, {'path': src_root}),
            (r'/css/(.*)', tornado.web.StaticFileHandler, {'path': css_root}),
            (r'/img/(.*)', tornado.web.StaticFileHandler, {'path': img_root}),
            (r'/build/(.*)', tornado.web.StaticFileHandler, {'path': build_root}),
        ], **settings)
        # start event loop if needed
        try:
            asyncio.get_event_loop()
        except RuntimeError as err:
            # RuntimeError thrown if no current event loop
            # Start the event loop
            asyncio.set_event_loop(asyncio.new_event_loop())

        # start thread listening for remote file requests on a default named pipe
        commPipes = makeFifo(pipename=defaultPipeName)
        fifoThread = threading.Thread(name='defaultPipeThread', target=repeatPipeRequestHandler, args=(commPipes,))
        fifoThread.daemon = True
        fifoThread.start()

        if Web.testMode is True:
            print("Listening on: http://localhost:{}".format(Web.httpPort))
            ssl_ctx = None
        else:
            ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ssl_ctx.load_cert_chain(getCertPath(certsDir, sslCertFile),
                                    getKeyPath(certsDir, sslPrivateKey))
            print("Listening on: https://localhost:{}".format(Web.httpPort))

        Web.httpServer = tornado.httpserver.HTTPServer(Web.app, ssl_options=ssl_ctx)
        Web.httpServer.listen(Web.httpPort)
        Web.ioLoopInst = tornado.ioloop.IOLoop.current()
        Web.ioLoopInst.start()

    @staticmethod
    def stop():
        Web.ioLoopInst.add_callback(Web.ioLoopInst.stop)
        Web.app = None

    @staticmethod
    def close():
        # Currently this should never be called
        raise StateError("Web close() called")

        Web.wsConnLock.acquire()
        try:
            if Web.wsDataConn is not None:
                Web.wsDataConn.close()
            Web.wsDataConn = None

            for client in Web.wsBrowserMainConns[:]:
                client.close()
            Web.wsBrowserMainConns = []

            for client in Web.wsBiofeedbackConns[:]:
                client.close()
            Web.wsBiofeedbackConns = []
        finally:
            Web.wsConnLock.release()

    @staticmethod
    def dataLog(filename, logStr):
        cmd = {'cmd': 'dataLog', 'logLine': logStr, 'filename': filename}
        try:
            response = Web.sendDataMsgFromThread(cmd, timeout=5)
            if response.get('status') != 200:
                logging.warning('Web: dataLog: error {}'.format(response.get('error')))
                return False
        except Exception as err:
            logging.warning('Web: dataLog: error {}'.format(err))
            return False
        return True

    @staticmethod
    def userLog(logStr):
        cmd = {'cmd': 'userLog', 'value': logStr}
        Web.sendUserMsgFromThread(json.dumps(cmd))

    @staticmethod
    def sessionLog(logStr):
        cmd = {'cmd': 'sessionLog', 'value': logStr}
        Web.sendUserMsgFromThread(json.dumps(cmd))

    @staticmethod
    def setUserError(errStr):
        response = {'cmd': 'error', 'error': errStr}
        Web.sendUserMsgFromThread(json.dumps(response))

    @staticmethod
    def sendUserConfig(config, filename=''):
        response = {'cmd': 'config', 'value': config, 'filename': filename}
        Web.sendUserMsgFromThread(json.dumps(response))

    @staticmethod
    def sendUserDataVals(dataPoints):
        response = {'cmd': 'dataPoints', 'value': dataPoints}
        Web.sendUserMsgFromThread(json.dumps(response))

    @staticmethod
    def sendDataMsgFromThreadAsync(msg):
        if Web.wsDataConn is None:
            raise StateError("ProjectInterface: FileServer not connected. Please run the fileServer.")
        callId = msg.get('callId')
        if not callId:
            callbackStruct = StructDict()
            callbackStruct.dataConn = Web.wsDataConn
            callbackStruct.numResponses = 0
            callbackStruct.responses = []
            callbackStruct.semaphore = threading.Semaphore(value=0)
            callbackStruct.timeStamp = time.time()
            callbackStruct.msg = msg.copy()
            if 'data' in callbackStruct.msg:
                del callbackStruct.msg['data']
            Web.callbackLock.acquire()
            try:
                Web.dataSequenceNum += 1
                callId = Web.dataSequenceNum
                callbackStruct.callId = callId
                msg['callId'] = callId
                Web.dataCallbacks[callId] = callbackStruct
            finally:
                Web.callbackLock.release()
            Web.ioLoopInst.add_callback(Web.sendDataMessage, msg)
        return callId

    @staticmethod
    def getDataMsgResponse(callId, timeout=None):
        Web.callbackLock.acquire()
        try:
            callbackStruct = Web.dataCallbacks.get(callId, None)
            if callbackStruct is None:
                raise StateError('sendDataMsgFromThread: no callbackStruct found for callId {}'.format(callId))
        finally:
            Web.callbackLock.release()
        # wait for semaphore signal indicating a callback for this callId has occurred
        signaled = callbackStruct.semaphore.acquire(timeout=timeout)
        if signaled is False:
            raise TimeoutError("sendDataMessage: Data Request Timed Out({}) {}".
                               format(timeout, callbackStruct.msg))
        Web.callbackLock.acquire()
        try:
            # Remove from front of list not back to stay in order
            # Can test removing from back of list to make sure out-of-order works too
            response = callbackStruct.responses.pop(0)
            if 'data' in response:
                status = response.get('status', -1)
                numParts = response.get('numParts', 1)
                complete = (callbackStruct.numResponses == numParts and len(callbackStruct.responses) == 0)
                if complete or status != 200:
                    # End the multipart transfer
                    response['incomplete'] = False
                    Web.dataCallbacks.pop(callId, None)
                else:
                    response['incomplete'] = True
        except IndexError:
            raise StateError('sendDataMessage: callbackStruct.response is None for command {}'.
                             format(callbackStruct.msg))
        finally:
            Web.callbackLock.release()
        response['callId'] = callbackStruct.callId
        return response

    @staticmethod
    def sendDataMsgFromThread(msg, timeout=None):
        callId = Web.sendDataMsgFromThreadAsync(msg)
        response = Web.getDataMsgResponse(callId, timeout=timeout)
        return response

    @staticmethod
    def sendDataMessage(cmd):
        ''' This function is called within the ioloop thread by scheduling the call'''
        Web.wsConnLock.acquire()
        try:
            msg = json.dumps(cmd)
            Web.wsDataConn.write_message(msg)
        except Exception as err:
            errStr = 'sendDataMessage error: type {}: {}'.format(type(err), str(err))
            raise RTError(errStr)
        finally:
            Web.wsConnLock.release()

    @staticmethod
    def dataCallback(client, message):
        response = json.loads(message)
        if 'cmd' not in response:
            raise StateError('dataCallback: cmd field missing from response: {}'.format(response))
        if 'status' not in response:
            raise StateError('dataCallback: status field missing from response: {}'.format(response))
        if 'callId' not in response:
            raise StateError('dataCallback: callId field missing from response: {}'.format(response))
        status = response.get('status', -1)
        callId = response.get('callId', -1)
        origCmd = response.get('cmd', 'NoCommand')
        logging.log(DebugLevels.L6, "callback {}: {} {}".format(callId, origCmd, status))
        # Thread Synchronized Section
        Web.callbackLock.acquire()
        try:
            callbackStruct = Web.dataCallbacks.get(callId, None)
            if callbackStruct is None:
                logging.error('ProjectInterface: dataCallback callId {} not found, current callId {}'
                              .format(callId, Web.dataSequenceNum))
                return
            if callbackStruct.callId != callId:
                # This should never happen
                raise StateError('callId mismatch {} {}'.format(callbackStruct.callId, callId))
            callbackStruct.responses.append(response)
            callbackStruct.numResponses += 1
            callbackStruct.semaphore.release()
        except Exception as err:
            logging.error('ProjectInterface: dataCallback error: {}'.format(err))
            raise err
        finally:
            Web.callbackLock.release()
        if time.time() > Web.cbPruneTime:
            Web.cbPruneTime = time.time() + 60
            Web.pruneCallbacks()

    @staticmethod
    def pruneCallbacks():
        numWaitingCallbacks = len(Web.dataCallbacks)
        if numWaitingCallbacks == 0:
            return
        logging.info('Web pruneCallbacks: checking {} callbacks'.format(numWaitingCallbacks))
        Web.callbackLock.acquire()
        try:
            maxSeconds = 300
            now = time.time()
            # copy the keys since entries may be deleted while iterating
            for callId in list(Web.dataCallbacks.keys()):
                # check how many seconds old each callback is
                cb = Web.dataCallbacks[callId]
                secondsElapsed = now - cb.timeStamp
                if secondsElapsed > maxSeconds:
                    # older than max threshold so remove
                    cb.status = 400
                    cb.error = 'Callback time exceeded max threshold {}s {}s'.format(maxSeconds, secondsElapsed)
                    cb.responses.append({'cmd': 'unknown', 'status': cb.status, 'error': cb.error})
                    for i in range(len(cb.responses)):
                        cb.semaphore.release()
                    del Web.dataCallbacks[callId]
        except Exception as err:
            logging.error('Web pruneCallbacks: error {}'.format(err))
        finally:
            Web.callbackLock.release()

    @staticmethod
    def sendUserMsgFromThread(msg):
        Web.ioLoopInst.add_callback(Web.sendUserMessage, msg)

    @staticmethod
    def sendUserMessage(msg):
        Web.wsConnLock.acquire()
        try:
            for client in Web.wsBrowserMainConns:
                client.write_message(msg)
        finally:
            Web.wsConnLock.release()

    @staticmethod
    def sendBiofeedbackMsgFromThread(msg):
        Web.ioLoopInst.add_callback(Web.sendBiofeedbackMessage, msg)

    @staticmethod
    def sendBiofeedbackMessage(msg):
        Web.wsConnLock.acquire()
        try:
            for client in Web.wsBiofeedbackConns:
                client.write_message(msg)
        finally:
            Web.wsConnLock.release()

    @staticmethod
    def addResultValue(request):
        cmd = request.get('cmd')
        if cmd != 'resultValue':
            logging.warning('addResultValue: wrong cmd type {}'.format(cmd))
            return
        runId = request.get('runId')
        x = request.get('trId')
        y = request.get('value')
        if not isinstance(runId, numbers.Number) or runId <= 0:
            logging.warning('addResultValue: runId wrong val {}'.format(runId))
            return
        # Make sure resultVals has at least as many arrays as runIds
        for i in range(len(Web.resultVals), runId):
            Web.resultVals.append([])
        if not isinstance(x, numbers.Number):
            # clear plot for this runId
            Web.resultVals[runId-1] = []
            return
        # logging.info("Add resultVal {}, {}".format(x, y))
        runVals = Web.resultVals[runId-1]
        for i, val in enumerate(runVals):
            if val['x'] == x:
                runVals[i] = {'x': x, 'y': y}
                return
        runVals.append({'x': x, 'y': y})

    class UserHttp(tornado.web.RequestHandler):
        def get_current_user(self):
            return self.get_secure_cookie("login", max_age_days=maxDaysLoginCookieValid)

        @tornado.web.authenticated
        def get(self):
            full_path = os.path.join(Web.htmlDir, Web.webIndexPage)
            logging.log(DebugLevels.L6, 'Index request: pwd: {}'.format(full_path))
            Web.httpLock.acquire()
            try:
                self.render(full_path)
            finally:
                Web.httpLock.release()

    class BiofeedbackHttp(tornado.web.RequestHandler):
        def get_current_user(self):
            return self.get_secure_cookie("login", max_age_days=maxDaysLoginCookieValid)

        @tornado.web.authenticated
        def get(self):
            full_path = os.path.join(Web.htmlDir, Web.webBiofeedPage)
            logging.log(DebugLevels.L6, 'Subject feedback http request: pwd: {}'.format(full_path))
            Web.httpLock.acquire()
            try:
                self.render(full_path)
            finally:
                Web.httpLock.release()

    class LoginHandler(tornado.web.RequestHandler):
        loginAttempts = {}
        loginRetryDelay = 10

        def get(self):
            params = {
                "error_msg": '',
                "nextpage": self.get_argument("next", "/")
            }
            full_path = os.path.join(Web.htmlDir, Web.webLoginPage)
            self.render(full_path,  **params)

        def post(self):
            errorReply = None
            login_name = ''
            try:
                login_name = self.get_argument("name")
                login_passwd = self.get_argument("password")
                if Web.testMode is True:
                    if login_name == login_passwd == 'test':
                        self.set_secure_cookie("login", login_name, expires_days=maxDaysLoginCookieValid)
                        self.redirect(self.get_query_argument('next', '/'))
                        return
                passwdFilename = os.path.join(certsDir, 'passwd')
                passwdDict = loadPasswdFile(passwdFilename)
                if login_name in passwdDict:
                    errorReply = self.checkRetry(login_name)
                    if errorReply is None:
                        hashed_passwd = passwdDict[login_name]
                        # checkpw expects bytes array rather than string so use .encode()
                        if bcrypt.checkpw(login_passwd.encode(), hashed_passwd.encode()) is True:
                            # Remove failed attempts entry
                            del Web.LoginHandler.loginAttempts[login_name]
                            self.set_secure_cookie("login", login_name, expires_days=maxDaysLoginCookieValid)
                            self.redirect(self.get_query_argument('next', '/'))
                            return
                        else:
                            errorReply = 'Login Error: Login Incorrect'
                else:
                    errorReply = self.checkRetry('invalid_user')
                    if errorReply is None:
                        errorReply = 'Login Error: Login Incorrect'
            except Exception as err:
                errorReply = 'Exception: {} {}'.format(type(err), err)
            assert errorReply is not None, "Assert: Web.LoginHandler.error not empty"
            logging.warning('Login Failure: {}'.format(login_name))
            params = {
                "error_msg": errorReply,
                "nextpage": self.get_query_argument('next', '/')
            }
            full_path = os.path.join(Web.htmlDir, Web.webLoginPage)
            self.render(full_path,  **params)

        def checkRetry(self, user):
            '''Keep a dictionary with one entry per username. Any user not in the
                passwd file will be entered as 'invalid_user'. Record login failure
                count and timestamp for when the next retry is allowed. Reset failed
                retry count on successful login. Return message with how many seconds
                until next login attempt is allowed.
            '''
            now = time.time()
            loginAttempts = Web.LoginHandler.loginAttempts
            retryTime = now + Web.LoginHandler.loginRetryDelay
            loginTry = loginAttempts.get(user)
            if loginTry is not None:
                failedLogins = loginTry.get('failedLogins', 0)
                nextAllowedTime = loginTry.get('nextAllowedTime', now)
                # print('user: {}, tries {}, nextTime {}'.format(user, failedLogins, nextAllowedTime))
                if nextAllowedTime > now:
                    delaySecs = loginTry['nextAllowedTime'] - now
                    return 'Next login retry allowed in {} sec'.format(int(delaySecs))
                loginTry['failedLogins'] = failedLogins + 1
                loginTry['nextAllowedTime'] = retryTime
                loginAttempts[user] = loginTry
            else:
                loginAttempts[user] = {'failedLogins': 1, 'nextAllowedTime': retryTime}
            return None

    class LogoutHandler(tornado.web.RequestHandler):
        def get(self):
            self.clear_cookie("login")
            self.redirect("/login")

    class BiofeedbackWebSocket(tornado.websocket.WebSocketHandler):
        # TODO - combine these in-common setups into helper functions
        def open(self):
            user_id = self.get_secure_cookie("login")
            if not user_id:
                response = {'cmd': 'error', 'error': 'Websocket authentication failed'}
                self.write_message(json.dumps(response))
                self.close()
                return
            logging.log(DebugLevels.L1, "Biofeedback WebSocket opened")
            self.set_nodelay(True)
            Web.wsConnLock.acquire()
            try:
                Web.wsBiofeedbackConns.append(self)
            finally:
                Web.wsConnLock.release()

        def on_close(self):
            logging.log(DebugLevels.L1, "Biofeedback WebSocket closed")
            Web.wsConnLock.acquire()
            try:
                if self in Web.wsBiofeedbackConns:
                    Web.wsBiofeedbackConns.remove(self)
            finally:
                Web.wsConnLock.release()

        def on_message(self, message):
            Web.browserBiofeedCallback(self, message)

    class UserWebSocket(tornado.websocket.WebSocketHandler):
        # def get(self, *args, **kwargs):
        #     if self.get_secure_cookie("login"):
        #         super(Web.BiofeedbackWebSocket, self).get(*args, **kwargs)
        #     else:
        #         What to do here when authentication fails?
        #         return

        def open(self):
            user_id = self.get_secure_cookie("login")
            if not user_id:
                response = {'cmd': 'error', 'error': 'Websocket authentication failed'}
                self.write_message(json.dumps(response))
                self.close()
                return
            logging.log(DebugLevels.L1, "User WebSocket opened")
            self.set_nodelay(True)
            Web.wsConnLock.acquire()
            try:
                Web.wsBrowserMainConns.append(self)
            finally:
                Web.wsConnLock.release()

        def on_close(self):
            logging.log(DebugLevels.L1, "User WebSocket closed")
            Web.wsConnLock.acquire()
            try:
                if self in Web.wsBrowserMainConns:
                    Web.wsBrowserMainConns.remove(self)
                else:
                    logging.log(DebugLevels.L1, "on_close: connection not in list")
            finally:
                Web.wsConnLock.release()

        def on_message(self, message):
            Web.browserMainCallback(self, message)

    class EventWebSocket(tornado.websocket.WebSocketHandler):
        def open(self):
            user_id = self.get_secure_cookie("login")
            if not user_id:
                response = {'cmd': 'error', 'error': 'Websocket authentication failed'}
                self.write_message(json.dumps(response))
                self.close()
                return
            logging.log(DebugLevels.L1, "Event WebSocket opened")
            self.set_nodelay(True)
            Web.wsConnLock.acquire()
            try:
                Web.wsEventConns.append(self)
            finally:
                Web.wsConnLock.release()

        def on_close(self):
            logging.log(DebugLevels.L1, "Event WebSocket closed")
            Web.wsConnLock.acquire()
            try:
                if self in Web.wsEventConns:
                    Web.wsEventConns.remove(self)
            finally:
                Web.wsConnLock.release()

        def on_message(self, message):
            Web.eventCallback(self, message)

    class DataWebSocket(tornado.websocket.WebSocketHandler):
        def open(self):
            user_id = self.get_secure_cookie("login")
            if not user_id:
                logging.warning('Data websocket authentication failed')
                response = {'cmd': 'error', 'status': 401, 'error': 'Websocket authentication failed'}
                self.write_message(json.dumps(response))
                self.close()
                return
            logging.log(DebugLevels.L1, "Data WebSocket opened")
            self.set_nodelay(True)
            Web.wsConnLock.acquire()
            try:
                # temporarily cache any previous connection
                prevDataConn = Web.wsDataConn
                # add new connection
                Web.wsDataConn = self
                # If there was a previous connection close it
                if prevDataConn is not None:
                    prevDataConn.close()
            except Exception as err:
                logging.error('ProjectInterface: Open Data Socket error: {}'.format(err))
            finally:
                Web.wsConnLock.release()
            print('DataWebSocket: connected {}'.format(self.request.remote_ip))

        def on_close(self):
            if Web.wsDataConn == self:
                Web.wsConnLock.acquire()
                Web.wsDataConn = None
                Web.wsConnLock.release()
                logging.log(DebugLevels.L1, "Data WebSocket closed")
            else:
                logging.log(DebugLevels.L1, "on_close: Data WebSocket mismatch")
            self.close_pending_requests()

        def close_pending_requests(self):
            Web.callbackLock.acquire()
            try:
                # signal the close to anyone waiting for replies
                callIdsToRemove = []
                for callId, cb in Web.dataCallbacks.items():
                    if cb.dataConn == self:
                        callIdsToRemove.append(callId)
                        cb.status = 499
                        cb.error = 'Client closed connection'
                        # TODO - check this logic
                        cb.responses.append({'cmd': 'unknown', 'status': cb.status, 'error': cb.error})
                        for i in range(len(cb.responses)):
                            cb.semaphore.release()
                for callId in callIdsToRemove:
                    Web.dataCallbacks.pop(callId, None)
            finally:
                Web.callbackLock.release()

        def on_message(self, message):
            try:
                Web.dataCallback(self, message)
            except Exception as err:
                logging.error('DataWebSocket: on_message error: {}'.format(err))
Example 16
defaultConfig = os.path.join(currPath, 'conf/faceMatching_organized.toml')

if __name__ == "__main__":
    installLoggers(logging.INFO,
                   logging.INFO,
                   filename=os.path.join(currPath, 'logs/webServer.log'))

    argParser = argparse.ArgumentParser()
    argParser.add_argument('--filesremote',
                           '-x',
                           default=False,
                           action='store_true',
                           help='dicom files retrieved from remote server')
    argParser.add_argument('--config',
                           '-c',
                           default=defaultConfig,
                           type=str,
                           help='experiment file (.json or .toml)')
    args = argParser.parse_args()
    # Set the path to the fMRI Python script to run
    params = StructDict({
        'fmriPyScript': 'projects/faceMatching/faceMatching.py',
        'filesremote': args.filesremote,
    })

    cfg = loadConfigFile(args.config)

    web = Web()
    web.start(params, cfg)
Example 17
params = {
    'legend.fontsize': 'large',
    'figure.figsize': (5, 3),
    'axes.labelsize': 'x-large',
    'axes.titlesize': 'x-large',
    'xtick.labelsize': 'x-large',
    'ytick.labelsize': 'x-large'
}
font = {'weight': 'bold', 'size': 22}
plt.rc('font', **font)
defaultConfig = os.path.join(os.getcwd(), 'conf/greenEyes_cluster.toml')
cfg = loadConfigFile(defaultConfig)
params = StructDict({
    'config': defaultConfig,
    'runs': '1',
    'scans': '9',
    'webpipe': 'None',
    'webfilesremote': False
})
cfg = greenEyes.initializeGreenEyes(defaultConfig, params)
# date doesn't have to be right, but just make sure subject number, session number, computers are correct


def getClassificationAndScore(data):
    correct_prob = data['correct_prob'][0, :]
    max_ratio = data['max_ratio'][0, :]
    return correct_prob, max_ratio


def getPatternsData(subjectNum, runNum):
    bids_id = 'sub-{0:03d}'.format(subjectNum)
Example 18
def main():
    logger = logging.getLogger()
    logger.setLevel(logLevel)
    logging.info('GREEN EYES: first log message!')
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config', '-c', default=defaultConfig, type=str,
                       help='experiment config file (.json or .toml)')
    argParser.add_argument('--runs', '-r', default='', type=str,
                       help='Comma separated list of run numbers')
    argParser.add_argument('--scans', '-s', default='', type=str,
                       help='Comma separated list of scan number')
    argParser.add_argument('--deleteTmpNifti', '-d', default='1', type=str,
                       help='Set to 0 if rerunning during a single scanning session after an error')
    # create a named-pipe communication link to send requests and receive responses
    argParser.add_argument('--commpipe', '-q', default=None, type=str,
                       help='Named pipe to communicate with projectInterface')
    argParser.add_argument('--filesremote', '-x', default=False, action='store_true',
                       help='dicom files retrieved from remote server')

    args = argParser.parse_args()
    print(args)
    cfg = initializeGreenEyes(args.config,args)

    # DELETE ALL FILES IF FLAGGED TO # 
    if args.deleteTmpNifti == '1':
        deleteTmpFiles(cfg)
    else:
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        print('NOT DELETING NIFTIS IN tmp/convertedNiftis')
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
    # DELETE ALL FILES IF FLAGGED TO # 

    # comm pipe
    projComm = projUtils.initProjectComm(args.commpipe,args.filesremote)
    # initialize file interface class -- for now only local
    fileInterface = FileInterface(filesremote=args.filesremote, commPipes=projComm)
    # initialize watching in the particular directory
    fileInterface.initWatch(cfg.dicomDir, cfg.dicomNamePattern, cfg.minExpectedDicomSize) 
    story_TRs = cfg.story_TR_2 - cfg.story_TR_1 + 1
    ### MAIN PROCESSING ###
    nRuns = len(cfg.runNum)
    for runIndex in np.arange(nRuns):
        runData = StructDict()
        runData.cheating_probability = np.zeros((cfg.nStations,))
        runData.zTransferred = np.zeros((cfg.nStations,))
        runData.correct_prob = np.zeros((cfg.nStations,))
        runData.interpretation = getSubjectInterpretation(cfg)
        runData.badVoxels = {}
        runData.dataForClassification = {}
        all_data = np.zeros((cfg.nVox,cfg.nTR_run + 1)) # adding 1 because we're not starting at 0 with the indexing
        runData.story_data = np.zeros((cfg.nVox,story_TRs))

        makeRunHeader(cfg,runIndex)
        run = cfg.runNum[runIndex]
        scanNum = cfg.scanNum[runIndex]
        storyTRCount = 0
        stationInd=0
        for TRFilenum in np.arange(cfg.nTR_skip+1,cfg.nTR_run+1):
        # for TRFilenum in np.arange(11,54):
            if TRFilenum == cfg.nTR_skip+1: # wait until run starts
                timeout_file = 180
            else:
                timeout_file = 5
            A = time.time()
            dicomData = readRetryDicomFromFileInterface(fileInterface, getDicomFileName(cfg, scanNum, TRFilenum), timeout=timeout_file)
            full_nifti_name = convertToNifti(TRFilenum,scanNum,cfg,dicomData)
            registeredFileName = registerNewNiftiToMNI(cfg,full_nifti_name)
            maskedData = apply_mask(registeredFileName,cfg.mask_filename)
            all_data[:,TRFilenum] = maskedData
            B = time.time()
            print('read to mask time: {:5f}'.format(B-A))
            if TRFilenum >= cfg.fileNum_story_TR_1 and TRFilenum <= cfg.fileNum_story_TR_2: # we're at a story TR now
                runData.story_data[:,storyTRCount] = maskedData
                if np.any(storyTRCount == cfg.last_tr_in_station.astype(int)):
                    # NOW PREPROCESS AND CLASSIFY
                    stationInd = np.argwhere(storyTRCount == cfg.last_tr_in_station.astype(int))[0][0]
                    A = time.time()
                    runData = preprocessAndPredict(cfg,runData,storyTRCount)
                    B = time.time()
                    print('preprocessAndPredict time: {:5f}'.format(B-A))
                    text_to_save = '{0:05f}'.format(runData.correct_prob[stationInd])
                    file_name_to_save = getStationClassoutputFilename(run, stationInd)
                    if cfg.mode == 'cloud':
                        full_filename_to_save = os.path.join(cfg.intelrt.subject_full_day_path,file_name_to_save) 
                    else:
                        full_filename_to_save = os.path.join(cfg.subject_full_day_path,file_name_to_save) 
                    fileInterface.putTextFile(full_filename_to_save,text_to_save)
                    
                    if args.commpipe:    
                        # JUST TO PLOT ON WEB SERVER

                        projUtils.sendResultToWeb(projComm, run, int(stationInd), runData.correct_prob[stationInd])
                storyTRCount += 1
            TRheader = makeTRHeader(cfg,runIndex,TRFilenum,storyTRCount-1,stationInd,runData.correct_prob[stationInd])

        # SAVE OVER RUN NP FILE
        runData.scanNum = scanNum # save scanning number
        runData.subjectName = cfg.subjectName
        runData.dicomDir = cfg.dicomDir
        run_filename = getRunFilename(cfg.sessionId, run)
        full_run_filename_to_save = os.path.join(cfg.subject_full_day_path,run_filename)
        sio.savemat(full_run_filename_to_save, runData, appendmat=False)

    # DELETE ALL FILES IF FLAGGED TO (repeated at the end of the run as well) #
    if args.deleteTmpNifti == '1':
        deleteTmpFiles(cfg)
    else:
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        print('NOT DELETING NIFTIS IN tmp/convertedNiftis')
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
    sys.exit(0)
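
A note on the wait logic in the loop above: the first expected DICOM of a run gets a long timeout (180 s) because the scanner may not have started yet, while subsequent TRs should arrive roughly once per repetition time, so a short timeout (5 s) suffices. A minimal sketch of that pattern, with a hypothetical helper name not part of rtCommon:

def timeoutForTR(TRFilenum, firstTR, firstTimeout=180, steadyTimeout=5):
    # long wait only for the first file of the run; short wait afterwards
    return firstTimeout if TRFilenum == firstTR else steadyTimeout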
Example no. 19
    def startServers(self,
                     allowedDirs=defaultAllowedDirs,
                     allowedFileTypes=defaultAllowedFileTypes,
                     dataRemote=True,
                     subjectRemote=True,
                     exampleRemote=False,
                     projectArgs=defaultProjectArgs):
        if exampleRemote is True:
            # example remote uses the wsData websocket channel
            dataRemote = True

        projectArgs['dataRemote'] = dataRemote
        projectArgs['subjectRemote'] = subjectRemote

        # Start the projectServer running
        isRunningEvent = multiprocessing.Event()
        self.projectProc = multiprocessing.Process(target=runProjectServer,
                                                   args=(projectArgs,
                                                         isRunningEvent))
        self.projectProc.start()
        isRunningEvent.wait()

        if dataRemote is True:
            # Start the dataService running
            args = StructDict({
                'server': f'localhost:{testPort}',
                'interval': 0.1,
                'allowedDirs': allowedDirs,
                'allowedFileTypes': allowedFileTypes,
                'username': '******',
                'password': '******',
                'test': True,
            })
            isRunningEvent = multiprocessing.Event()
            self.dataProc = multiprocessing.Process(target=runDataService,
                                                    args=(args,
                                                          isRunningEvent))
            self.dataProc.start()
            isRunningEvent.wait()
        else:
            self.dataProc = None

        if subjectRemote is True:
            # Start the subjectService running
            args = StructDict({
                'server': f'localhost:{testPort}',
                'interval': 0.1,
                'username': '******',
                'password': '******',
                'test': True,
            })
            isRunningEvent = multiprocessing.Event()
            self.subjectProc = multiprocessing.Process(
                target=runSubjectService, args=(args, isRunningEvent))
            self.subjectProc.start()
            isRunningEvent.wait()
        else:
            self.subjectProc = None

        if exampleRemote is True:
            # Start the exampleService running
            args = StructDict({
                'server': f'localhost:{testPort}',
                'interval': 0.1,
                'username': '******',
                'password': '******',
                'test': True,
            })
            isRunningEvent = multiprocessing.Event()
            self.exampleProc = multiprocessing.Process(
                target=runExampleService, args=(args, isRunningEvent))
            self.exampleProc.start()
            isRunningEvent.wait()
        else:
            self.exampleProc = None

        return True
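
A hedged usage sketch for startServers in a test fixture; the stopServers teardown method is an assumption, since its counterpart is not shown in this document:

servers = BackgroundTestServers()
servers.startServers(dataRemote=True, subjectRemote=False)
try:
    pass  # exercise client code against the project server on testPort
finally:
    servers.stopServers()  # assumed teardown counterpart (not shown here)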
Example no. 20
def main():
    logger = logging.getLogger()
    logger.setLevel(logLevel)
    logging.info('amygActivation: first log message!')
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config',
                           '-c',
                           default=defaultConfig,
                           type=str,
                           help='experiment config file (.json or .toml)')
    argParser.add_argument('--runs',
                           '-r',
                           default='',
                           type=str,
                           help='Comma separated list of run numbers')
    argParser.add_argument('--scans',
                           '-s',
                           default='',
                           type=str,
                           help='Comma separated list of scan numbers')
    argParser.add_argument(
        '--deleteTmpNifti',
        '-d',
        default='1',
        type=str,
        help='Set to 0 if rerunning within a single scanning session after an error')

    args = argParser.parse_args()

    # Initialize the RPC connection to the projectInterface
    # This will give us a dataInterface for retrieving files and
    # a subjectInterface for giving feedback
    clientInterface = ClientInterface()
    dataInterface = clientInterface.dataInterface
    subjInterface = clientInterface.subjInterface
    webInterface = clientInterface.webInterface
    args.dataRemote = dataInterface.isRunningRemote()

    cfg = utils.loadConfigFile(args.config)
    cfg = initialize(cfg, args)

    # DELETE ALL FILES IF FLAGGED (DEFAULT) #
    if args.deleteTmpNifti == '1':
        deleteTmpFiles(cfg, args)
    else:
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        print('NOT DELETING NIFTIS IN tmp/convertedNiftis')
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')

    createTmpFolder(cfg, args)

    #### MAIN PROCESSING ###
    nRuns = len(cfg.runNum)
    for runIndex in np.arange(nRuns):
        # Steps for each run:
        # 1. load the run regressor (a regressor built per TR)
        # 2. find the happy face trials
        # 3. find the rest TRs right before each one
        # At every TR --> register to MNI, mask, etc.
        # 4. z-score the preceding rest data (convert + register as before)
        # 5. calculate percent signal change over the ROI
        # 6. save as a text file (every TR -- the display can smooth it)

        # runNum is 1-based: the actual run number, which may be out of order
        runNum = cfg.runNum[runIndex]
        runId = makeRunHeader(cfg, args, runIndex)
        run = cfg.runNum[runIndex]  # alias used for output filenames below
        # create run folder
        runFolder = createRunFolder(cfg, args, runNum)
        scanNum = cfg.scanNum[runIndex]
        regressor = makeRunReg(cfg,
                               args,
                               dataInterface,
                               runNum,
                               runFolder,
                               saveMat=1)

        # initialize data stream
        dicomScanNamePattern = utils.stringPartialFormat(
            cfg.dicomNamePattern, 'SCAN', scanNum)
        streamId = dataInterface.initScannerStream(cfg.dicomDir,
                                                   dicomScanNamePattern,
                                                   cfg.minExpectedDicomSize)

        happy_TRs = findConditionTR(regressor, int(cfg.HAPPY))
        happy_TRs_shifted = happy_TRs + cfg.nTR_shift
        happy_TRs_shifted_filenum = happy_TRs_shifted + cfg.nTR_skip  # to account for first 10 files that we're skipping
        happy_blocks = list(split_tol(happy_TRs_shifted, 1))
        TR_per_block = cfg.nTR_block

        fixation_TRs = findConditionTR(regressor, int(cfg.REST))
        fixation_TRs_shifted = fixation_TRs + cfg.nTR_shift
        fixation_blocks = list(split_tol(fixation_TRs_shifted, 1))

        runData = StructDict()
        runData.all_data = np.zeros(
            (cfg.nVox[cfg.useMask], cfg.nTR_run - cfg.nTR_skip))
        runData.percent_change = np.zeros((cfg.nTR_run - cfg.nTR_skip, ))
        runData.percent_change[:] = np.nan
        runData.badVoxels = np.array([])

        TRindex = 0
        for TRFilenum in np.arange(cfg.nTR_skip + 1,
                                   cfg.nTR_run + 1):  # iterate through all TRs
            if TRFilenum == cfg.nTR_skip + 1:  # wait until run starts
                timeout_file = 180
            else:
                timeout_file = 5
            A = time.time()
            dicomFilename = dicomScanNamePattern.format(TR=TRFilenum)
            print(f'Get Dicom: {dicomFilename}')
            dicomData = dataInterface.getImageData(streamId, int(TRFilenum),
                                                   timeout_file)
            if dicomData is None:
                print('Error: getImageData returned None')
                return
            full_nifti_name = convertToNifti(cfg, args, TRFilenum, scanNum,
                                             dicomData)
            print(full_nifti_name)
            print(cfg.MASK_transformed[cfg.useMask])
            maskedData = apply_mask(full_nifti_name,
                                    cfg.MASK_transformed[cfg.useMask])
            runData.all_data[:, TRindex] = maskedData
            B = time.time()
            print('read to mask time: {:5f}'.format(B - A))

            if TRindex in happy_TRs_shifted:  # we're at a happy block
                # now take previous fixation block for z scoring
                # index of the happy block containing this TR
                this_block = next(b for b in range(len(happy_blocks))
                                  if TRindex in happy_blocks[b])
                fixation_this_block = fixation_blocks[this_block]
                avg_activity, runData = getAvgSignal(fixation_this_block,
                                                     runData, TRindex, cfg)
                runData.percent_change[TRindex] = calculatePercentChange(
                    avg_activity, runData.all_data[:, TRindex])

                text_to_save = '{0:05f}'.format(
                    runData.percent_change[TRindex])
                file_name_to_save = getOutputFilename(
                    run,
                    TRFilenum)  # save as the actual file number, not index
                # now we want to always send this back to the local computer running the display
                full_file_name_to_save = os.path.join(
                    cfg.local.subject_full_day_path, runId, file_name_to_save)
                # Send classification result back to the console computer
                try:
                    dataInterface.putFile(full_file_name_to_save, text_to_save)
                except Exception as err:
                    print('Error putFile: ' + str(err))
                    return
                # JUST TO PLOT ON WEB SERVER
                subjInterface.setResult(run, int(TRFilenum),
                                        float(runData.percent_change[TRindex]))
                webInterface.plotDataPoint(
                    run, int(TRFilenum),
                    float(runData.percent_change[TRindex]))
            TRheader = makeTRHeader(cfg, runIndex, TRFilenum, TRindex,
                                    runData.percent_change[TRindex])
            TRindex += 1

        # SAVE OVER RUN
        runData.scanNum = scanNum  # save scanning number
        runData.subjectName = cfg.subjectName
        runData.dicomDir = cfg.dicomDir
        run_filename = getRunFilename(cfg.sessionId, run)
        full_run_filename_to_save = os.path.join(runFolder, run_filename)
        sio.savemat(full_run_filename_to_save, runData, appendmat=False)

    sys.exit(0)
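
The feedback value computed above is the percent signal change of the current TR relative to the average of the preceding fixation block. A minimal sketch of that computation, assuming calculatePercentChange reduces to a voxel-mean ratio (the project's exact definition may differ):

import numpy as np

def percentChangeSketch(avg_activity, tr_data):
    # mean percent change across voxels relative to the baseline average
    return 100.0 * np.mean((tr_data - avg_activity) / avg_activity)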
Example no. 21
import os

from rtCommon.structDict import StructDict
from rtCommon.clientInterface import ClientInterface
from projects.openNeuroClient import openNeuroClient
from tests.backgroundTestServers import BackgroundTestServers
from tests.common import rtCloudPath, testPort, tmpDir

openNeuroProjectPath = os.path.join(rtCloudPath, 'projects', 'openNeuroClient')
openNeuroClientPath = os.path.join(openNeuroProjectPath, 'openNeuroClient.py')

allowedDirs = [tmpDir, openNeuroProjectPath]
allowedFileTypes = ['.dcm', '.txt']

openNeuroCfg = StructDict({
    'sessionId': "openNeuroTest",
    'dsAccessionNumber': 'ds002338',
    'subjectName': "xp201",
    'subjectDay': 1,
    'runNum': [1],
    'scanNum': [1]
})

openNeuroArgs = StructDict({
    'config': openNeuroCfg,
    'mainScript': openNeuroClientPath,
    'port': testPort,
    'test': True
})


class TestOpenNeuroClient:
    serversForTests = None
    pingCount = 0
Example no. 22
if __name__ == "__main__":
    installLoggers(logging.INFO,
                   logging.INFO,
                   filename=os.path.join(currPath, 'logs/webServer.log'))

    argParser = argparse.ArgumentParser()
    argParser.add_argument('--filesremote',
                           '-x',
                           default=False,
                           action='store_true',
                           help='dicom files retrieved from remote server')
    argParser.add_argument('--config',
                           '-c',
                           default=defaultConfig,
                           type=str,
                           help='experiment file (.json or .toml)')
    args = argParser.parse_args()
    # Set the path to the fMRI Python script to run
    params = StructDict({
        'fmriPyScript': expScript,
        'initScript': initScript,
        'finalizeScript': finalizeScript,
        'filesremote': args.filesremote,
        'port': 8888,
    })

    cfg = loadConfigFile(args.config)

    web = Web()
    web.start(params, cfg)
Example no. 23
def unpackDataMessage(msg):
    global multiPartDataCache
    try:
        if msg.get('status') != 200:
            # On error delete any partial transfers
            fileHash = msg.get('fileHash')
            if fileHash is not None and fileHash in multiPartDataCache:
                del multiPartDataCache[fileHash]
            raise RequestError('unpackDataMessage: {} {}'.format(
                msg.get('status'), msg.get('error')))
        data = decodeMessageData(msg)
        multipart = msg.get('multipart', False)
        numParts = msg.get('numParts', 1)
        partId = msg.get('partId', 1)
        logging.debug('unpackDataMessage: callid {}, part {} of {}'.format(
            msg.get('callId'), partId, numParts))
        if multipart is False or numParts == 1:
            # All data sent in a single message
            return data
        else:
            assert numParts > 1
            assert multipart is True
            if partId > numParts:
                raise RequestError(
                    'unpackDataMessage: Inconsistent parts: partId {} exceeds numParts {}'
                    .format(partId, numParts))
            # get the data structure for this data
            fileHash = msg.get('fileHash')
            if partId > 1:
                partialDataStruct = multiPartDataCache.get(fileHash)
                if partialDataStruct is None:
                    raise RequestError(
                        'unpackDataMessage: partialDataStruct not found')
            else:
                partialDataStruct = StructDict({
                    'cachedDataParts': [None] * numParts,
                    'numCachedParts': 0
                })
                multiPartDataCache[fileHash] = partialDataStruct
            partialDataStruct.cachedDataParts[partId - 1] = data
            partialDataStruct.numCachedParts += 1
            if partialDataStruct.numCachedParts == numParts:
                # All parts of the multipart transfer have been received
                # Concatenate the data into one bytearray
                data = bytearray()
                for i in range(numParts):
                    dataPart = partialDataStruct.cachedDataParts[i]
                    if dataPart is None:
                        raise StateError(
                            'unpackDataMessage: missing dataPart {}'.format(i))
                    data.extend(dataPart)
                # Check fileHash and fileSize
                dataHash = hashlib.md5(data).hexdigest()
                dataSize = len(data)
                if dataHash != fileHash:
                    raise RequestError(
                        "unpackDataMessage: File checksum mismatch {} {}".
                        format(dataHash, fileHash))
                if dataSize != msg.get('fileSize', 0):
                    raise RequestError(
                        "unpackDataMessage: File size mismatch {} {}".format(
                            dataSize, msg.get('fileSize', 0)))
                # delete the multipart data cache for this item
                del multiPartDataCache[fileHash]
                return data
        # Multi-part transfer not complete, nothing to return
        return None
    except Exception:
        # remove any cached partial data, then re-raise
        fileHash = msg.get('fileHash')
        if fileHash and fileHash in multiPartDataCache:
            del multiPartDataCache[fileHash]
        raise
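
For reference, an illustrative sketch of the message shape a two-part transfer would take; the field names come from the function above, while the values are placeholders (the 'data' field is encoded and decoded by decodeMessageData, and the md5 fileHash must match the reassembled bytes):

part1 = {'status': 200, 'callId': 7, 'multipart': True, 'numParts': 2,
         'partId': 1, 'fileHash': '<md5 of complete payload>',
         'fileSize': 1024, 'data': '<encoded bytes, part 1>'}
part2 = dict(part1, partId=2, data='<encoded bytes, part 2>')
# unpackDataMessage(part1) caches part 1 and returns None; once part 2
# arrives, the parts are concatenated, hash- and size-checked, and returned.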
Example no. 24
def main():
    logger = logging.getLogger()
    logger.setLevel(logLevel)
    logging.info('Face matching: first log message!')
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config',
                           '-c',
                           default=defaultConfig,
                           type=str,
                           help='experiment config file (.json or .toml)')
    argParser.add_argument('--runs',
                           '-r',
                           default='',
                           type=str,
                           help='Comma separated list of run numbers')
    argParser.add_argument('--scans',
                           '-s',
                           default='',
                           type=str,
                           help='Comma separated list of scan numbers')
    argParser.add_argument(
        '--deleteTmpNifti',
        '-d',
        default='1',
        type=str,
        help='Set to 0 if rerunning within a single scanning session after an error')
    # creates pipe communication link to send/request responses through web pipe
    argParser.add_argument(
        '--commpipe',
        '-q',
        default=None,
        type=str,
        help='Named pipe to communicate with projectInterface')
    argParser.add_argument('--filesremote',
                           '-x',
                           default=False,
                           action='store_true',
                           help='dicom files retrieved from remote server')

    args = argParser.parse_args()
    print(args)
    cfg = initializeFaceMatching(args.config, args)

    # DELETE ALL FILES IF FLAGGED TO #
    if args.deleteTmpNifti == '1':
        deleteTmpFiles(cfg)
    else:
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        print('NOT DELETING NIFTIS IN tmp/convertedNiftis')
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')

    # comm pipe
    projComm = projUtils.initProjectComm(args.commpipe, args.filesremote)
    fileInterface = FileInterface(filesremote=args.filesremote,
                                  commPipes=projComm)
    # initialize watching in a particular directory
    fileInterface.initWatch(cfg.dicomDir, cfg.dicomNamePattern,
                            cfg.minExpectedDicomSize)
    #### MAIN PROCESSING ###
    nRuns = len(cfg.runNum)
    for runIndex in np.arange(nRuns):
        # Steps for each run:
        # 1. load the run regressor
        # 2. find the emotional (happy) face trials
        # 3. find the rest TRs right before each one
        # At every TR --> register to MNI, mask, etc.
        # 4. z-score the preceding rest data (convert + register as before)
        # 5. calculate percent signal change over the ROI
        # 6. save as a text file (every TR -- the display can smooth it)
        # LOAD RUN REGRESSOR
        runNum = runIndex + 1
        regressor = getRegressorMatrix(cfg, runNum)
        happy_TRs = findConditionTR(regressor,
                                    int(cfg.HAPPY))  # 3 blocks 12 TRs each
        happy_TRs_shifted = happy_TRs + cfg.nTR_shift
        happy_TRs_shifted_filenum = happy_TRs_shifted + cfg.nTR_skip  # to account for first 2 files that we're skipping
        neutral_TRs = findConditionTR(regressor, int(cfg.NEUTRAL))
        neutral_TRs_shifted = neutral_TRs + cfg.nTR_shift
        object_TRs = findConditionTR(regressor, int(cfg.OBJECT))
        object_TRs_shifted = object_TRs + cfg.nTR_shift
        nBlocks = np.shape(happy_TRs)[0]
        nTR_per_block = np.shape(happy_TRs)[1]
        fixation_TRs, fixation_blocks = findFixationTR(regressor)
        fixation_TRs_shifted = fixation_TRs + cfg.nTR_shift
        fixation_blocks_shifted = fixation_blocks + cfg.nTR_shift
        all_other_categories_shifted = np.concatenate(
            (neutral_TRs_shifted, object_TRs_shifted, fixation_blocks_shifted),
            axis=0).flatten()

        runData = StructDict()
        runData.all_data = np.zeros((cfg.nVox, cfg.nTR_run - cfg.nTR_skip))
        runData.percent_change = np.zeros((cfg.nTR_run - cfg.nTR_skip, ))
        runData.percent_change[:] = np.nan
        runData.badVoxels = np.array([])

        makeRunHeader(cfg, runIndex)
        run = cfg.runNum[runIndex]
        scanNum = cfg.scanNum[runIndex]
        TRindex = 0
        for TRFilenum in np.arange(cfg.nTR_skip + 1,
                                   cfg.nTR_run + 1):  # iterate through all TRs
            if TRFilenum == cfg.nTR_skip + 1:  # wait until run starts
                timeout_file = 180
            else:
                timeout_file = 5
            A = time.time()
            dicomData = readRetryDicomFromFileInterface(fileInterface,
                                                        getDicomFileName(
                                                            cfg, scanNum,
                                                            TRFilenum),
                                                        timeout=timeout_file)
            full_nifti_name = convertToNifti(TRFilenum, scanNum, cfg,
                                             dicomData)
            registeredFileName = registerNewNiftiToMNI(cfg, full_nifti_name)
            maskedData = apply_mask(registeredFileName, cfg.mask_filename)
            runData.all_data[:, TRindex] = maskedData
            B = time.time()
            print('read to mask time: {:5f}'.format(B - A))

            if TRindex in happy_TRs_shifted:  # we're at a happy block
                # now get TRs to use for zscoring
                TRs_to_use_other_categories = np.sort(
                    all_other_categories_shifted[
                        all_other_categories_shifted < TRindex])
                avg_activity, runData = getAvgSignal(
                    TRs_to_use_other_categories, runData, TRindex, cfg)
                runData.percent_change[TRindex] = calculatePercentChange(
                    avg_activity, runData.all_data[:, TRindex])

                text_to_save = '{0:05f}'.format(
                    runData.percent_change[TRindex])
                file_name_to_save = getOutputFilename(run, TRindex)
                if cfg.mode == 'cloud':
                    full_filename_to_save = os.path.join(
                        cfg.intelrt.subject_full_day_path, file_name_to_save)
                else:
                    full_filename_to_save = os.path.join(
                        cfg.subject_full_day_path, file_name_to_save)
                fileInterface.putTextFile(full_filename_to_save, text_to_save)
                if args.commpipe:
                    # JUST TO PLOT ON WEB SERVER
                    projUtils.sendResultToWeb(projComm, run, int(TRindex),
                                              runData.percent_change[TRindex])
            TRheader = makeTRHeader(cfg, runIndex, TRFilenum, TRindex,
                                    runData.percent_change[TRindex])
            TRindex += 1
        # SAVE OVER RUN NP FILE
        runData.scanNum = scanNum  # save scanning number
        runData.subjectName = cfg.subjectName
        runData.dicomDir = cfg.dicomDir
        run_filename = getRunFilename(cfg.sessionId, run)
        full_run_filename_to_save = os.path.join(cfg.subject_full_day_path,
                                                 run_filename)
        sio.savemat(full_run_filename_to_save, runData, appendmat=False)
    sys.exit(0)
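
The z-scoring baseline above averages all previously seen non-happy TRs (neutral, object, fixation). A minimal sketch of that baseline step, with a hypothetical helper name (the real getAvgSignal also tracks bad voxels in runData):

import numpy as np

def baselineAverageSketch(baseline_TRs, all_data):
    # voxelwise mean over the baseline TR columns (voxels x TRs matrix)
    return np.mean(all_data[:, baseline_TRs], axis=1)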
Example no. 25
if __name__ == "__main__":
    installLoggers(logging.INFO,
                   logging.INFO,
                   filename=os.path.join(currPath, 'logs/sample.log'))
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--filesremote',
                           '-x',
                           default=False,
                           action='store_true',
                           help='dicom files retrieved from remote server')
    argParser.add_argument('--config',
                           '-c',
                           default=defaultConfig,
                           type=str,
                           help='experiment file (.json or .toml)')
    argParser.add_argument(
        '--test',
        '-t',
        default=False,
        action='store_true',
        help='start projectInterface in test mode, unsecure')
    args = argParser.parse_args()

    params = StructDict({
        'fmriPyScript': scriptToRun,
        'initScript': initScript,
        'finalizeScript': finalizeScript,
        'filesremote': args.filesremote,
    })

    web = Web()
    web.start(params, args.config, testMode=args.test)
Example no. 26
import sys
import glob

# OR FOR VM
sys.path.append('/home/amennen/code/rt-cloud')
# OR FOR INTELRT
sys.path.append('/Data1/code/rt-cloud/')
from rtCommon.utils import loadConfigFile, dateStr30, DebugLevels, writeFile, loadMatFile
from rtCommon.readDicom import readDicomFromBuffer
from rtCommon.fileClient import FileInterface
import rtCommon.webClientUtils as wcutils
from rtCommon.structDict import StructDict
import rtCommon.dicomNiftiHandler as dnh
import greenEyes

subject = 102
# conf = '/home/amennen/code/rt-cloud/projects/greenEyes/conf/greenEyes_organized.local.toml'  # VM path
conf = '/Data1/code/rt-cloud/projects/greenEyes/conf/greenEyes_organized.toml'
args = StructDict()
args.config = conf
args.runs = '1'
args.scans = '5'
args.webpipe = None
args.filesremote = False
cfg = greenEyes.initializeGreenEyes(args.config, args)

r = 0
fileStr = '{0}/patternsData_r{1}*'.format(cfg.subject_full_day_path,r+1)
run_pat = glob.glob(fileStr)[-1]
run_data = loadMatFile(run_pat)

# check classifier
# modelfn = '/home/amennen/utils/greenEyes_clf/UPPERRIGHT_stationInd_0_ROI_1_AVGREMOVE_1_filter_0_k1_0_k2_25.sav'  # VM path
modelfn = '/Data1/code/utils_greenEyes/greenEyes_clf/UPPERRIGHT_stationInd_0_ROI_1_AVGREMOVE_1_filter_0_k1_0_k2_25.sav'
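
A hedged sketch of loading the saved classifier; that the .sav file is a pickled scikit-learn model is an assumption, since the loading code is not shown here:

import pickle

# assumption: the .sav model was serialized with pickle (or joblib)
with open(modelfn, 'rb') as f:
    clf = pickle.load(f)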
Example no. 27
defaultConfig = os.path.join(currPath, 'conf/greenEyes_organized.toml')

if __name__ == "__main__":
    installLoggers(logging.INFO,
                   logging.INFO,
                   filename=os.path.join(currPath, 'logs/webServer.log'))

    argParser = argparse.ArgumentParser()
    argParser.add_argument('--filesremote',
                           '-x',
                           default=False,
                           action='store_true',
                           help='dicom files retrieved from remote server')
    argParser.add_argument('--config',
                           '-c',
                           default=defaultConfig,
                           type=str,
                           help='experiment file (.json or .toml)')
    args = argParser.parse_args()
    # Set the path to the fMRI Python script to run
    params = StructDict({
        'fmriPyScript': 'projects/greenEyes/greenEyes.py',
        'filesremote': args.filesremote,
    })

    cfg = loadConfigFile(args.config)

    web = Web()
    web.start(params, cfg)
Example no. 28

def runExampleService(args, isStartedEvent):
    exampleServer = ExampleService(args)
    exampleThread = threading.Thread(
        name='exampleThread', target=exampleServer.wsRemoteService.runForever)
    exampleThread.start()
    while exampleServer.wsRemoteService.started is False:
        time.sleep(.1)
    isStartedEvent.set()


defaultCfg = StructDict({
    'sessionId': "test",
    'subjectName': "test_sample",
    'subjectNum': 1,
    'subjectDay': 1,
    'sessionNum': 1
})

defaultProjectArgs = StructDict({
    'config': defaultCfg,
    'mainScript': 'projects/sample/sample.py',
    'port': testPort,
    'test': True
})
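
As the snippets throughout this document show, StructDict exposes dict keys as attributes, including nested StructDicts:

print(defaultProjectArgs.port)                # testPort
print(defaultProjectArgs.config.subjectName)  # 'test_sample'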


class BackgroundTestServers:
    def __init__(self):
        self.projectProc = None