def webPipeCallback(request):
     if 'cmd' not in request:
         raise StateError('handleFifoRequests: cmd field not in request: {}'.format(request))
     cmd = request['cmd']
     route = request.get('route')
     response = StructDict({'status': 200})
     if route == 'dataserver':
         try:
             response = RtAttenWeb.webServer.sendDataMsgFromThread(request, timeout=10)
             if response is None:
                 raise StateError('handleFifoRequests: Response None from sendDataMessage')
             if 'status' not in response:
                 raise StateError('handleFifoRequests: status field missing from response: {}'.format(response))
             if response['status'] not in (200, 408):
                 if 'error' not in response:
                     raise StateError('handleFifoRequests: error field missing from response: {}'.format(response))
                 RtAttenWeb.webServer.setUserError(response['error'])
                 logging.error('handleFifo status {}: {}'.format(response['status'], response['error']))
         except Exception as err:
             errStr = 'SendDataMessage Exception type {}: error {}:'.format(type(err), str(err))
             response = {'status': 400, 'error': errStr}
             RtAttenWeb.webServer.setUserError(errStr)
             logging.error('handleFifo Exception: {}'.format(errStr))
             raise err
     else:
         if cmd == 'webCommonDir':
             response.filename = CommonOutputDir
         elif cmd == 'classificationResult':
             try:
                 predict = request['value']
                 runId = request.get('runId')
                 # predict has {'catsep': val, 'vol': val}
                 catsep = predict.get('catsep')
                 vol = predict.get('vol')
                 # Test for NaN by comparing the value to itself; if not equal, it is NaN
                 if catsep is not None and catsep == catsep:
                     image_b64Str = RtAttenWeb.createFeedbackImage(vol, catsep)
                     cmd = {'cmd': 'subjectDisplay', 'data': image_b64Str}
                     RtAttenWeb.webServer.sendSubjMsgFromThread(json.dumps(cmd))
                     # also update clinician window
                     # change classification value range to 0 to 1 instead of -1 to 1
                     # classVal = (catsep + 1) / 2
                     cmd = {'cmd': 'classificationResult', 'classVal': catsep, 'vol': vol, 'runId': runId}
                     RtAttenWeb.webServer.sendUserMsgFromThread(json.dumps(cmd))
             except Exception as err:
                 errStr = 'SendClassification Exception type {}: error {}:'.format(type(err), str(err))
                 response = {'status': 400, 'error': errStr}
                 RtAttenWeb.webServer.setUserError(errStr)
                 logging.error('handleFifo Exception: {}'.format(errStr))
                 raise err
         elif cmd == 'subjectDisplay':
             # forward to subject window
             color = request.get('bgcolor')
             if color is not None:
                 image_b64Str = RtAttenWeb.createSolidColorImage(color)
                 cmd = {'cmd': 'subjectDisplay', 'data': image_b64Str}
                 RtAttenWeb.webServer.sendSubjMsgFromThread(json.dumps(cmd))
             else:
                 RtAttenWeb.webServer.sendSubjMsgFromThread(request)
     return response
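
The catsep == catsep check in webPipeCallback relies on the IEEE 754 rule that NaN is the only value that compares unequal to itself; math.isnan is the more explicit spelling. A minimal standalone illustration:

import math

catsep = float('nan')
assert catsep != catsep      # NaN fails self-equality
assert math.isnan(catsep)    # equivalent, more explicit test
catsep = 0.42
assert catsep == catsep      # ordinary floats pass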
Example #2
def clientWebpipeCmd(webpipes, cmd):
    '''Send a web request using named pipes to the web server for handling.
    This allows a separate client process to make requests of the web server process.
    It writes the request on fd_out and receives the reply on fd_in.
    '''
    webpipes.fd_out.write(json.dumps(cmd) + os.linesep)
    msg = webpipes.fd_in.readline()
    if len(msg) == 0:
        # fifo closed
        raise StateError('WebPipe closed')
    response = json.loads(msg)
    retVals = StructDict()
    decodedData = None
    if 'status' not in response:
        raise StateError(
            'clientWebpipeCmd: status not in response: {}'.format(response))
    retVals.statusCode = response['status']
    if retVals.statusCode == 200:  # success
        if 'filename' in response:
            retVals.filename = response['filename']
        if 'data' in response:
            decodedData = b64decode(response['data'])
            if retVals.filename is None:
                raise StateError('clientWebpipeCmd: filename field is None')
            retVals.data = formatFileData(retVals.filename, decodedData)
    elif retVals.statusCode not in (200, 408):
        raise RequestError('WebRequest error: status {}: {}'.format(
            retVals.statusCode, response['error']))
    return retVals
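
A hedged usage sketch for clientWebpipeCmd, assuming the webserver end of the fifos is already open (the pipe path here is illustrative, not one the repo guarantees):

# Open the named pipe pair created by the webserver (see makeFifo below);
# open() on a fifo blocks until the other end is opened as well, so the
# client opens its reader first while the server opens its writer first.
webpipes = StructDict()
webpipes.fd_in = open('/tmp/pipes/rtatten_pipe_1234.toclient', mode='r')
webpipes.fd_out = open('/tmp/pipes/rtatten_pipe_1234.fromclient', mode='w', buffering=1)

retVals = clientWebpipeCmd(webpipes, {'cmd': 'webCommonDir'})
if retVals.statusCode == 200:
    print('common output dir:', retVals.filename)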
 def deleteSessionData(self):
     sessionIdPattern = re.sub('T.*', 'T*', self.id_fields.sessionId)
     filePattern = os.path.join(self.dirs.serverDataDir,
                                "*" + sessionIdPattern + "*.mat")
     fileInfo = StructDict()
     fileInfo.filePattern = filePattern
     reply = self.sendCmdExpectSuccess(MsgEvent.DeleteData, fileInfo)
     outputReplyLines(reply.fields.outputlns, None)
Example #4
def main():
    configFile = 'greenEyes.toml'
    cfg = initializeGreenEyes(configFile)
    runData = StructDict()
    runData.cheating_probability = np.zeros((cfg.nStations, ))
    runData.correct_prob = np.zeros((cfg.nStations, ))
    runData.interpretation = getSubjectInterpretation(cfg)
    runData.badVoxels = {}
    runData.dataForClassification = {}
    story_TRs = cfg.story_TR_2 - cfg.story_TR_1
    SKIP = 10
    all_data = np.zeros((cfg.nVox, cfg.nTR_run - SKIP))  # don't need to save
    runData.story_data = np.zeros((cfg.nVox, story_TRs))
    #### MAIN PROCESSING ###
    ## FUNCTION TO OPERATE OVER ALL SCANNING RUNS
    scanNum = 9
    for TRindex in np.arange(cfg.nTR_run - SKIP):
        print('TRindex', TRindex)
        #for TRindex in np.arange(44):
        TRnum = TRindex + 1 + SKIP  # actual file number to look for
        TRindex_story = TRindex - cfg.story_TR_1
        full_nifti_name = convertToNifti(TRnum, scanNum, cfg)
        registeredFileName = registerNewNiftiToMNI(cfg, full_nifti_name)
        maskedData = apply_mask(registeredFileName, cfg.MASK)
        all_data[:, TRindex] = maskedData
        if TRindex_story >= 0:  # we're at a story TR now
            runData.story_data[:, TRindex_story] = maskedData
            if np.any(TRindex_story == cfg.last_tr_in_station.astype(int)):
                # NOW PREPROCESS AND CLASSIFY
                runData = preprocessAndPredict(cfg, runData, TRindex_story)
        else:
            pass
Example #5
 def __init__(self, rtatten):
     super().__init__()
     self.cfg = None
     self.dirs = StructDict()
     self.prevData = None
     self.observer = None
     self.fileNotifyHandler = None
     self.fileNotifyQ = Queue()  # type: None
     self.printFirstFilename = True
     self.logtimeFile = None
     self.id_fields = StructDict()
     self.rtatten = rtatten
Example #6
 def test_runRegistration(cls, configData):
     params = StructDict()
     params.cfg = configData
     regGlobals = localCreateRegConfig(params.cfg)
     request = {
         'cmd': 'runReg',
         'regConfig': regGlobals,
         'regType': 'test',
         'dayNum': 1
     }
     lineCount = RtAttenWeb.runRegistration(
         request, test=['ping', 'localhost', '-c', '3'])
     assert lineCount == 8
Example #7
def makeTrainCfg(run):
    trainCfg = StructDict()
    if run.runId == 1:
        trainCfg.blkGrpRefs = [{'run': 1, 'phase': 1}, {'run': 1, 'phase': 2}]
    elif run.runId == 2:
        trainCfg.blkGrpRefs = [{'run': 1, 'phase': 2}, {'run': 2, 'phase': 1}]
    else:
        trainCfg.blkGrpRefs = [{
            'run': run.runId - 1,
            'phase': 1
        }, {
            'run': run.runId,
            'phase': 1
        }]
    return trainCfg
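
Illustrative call showing the phase references makeTrainCfg produces for a hypothetical run 3 (the general case trains on the previous run's phase 1 plus the current run's phase 1):

run = StructDict({'runId': 3})
trainCfg = makeTrainCfg(run)
print(trainCfg.blkGrpRefs)
# -> [{'run': 2, 'phase': 1}, {'run': 3, 'phase': 1}]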
 def sendDataMsgFromThread(msg, timeout=None):
     if Web.wsDataConn is None:
         raise StateError("WebServer: No Data Websocket Connection")
     callbackStruct = StructDict()
     callbackStruct.event = threading.Event()
     # schedule the call with io thread
     Web.ioLoopInst.add_callback(Web.sendDataMessage, msg, callbackStruct)
     # wait for completion of call
     callbackStruct.event.wait(timeout)
     if callbackStruct.event.is_set() is False:
         raise TimeoutError("sendDataMessage: Data Request Timed Out({}) {}".format(timeout, msg))
     if callbackStruct.response is None:
         raise StateError('sendDataMessage: callbackStruct.response is None for command {}'.format(msg))
     if callbackStruct.status == 200 and 'writefile' in callbackStruct.response:
         writeResponseDataToFile(callbackStruct.response)
     return callbackStruct.response
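
sendDataMsgFromThread is an instance of a common handoff pattern: schedule work on the io loop's thread, then park the calling thread on an Event until the io thread fills in the response. A minimal sketch of that pattern, assuming a tornado-style loop with add_callback (names here are illustrative):

import threading

def callOnIoThread(ioLoop, func, msg, timeout=None):
    cb = StructDict()
    cb.event = threading.Event()
    cb.response = None

    def wrapper():
        cb.response = func(msg)   # runs on the io loop thread
        cb.event.set()            # wake the waiting caller

    ioLoop.add_callback(wrapper)  # schedule and return immediately
    if not cb.event.wait(timeout):
        raise TimeoutError('no response within {} secs for {}'.format(timeout, msg))
    return cb.response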
Example #9
    def Predict(self, TR):
        """Given a scan image (TR) predict the classification of the data (face/scene)
        """
        predict_result = StructDict()
        outputlns = []
        patterns = self.blkGrp.patterns
        combined_raw_sm = self.blkGrp.combined_raw_sm
        combined_TRid = self.blkGrp.firstVol + TR.trId

        combined_raw_sm[combined_TRid] = patterns.raw_sm[TR.trId]
        patterns.raw_sm_filt[TR.trId, :] = \
            highPassRealTime(combined_raw_sm[0:combined_TRid+1, :], self.run.TRTime, self.session.cutoff)
        patterns.raw_sm_filt_z[TR.trId, :] = \
            (patterns.raw_sm_filt[TR.trId, :] - patterns.phase1Mean[0, :]) / patterns.phase1Std[0, :]

        if self.run.rtfeedback:
            TR_regressor = np.array(TR.regressor)
            if np.any(TR_regressor):
                patterns.predict[0, TR.trId], _, _, patterns.activations[:, TR.trId] = \
                    Test_L2_RLR_realtime(self.blkGrp.trainedModel, patterns.raw_sm_filt_z[TR.trId, :],
                                         TR_regressor)
                # determine whether expecting face or scene for this TR
                categ = np.flatnonzero(TR_regressor)
                # the other category will be categ+1 mod 2 since there are only two category types
                otherCateg = (categ + 1) % 2
                patterns.categoryseparation[0, TR.trId] = \
                    patterns.activations[categ, TR.trId]-patterns.activations[otherCateg, TR.trId]
            else:
                patterns.categoryseparation[0, TR.trId] = np.nan
            predict_result.catsep = patterns.categoryseparation[0, TR.trId]
            predict_result.vol = patterns.fileNum[0, TR.trId]
        else:
            patterns.categoryseparation[0, TR.trId] = np.nan

        # print TR results
        categorysep_mean = np.nan
        # TODO - do we need to handle 0:TR here to include phase 1 data?
        if not np.all(np.isnan(patterns.categoryseparation[0, 0:TR.trId + 1])):
            categorysep_mean = np.nanmean(
                patterns.categoryseparation[0, 0:TR.trId + 1])
        output_str = '{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{}\t{:d}\t{:.1f}\t{:.3f}\t{:.3f}'.format(
            self.id_fields.runId, self.id_fields.blockId, TR.trId, TR.type,
            TR.attCateg, TR.stim, patterns.fileNum[0, TR.trId],
            patterns.fileload[0, TR.trId], patterns.predict[0, TR.trId],
            patterns.categoryseparation[0, TR.trId], categorysep_mean)
        outputlns.append(output_str)
        return predict_result, outputlns
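
A worked example of the category-separation value Predict computes: with regressor [1, 0] the expected category is index 0, the other is (0 + 1) % 2 == 1, and catsep is the expected activation minus the other one.

import numpy as np

TR_regressor = np.array([1, 0])
activations = np.array([0.8, 0.3])    # one activation per category for this TR
categ = np.flatnonzero(TR_regressor)  # -> array([0])
otherCateg = (categ + 1) % 2          # -> array([1])
catsep = activations[categ] - activations[otherCateg]
print(catsep)                         # [0.5]: positive means the expected category won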
Example #10
 def __init__(self):
     super().__init__()
     self.dirs = StructDict()
     self.blkGrpCache = {}  # type: ignore
     self.modelCache = {}  # type: ignore
     self.session = None
     self.run = None
     self.blkGrp = None
 def __init__(self):
     self.id_fields = StructDict()
     self.id_fields.experimentId = 0
     self.id_fields.sessionId = -1
     self.id_fields.runId = -1
     self.id_fields.blkGrpId = -1
     self.id_fields.blockId = -1
     self.id_fields.trId = -1
     self.blockType = -1
Example #12
def makeFifo():
    fifodir = '/tmp/pipes/'
    if not os.path.exists(fifodir):
        os.makedirs(fifodir)
    # remove all previous pipes
    for p in Path(fifodir).glob("rtatten_*"):
        p.unlink()
    # create new pipe
    fifoname = os.path.join(fifodir, 'rtatten_pipe_{}'.format(int(time.time())))
    # fifo struct
    webpipes = StructDict()
    webpipes.name_out = fifoname + '.toclient'
    webpipes.name_in = fifoname + '.fromclient'
    if not os.path.exists(webpipes.name_out):
        os.mkfifo(webpipes.name_out)
    if not os.path.exists(webpipes.name_in):
        os.mkfifo(webpipes.name_in)
    webpipes.fifoname = fifoname
    return webpipes
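
A hedged sketch of wiring makeFifo into the webserver side; per the comment in ClientMain below, the client opens its reader first and the webserver opens its writer first, so neither side deadlocks on the blocking fifo open(). The serve loop shown is illustrative, not the repo's exact dispatch code:

webpipes = makeFifo()
# launch the client with the fifo base name (e.g. --webpipe <webpipes.fifoname>),
# then open the server ends: writer first here, reader first in the client
fd_out = open(webpipes.name_out, mode='w', buffering=1)   # server -> client
fd_in = open(webpipes.name_in, mode='r')                  # client -> server
request = json.loads(fd_in.readline())                    # e.g. {'cmd': 'webCommonDir'}
response = webPipeCallback(request)                       # handled as in the first example
fd_out.write(json.dumps(response) + os.linesep)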
Example #13
 def retrieveFile(self, filename):
     print("Retrieving data for {}... ".format(filename), end='')
     fileInfo = StructDict()
     fileInfo.subjectNum = self.id_fields.subjectNum
     fileInfo.subjectDay = self.id_fields.subjectDay
     fileInfo.filename = filename
     stime = time.time()
     replyId = self.rtatten.RetrieveData.remote(fileInfo)
     reply = ray.get(replyId)
     assert reply.success is True
     print("took {:.2f} secs".format(time.time() - stime))
     clientFile = os.path.join(self.dirs.dataDir, filename)
     writeFile(clientFile, reply.data)
     serverFile = os.path.join(self.dirs.serverDataDir, filename)
     if not os.path.exists(serverFile):
         try:
             os.symlink(clientFile, serverFile)
         except OSError:
             print("Unable to link file %s", serverFile)
 def __init__(self):
     super().__init__()
     self.dirs = StructDict()
     self.prevData = None
     self.printFirstFilename = True
     self.ttlPulseClient = TTLPulseClient()
     self.fileWatcher = FileWatcher()
     self.webpipes = None
     self.webCommonDir = None
     self.webPatternsDir = None
     self.webUseRemoteFiles = False
def test_baseModel(cfgFilePath):
    print("test_baseModel")
    logging.error("###Test logging###")
    # import pdb; pdb.set_trace()
    params = StructDict(
        {'addr': 'localhost', 'port': 5210,
         'experiment': cfgFilePath,
         'run_local': True, 'use_web': False,
        })
    result = ClientMain.ClientMain(params)
    assert result is True
 def test_structDict(self):
     print("Test StructDict:")
     a = StructDict()
     a.top = 1
     a.bottom = 3
     a.sub = StructDict()
     a.sub.left = 'corner'
     assert a.top == 1 and a.bottom == 3 and a.sub.left == 'corner'
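
test_structDict above exercises the attribute-style access these examples all rely on. A minimal sketch of the idiom, assuming StructDict is a dict subclass whose missing attributes read as None (consistent with the retVals.filename is None check in clientWebpipeCmd):

class StructDictSketch(dict):
    # illustrative stand-in, not the repo's implementation
    def __getattr__(self, name):
        return self.get(name)       # missing keys read as None
    def __setattr__(self, name, value):
        self[name] = value

a = StructDictSketch()
a.top = 1
assert a.top == 1 and a['top'] == 1
assert a.missing is None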
def test_runSyntheticData(cfgFilePath):
    print("rtfMRI: test_runSyntheticData")
    # Generate synthetic image data for the test runs if needed
    gd.generate_data(cfgFilePath)

    # import pdb; pdb.set_trace()
    params = StructDict({
        'addr': 'localhost',
        'port': 5211,
        'experiment': cfgFilePath,
        'run_local': True,
        'use_web': False,
    })
    result = ClientMain.ClientMain(params)
    assert result is True
Example #18
def webHandler(configData):
    global webIsStarted
    params = StructDict()
    params.rtserver = 'localhost:5200'
    params.rtlocal = False
    params.filesremote = False
    params.feedbackdir = 'webInterface/images'
    if not webIsStarted:
        webIsStarted = True
        RtAttenWeb.init(params, configData)
Example #19
def ClientMain(params):
    installLoggers(logging.INFO,
                   logging.INFO,
                   filename='logs/rtAttenClient.log')

    webpipes = None
    if params.webpipe is not None:
        # This process was opened by a webserver which will communicate using webpipes.
        # Open the in and out named pipes and pass to RtAttenClient for communication
        # with the webserver process. Pipe.Open() blocks until the other end opens
        # it as well. Therefore open the reader first here and the writer
        # first within the webserver.
        webpipes = StructDict()
        webpipes.name_in = params.webpipe + '.toclient'
        webpipes.name_out = params.webpipe + '.fromclient'
        webpipes.fd_in = open(webpipes.name_in, mode='r')
        webpipes.fd_out = open(webpipes.name_out, mode='w', buffering=1)
        # Create a thread which will detect if the parent process exited by
        #  reading from stdin; when stdin is closed, exit this process
        exitThread = threading.Thread(name='exitThread',
                                      target=processShouldExitThread,
                                      args=(params, ))
        exitThread.setDaemon(True)
        exitThread.start()

    cfg = loadConfigFile(params.experiment)
    params = mergeParamsConfigs(params, cfg)

    # Start local server if requested
    if params.run_local is True:
        startLocalServer(params.port)

    # run based on config file and passed in options
    client: RtfMRIClient  # define a new variable of type RtfMRIClient
    if params.cfg.experiment.model == 'base':
        client = BaseClient()
    elif params.cfg.experiment.model == 'rtAtten':
        client = RtAttenClient()
        if params.webpipe is not None:
            client.setWeb(webpipes, params.webfilesremote)
    else:
        raise InvocationError("Unsupported model %s" %
                              (params.cfg.experiment.model))
    try:
        client.runSession(params.addr, params.port, params.cfg)
    except Exception as err:
        print(err)
        traceback_str = ''.join(traceback.format_tb(err.__traceback__))
        print(traceback_str)

    if params.run_local is True:
        stopLocalServer(params)

    return True
 def retrieveFile(self, filename):
     fileInfo = StructDict()
     fileInfo.subjectNum = self.id_fields.subjectNum
     fileInfo.subjectDay = self.id_fields.subjectDay
     fileInfo.filename = filename
     if self.cfg.session.useSessionTimestamp is True:
         fileInfo.findNewestPattern = re.sub(r'T\d{6}', 'T*', filename)
         print("Retrieving newest file for {}... ".format(
             fileInfo.findNewestPattern),
               end='')
     else:
         print("Retrieving file {}... ".format(filename), end='')
     stime = time.time()
     reply = self.sendCmdExpectSuccess(MsgEvent.RetrieveData, fileInfo)
     retFilename = reply.fields.filename
     print("took {:.2f} secs".format(time.time() - stime))
     clientFile = os.path.join(self.dirs.dataDir, retFilename)
     writeFile(clientFile, reply.data)
     serverFile = os.path.join(self.dirs.serverDataDir, retFilename)
     if not os.path.exists(serverFile):
         try:
             os.symlink(clientFile, serverFile)
         except OSError:
             logging.error("Unable to link file %s", serverFile)
def createBlockGroupConfig(tr_range, patterns):
    blkGrp = StructDict()
    blkGrp.blocks = []
    blkGrp.type = 0
    blkGrp.firstVol = tr_range[0]
    block = StructDict()
    blockNum = -1
    for iTR in tr_range:
        if patterns.block[0, iTR] > 0 and patterns.block[0, iTR] != blockNum:
            if blockNum >= 0:
                blkGrp.blocks.append(block)
            blockNum = int(patterns.block[0, iTR])
            block = StructDict()
            block.blockId = blockNum
            block.TRs = []
        tr = StructDict()
        tr.trId = iTR - blkGrp.firstVol
        tr.vol = iTR + 1
        tr.attCateg = int(patterns.attCateg[0, iTR])
        tr.stim = int(patterns.stim[0, iTR])
        tr.type = int(patterns.type[0, iTR])
        if tr.type != 0:
            if blkGrp.type == 0:
                blkGrp.type = tr.type
            if blkGrp.type != tr.type:
                raise ValidationError(
                    "createBlockGroupConfig: inconsistent TR types in block group"
                )
        tr.regressor = [
            int(patterns.regressor[0, iTR]),
            int(patterns.regressor[1, iTR])
        ]
        block.TRs.append(tr)
    if len(block.TRs) > 0:
        blkGrp.blocks.append(block)
    return blkGrp
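
Illustrative input for createBlockGroupConfig, assuming the patternsdesign arrays are 1 x nTR (regressor 2 x nTR) as indexed above; two TRs in block 1 followed by one TR in block 2 yield two block entries:

import numpy as np

patterns = StructDict()
patterns.block = np.array([[1, 1, 2]])
patterns.attCateg = np.array([[0, 1, 0]])
patterns.stim = np.array([[0, 0, 1]])
patterns.type = np.array([[1, 1, 1]])
patterns.regressor = np.array([[1, 0, 0],
                               [0, 1, 0]])
blkGrp = createBlockGroupConfig(range(3), patterns)
print(len(blkGrp.blocks))            # 2
print(blkGrp.blocks[0].blockId)      # 1
print(blkGrp.blocks[0].TRs[0].trId)  # 0 (volume index relative to firstVol)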
Example #22
    def StartBlockGroup(self, msg):
        """Initialize a block group. A run is comprised of 2 block groups (called phases in Matlab version).
        Block groups can be of type 1=(Training) or 2=(RealTime Predictions)
        Initialize the patterns data structure for the block group.
        If it is a prediction group (type 2) then load the trained model from the
        previous run and load the patterns data from the previous block group.
        """
        reply = super().StartBlockGroup(msg)
        if reply.result != MsgResult.Success:
            return reply
        errorReply = self.createReplyMessage(msg, MsgResult.Error)
        blkGrp = msg.fields.cfg
        run = self.run
        if blkGrp.nTRs is None:
            errorReply.data = "StartBlkGrp msg missing nTRs in blockGroup"
            return errorReply
        if self.session.nVoxels is None:
            errorReply.data = "StartBlkGrp msg missing nVoxels in blockGroup"
            return errorReply
        if self.session.roiInds is None:
            errorReply.data = "StartBlkGrp msg missing roiInds in blockGroup"
            return errorReply
        if blkGrp.blkGrpId not in (1, 2):
            errorReply.data = "StartBlkGrp: BlkGrpId {} not valid" % (
                blkGrp.blkGrpId)
            return errorReply
        blkGrp.legacyRun1Phase2Mode = False
        if run.runId == 1 and blkGrp.blkGrpId == 2:
            # Legacy matlab mode is where run1 phase2 is treated as a predict phase
            # By default use legacy mode for run1 phase2
            blkGrp.legacyRun1Phase2Mode = True
            if self.session.legacyRun1Phase2Mode is False:
                blkGrp.legacyRun1Phase2Mode = False
            reply.fields.outputlns.append('Legacy mode: {}'.format(
                blkGrp.legacyRun1Phase2Mode))
        blkGrp.patterns = StructDict()
        blkGrp.patterns.raw = np.full((blkGrp.nTRs, self.session.nVoxels),
                                      np.nan)
        blkGrp.patterns.raw_sm = np.full((blkGrp.nTRs, self.session.nVoxels),
                                         np.nan)
        blkGrp.patterns.raw_sm_filt = np.full(
            (blkGrp.nTRs, self.session.nVoxels), np.nan)
        blkGrp.patterns.raw_sm_filt_z = np.full(
            (blkGrp.nTRs, self.session.nVoxels), np.nan)
        blkGrp.patterns.phase1Mean = np.full((1, self.session.nVoxels), np.nan)
        blkGrp.patterns.phase1Y = np.full((1, self.session.nVoxels), np.nan)
        blkGrp.patterns.phase1Std = np.full((1, self.session.nVoxels), np.nan)
        blkGrp.patterns.phase1Var = np.full((1, self.session.nVoxels), np.nan)
        blkGrp.patterns.categoryseparation = np.full(
            (1, blkGrp.nTRs), np.nan)  # (matlab: NaN(1,nTRs))
        blkGrp.patterns.predict = np.full((1, blkGrp.nTRs), np.nan)
        blkGrp.patterns.activations = np.full((2, blkGrp.nTRs), np.nan)
        blkGrp.patterns.attCateg = np.full((1, blkGrp.nTRs), np.nan)
        blkGrp.patterns.stim = np.full((1, blkGrp.nTRs), np.nan)
        blkGrp.patterns.type = np.full((1, blkGrp.nTRs), np.nan)
        blkGrp.patterns.regressor = np.full((2, blkGrp.nTRs), np.nan)
        # blkGrp.patterns.fileAvail = np.zeros((1, blkGrp.nTRs), dtype=np.uint8)
        blkGrp.patterns.fileload = np.full((1, blkGrp.nTRs),
                                           np.nan,
                                           dtype=np.uint8)
        blkGrp.patterns.fileNum = np.full((1, blkGrp.nTRs),
                                          np.nan,
                                          dtype=np.uint16)
        blkGrp.FWHM = self.session.FWHM
        blkGrp.cutoff = self.session.cutoff
        blkGrp.gitCodeId = utils.getGitCodeId()
        self.blkGrp = blkGrp
        if self.blkGrp.type == 2 or blkGrp.legacyRun1Phase2Mode:
            # ** Realtime Feedback Phase ** #
            try:
                # get blkGrp from phase 1
                prev_bg = self.getPrevBlkGrp(self.id_fields.sessionId,
                                             self.id_fields.runId, 1)
            except Exception as err:
                errorReply.data = "Error: getPrevBlkGrp(%r, %r, 1): %r" %\
                    (self.id_fields.sessionId, self.id_fields.runId, err)
                return errorReply
            self.blkGrp.patterns.phase1Mean[
                0, :] = prev_bg.patterns.phase1Mean[0, :]
            self.blkGrp.patterns.phase1Y[0, :] = prev_bg.patterns.phase1Y[0, :]
            self.blkGrp.patterns.phase1Std[0, :] = prev_bg.patterns.phase1Std[
                0, :]
            self.blkGrp.patterns.phase1Var[0, :] = prev_bg.patterns.phase1Var[
                0, :]
            self.blkGrp.combined_raw_sm = np.concatenate(
                (prev_bg.patterns.raw_sm, blkGrp.patterns.raw_sm))
            self.blkGrp.combined_catsep = np.concatenate(
                (prev_bg.patterns.categoryseparation,
                 blkGrp.patterns.categoryseparation))

            if self.id_fields.runId > 1:
                try:
                    # get trained model
                    self.blkGrp.trainedModel = self.getTrainedModel(
                        self.id_fields.sessionId, self.id_fields.runId - 1)
                except Exception as err:
                    errorReply.data = "Error: getTrainedModel(%r, %r): %r" %\
                        (self.id_fields.sessionId, self.id_fields.runId-1, err)
                    return errorReply
            reply.fields.outputlns.append(
                '*********************************************')
            reply.fields.outputlns.append('beginning model testing...')
            # prepare for TR sequence
            reply.fields.outputlns.append(
                'run\tblock\tTR\tbltyp\tblcat\tstim\tfilenum\tloaded\tpredict\toutput\tavg'
            )
        return reply
            os.makedirs(subject_reg_dir)
        cfg.intelrt.wf_dir = '{0}/{1}/ses-{2:02d}/registration/'.format(cfg.intelrt.codeDir, cfg.bids_id, 1)
        cfg.intelrt.BOLD_to_T1 = cfg.intelrt.wf_dir + 'affine.txt'
        cfg.intelrt.T1_to_MNI = cfg.intelrt.wf_dir + 'ants_t1_to_mniComposite.h5'
        cfg.intelrt.ref_BOLD = cfg.intelrt.wf_dir + 'ref_image.nii.gz'

def main():

    # MAKES STRUCT WITH ALL PARAMETERS IN IT
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config', '-c', default='greenEyes_organized.toml', type=str,
                           help='experiment config file (.json or .toml)')
    argParser.add_argument('--machine', '-m', default='intelrt', type=str,
                           help='which machine is running this script (intelrt) or (cloud)')
    args = argParser.parse_args()
    params = StructDict({'config': args.config, 'machine': args.machine})

    cfg = loadConfigFile(params.config)
    cfg.bids_id = 'sub-{0:03d}'.format(cfg.subjectNum)
    cfg.ses_id = 'ses-{0:02d}'.format(1)
    # get subj

    if params.machine == 'intelrt':
        # get intel computer ready
        cfg = buildSubjectFoldersIntelrt(cfg)
        if cfg.subjectDay == 2:
            cluster_wf_dir = '{0}/derivatives/work/fmriprep_wf/single_subject_{1:03d}_wf'.format(cfg.clusterBidsDir, cfg.subjectNum)
            cluster_BOLD_to_T1 = cluster_wf_dir + '/func_preproc_ses_01_task_story_run_01_wf/bold_reg_wf/bbreg_wf/fsl2itk_fwd/affine.txt'
            cluster_T1_to_MNI = cluster_wf_dir + '/anat_preproc_wf/t1_2_mni/ants_t1_to_mniComposite.h5'
            cluster_ref_BOLD = glob.glob(cluster_wf_dir + '/func_preproc_ses_01_task_story_run_01_wf/bold_reference_wf/gen_ref/ref_image.nii.gz')[0]
            copyClusterFileToIntel(cluster_BOLD_to_T1, cfg.subject_offline_registration_path)
Example #24
def validateModelsMatlabPython(subjectNum, subjectDay, usesamedata):

    dataPath = '/data/jag/cnds/amennen/rtAttenPenn/fmridata/behavdata/gonogo/'
    configFile = dataPath + 'subject' + str(
        subjectNum) + '/usedscripts/PennCfg_Day' + str(subjectDay) + '.toml'
    cfg = loadConfigFile(configFile)
    #subjectDayDir = getSubjectDayDir(cfg.session.subjectNum, cfg.session.subjectDay)
    subjectDayDir = '/data/jag/cnds/amennen/rtAttenPenn/fmridata/behavdata/gonogo/subject' + str(
        cfg.session.subjectNum) + '/day' + str(cfg.session.subjectDay)
    matDataDir = subjectDayDir  #os.path.join(cfg.session.dataDir, subjectDayDir)
    pyDataDir = matDataDir
    all_vals = np.zeros((100, 2, cfg.session.Runs[-1] - 1))
    usenewmodel = 1
    #usesamedata = 1 #whether or not to use same data as with matlab
    for runId in np.arange(1, cfg.session.Runs[-1]):
        runDir = 'run' + str(runId) + '/'
        matModelFn = utils.findNewestFile(
            matDataDir, runDir + 'trainedModel_' + str(runId) + '*.mat')
        pyModelFn = utils.findNewestFile(
            pyDataDir, 'trainedModel_r' + str(runId) + '*_py.mat')
        matModel_train = utils.loadMatFile(matModelFn)
        # to find what matModel includes use matModel.keys() --> trainedModel, trainPats, trainLabels
        # for each model we have W [ nVoxel x 2 classes], biases [ 1 x 2 classes]
        # we can't apply this model to any of the examples in this run, but let's apply it to the first 4 blocks of the next run
        # now load testing data from the next run to test it on
        pyModel_train = utils.loadMatFile(pyModelFn)
        # INSTEAD MAKE NEW MODEL
        print(runId)
        if usenewmodel:
            lrc1 = LogisticRegression(penalty='l2', solver='sag', max_iter=300)
            lrc2 = LogisticRegression(penalty='l2', solver='sag', max_iter=300)
            if usesamedata:
                lrc1.fit(matModel_train.trainPats,
                         pyModel_train.trainLabels[:, 0])
                lrc2.fit(matModel_train.trainPats,
                         pyModel_train.trainLabels[:, 1])
            else:
                lrc1.fit(pyModel_train.trainPats, pyModel_train.trainLabels[:,
                                                                            0])
                lrc2.fit(pyModel_train.trainPats, pyModel_train.trainLabels[:,
                                                                            1])
            newTrainedModel = utils.MatlabStructDict({}, 'trainedModel')
            newTrainedModel.trainedModel = StructDict({})
            newTrainedModel.trainedModel.weights = np.concatenate(
                (lrc1.coef_.T, lrc2.coef_.T), axis=1)
            newTrainedModel.trainedModel.biases = np.concatenate(
                (lrc1.intercept_, lrc2.intercept_)).reshape(1, 2)
            newTrainedModel.trainPats = pyModel_train.trainPats
            newTrainedModel.trainLabels = pyModel_train.trainLabels
        # now load the models to test on
        matModelFn = utils.findNewestFile(
            matDataDir, 'run' + str(runId + 1) + '/' + 'trainedModel_' +
            str(runId + 1) + '*.mat')
        pyModelFn = utils.findNewestFile(
            pyDataDir, 'trainedModel_r' + str(runId + 1) + '*_py.mat')
        matModel_test = utils.loadMatFile(matModelFn)
        pyModel_test = utils.loadMatFile(pyModelFn)
        nTRTest = 100
        mat_test_data = matModel_test.trainPats[nTRTest:, :]
        py_test_data = pyModel_test.trainPats[nTRTest:, :]
        test_labels = matModel_test.trainLabels[nTRTest:, :]
        mat_cs = np.zeros((nTRTest, 1))
        py_cs = np.zeros((nTRTest, 1))
        for t in np.arange(nTRTest):
            categ = np.flatnonzero(test_labels[t, :])
            otherCateg = (categ + 1) % 2
            _, _, _, activations_mat = Test_L2_RLR_realtime(
                matModel_train, mat_test_data[t, :], test_labels[t, :])
            mat_cs[t] = activations_mat[categ] - activations_mat[otherCateg]
            if not usenewmodel:
                if not usesamedata:
                    _, _, _, activations_py = Test_L2_RLR_realtime(
                        pyModel_train, py_test_data[t, :], test_labels[t, :])
                else:
                    _, _, _, activations_py = Test_L2_RLR_realtime(
                        pyModel_train, mat_test_data[t, :], test_labels[t, :])
            else:
                if not usesamedata:
                    _, _, _, activations_py = Test_L2_RLR_realtime(
                        newTrainedModel, py_test_data[t, :], test_labels[t, :])
                else:
                    _, _, _, activations_py = Test_L2_RLR_realtime(
                        newTrainedModel, mat_test_data[t, :],
                        test_labels[t, :])
            py_cs[t] = activations_py[categ] - activations_py[otherCateg]
        all_vals[:, 0, runId - 1] = mat_cs[:, 0]
        all_vals[:, 1, runId - 1] = py_cs[:, 0]
        #plt.figure()
        #if usenewmodel:
        #	plt.plot(matModel_train.weights[:,0],newTrainedModel.weights[:,0], '.')
        #else:
        #	plt.plot(matModel_train.weights[:,0],pyModel_train.weights[:,0], '.')
        #plt.xlim([-.02 ,.02])
        #plt.ylim([-.02 ,.02])
        #plt.xlabel('MATLAB')
        #plt.ylabel('PYTHON')
        #plt.show()
    all_mat_ev = np.reshape(all_vals[:, 0, :],
                            ((cfg.session.Runs[-1] - 1) * 100, 1))
    all_py_ev = np.reshape(all_vals[:, 1, :],
                           ((cfg.session.Runs[-1] - 1) * 100, 1))
    fig, ax = plt.subplots(figsize=(12, 7))
    plt.plot(all_mat_ev, all_py_ev, '.')
    plt.plot([-5, 5], [-5, 5], '--k')
    plt.title('S%i MAT x PY CORR = %4.4f' %
              (cfg.session.subjectNum,
               scipy.stats.pearsonr(all_mat_ev, all_py_ev)[0][0]))
    plt.xlabel('MATLAB')
    plt.ylabel('PYTHON')
    plt.xlim([-1.5, 1.5])
    plt.ylim([-1.5, 1.5])
    plt.show()

    plt.figure()
    plt.hist(all_mat_ev, alpha=0.6, label='matlab')
    plt.hist(all_py_ev, alpha=0.6, label='python')
    plt.xlabel('Correct - Incorrect Activation')
    plt.ylabel('Frequency')
    plt.title('S%i MAT x PY CORR = %4.4f' %
              (cfg.session.subjectNum,
               scipy.stats.pearsonr(all_mat_ev, all_py_ev)[0][0]))
    plt.legend()
    plt.show()
        print(runId)
        runDir = 'run' + str(runId) + '/'
        pyModelFn = utils.findNewestFile(pyDataDir, 'trainedModel_r' + str(runId) + '*_py.mat')
        # to find what matModel includes use matModel.keys() --> trainedModel, trainPats, trainLabels
        # for each model we have W [nVoxel x 2 classes], biases [1 x 2 classes]
        # we can't apply this model to any of the examples in this run, but let's apply it to the first 4 blocks of the next run
        # now load testing data from the next run to test it on
        pyModel_train = utils.loadMatFile(pyModelFn)
        # INSTEAD MAKE NEW MODEL
        lrc1 = LogisticRegression(penalty='l2', solver='saga', max_iter=300)
        lrc2 = LogisticRegression(penalty='l2', solver='saga', max_iter=300)

        lrc1.fit(pyModel_train.trainPats, pyModel_train.trainLabels[:, 0])
        lrc2.fit(pyModel_train.trainPats, pyModel_train.trainLabels[:, 1])
        newTrainedModel = utils.MatlabStructDict({}, 'trainedModel')
        newTrainedModel.trainedModel = StructDict({})
        newTrainedModel.trainedModel.weights = np.concatenate((lrc1.coef_.T, lrc2.coef_.T), axis=1)
        newTrainedModel.trainedModel.biases = np.concatenate((lrc1.intercept_, lrc2.intercept_)).reshape(1, 2)
        newTrainedModel.trainPats = pyModel_train.trainPats
        newTrainedModel.trainLabels = pyModel_train.trainLabels

        # now load testing data for CV
        pyModelFn = utils.findNewestFile(pyDataDir, 'trainedModel_r' + str(runId + 1) + '*_py.mat')
        pyModel_test = utils.loadMatFile(pyModelFn)
        py_test_data = pyModel_test.trainPats[nTRTest:, :]
        test_labels = pyModel_test.trainLabels[nTRTest:, :]
        py_cs = np.zeros((nTRTest, 1))
        activations = np.zeros((nTRTest, 2))
        for t in np.arange(nTRTest):
            _, _, _, activations_py = Test_L2_RLR_realtime(newTrainedModel, py_test_data[t, :], test_labels[t, :])
            activations[t, :] = activations_py
def test_createRegConfig():
    client = RtAttenClient()

    # Test 1, list in a string
    cfg = StructDict()
    cfg.session = StructDict()
    cfg.session.Runs = '1 ,2, 3'
    cfg.session.ScanNums = '1'
    assert checkCfg(client, cfg)
    assert type(cfg.session.Runs[0]) is int
    assert type(cfg.session.ScanNums[0]) is int

    # Test 2, list of strings
    cfg = StructDict()
    cfg.session = StructDict()
    cfg.session.Runs = ['1', '2', '3']
    cfg.session.ScanNums = ['1']
    assert checkCfg(client, cfg)
    assert type(cfg.session.Runs[0]) is int
    assert type(cfg.session.ScanNums[0]) is int

    # Test 3, list of string list
    cfg = StructDict()
    cfg.session = StructDict()
    cfg.session.Runs = ['1 ,2, 3']
    cfg.session.ScanNums = ['1']
    assert checkCfg(client, cfg)
    assert type(cfg.session.Runs[0]) is int
    assert type(cfg.session.ScanNums[0]) is int

    # Test 4, list of ints
    cfg = StructDict()
    cfg.session = StructDict()
    cfg.session.Runs = [1, 2, 3]
    cfg.session.ScanNums = [1]
    assert checkCfg(client, cfg)
    assert type(cfg.session.Runs[0]) is int
    assert type(cfg.session.ScanNums[0]) is int

    # Test 5, empty list
    cfg = StructDict()
    cfg.session = StructDict()
    cfg.session.Runs = []
    cfg.session.ScanNums = []
    assert checkCfg(client, cfg) is False
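
test_createRegConfig expects checkCfg to normalize every Runs/ScanNums spelling to a list of ints. A hypothetical sketch of that normalization (normalizeRunList is illustrative, not the repo's helper):

def normalizeRunList(value):
    # accept '1 ,2, 3', ['1', '2', '3'], ['1 ,2, 3'], or [1, 2, 3]
    if isinstance(value, str):
        value = value.split(',')
    result = []
    for item in value:
        result.extend(int(tok) for tok in str(item).split(','))
    return result

assert normalizeRunList('1 ,2, 3') == [1, 2, 3]
assert normalizeRunList(['1', '2', '3']) == [1, 2, 3]
assert normalizeRunList(['1 ,2, 3']) == [1, 2, 3]
assert normalizeRunList([1, 2, 3]) == [1, 2, 3]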
def main():

    # MAKES STRUCT WITH ALL PARAMETERS IN IT
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config', '-c', default='greenEyes_organized.toml', type=str,
                           help='experiment config file (.json or .toml)')
    argParser.add_argument('--runs', '-r', default=None, type=str,
                           help='Comma separated list of run numbers')
    argParser.add_argument('--scans', '-s', default=None, type=str,
                           help='Comma separated list of scan numbers')
    # creates web pipe communication link to send/request responses through web pipe
    argParser.add_argument('--webpipe', '-w', default=None, type=str,
                           help='Named pipe to communicate with webServer')
    argParser.add_argument('--webfilesremote', '-x', default=False, action='store_true',
                           help='dicom files retrieved from remote server')
    args = argParser.parse_args()
    params = StructDict({'config': args.config, 'runs': args.runs, 'scans': args.scans,
                         'webpipe': args.webpipe, 'webfilesremote': args.webfilesremote})
    cfg = initializeGreenEyes(params.config, params)

    # initialize file interface class -- for now only local
    fileInterface = FileInterface()
    # initialize watching in particular directory
    fileInterface.initWatch(cfg.intelrt.imgDir, cfg.intelrt.dicomNamePattern, cfg.minExpectedDicomSize)
    runData = StructDict()
    runData.cheating_probability = np.zeros((cfg.nStations,))
    runData.correct_prob = np.zeros((cfg.nStations,))
    runData.interpretation = getSubjectInterpretation(cfg)
    runData.badVoxels = {}
    runData.dataForClassification = {}
    story_TRs = cfg.story_TR_2 - cfg.story_TR_1
    SKIP = 10
    all_data = np.zeros((cfg.nVox, cfg.nTR_run))  # don't need to save
    runData.story_data = np.zeros((cfg.nVox, story_TRs))
    #### MAIN PROCESSING ###
    ## FUNCTION TO OPERATE OVER ALL SCANNING RUNS
    # LOOP OVER ALL CFG.SCANNUMS
    nRuns = len(cfg.runs)
    for runIndex in np.arange(nRuns):

        header = makeRunHeader(cfg, runIndex)
        print(header)
        run = cfg.runs[runIndex]
        scanNum = cfg.scanNums[runIndex]

        storyTRCount = 0
        for TRFilenum in np.arange(SKIP + 1, cfg.nTR_run + 1):
            ##### GET DATA BUFFER FROM LOCAL MACHINE ###
            # if the filename starts with a slash it is treated as a full path;
            # otherwise it is resolved relative to the watch directory
            dicomData = fileInterface.watchfile(getDicomFileName(cfg, scanNum, TRFilenum), timeout=5)
            full_nifti_name = convertToNifti(TRFilenum, scanNum, cfg)
            registeredFileName = registerNewNiftiToMNI(cfg, full_nifti_name)
            maskedData = apply_mask(registeredFileName, cfg.mask_filename)
            all_data[:, TRFilenum - 1] = maskedData  # 0-based column for the 1-based file number
            if TRFilenum >= cfg.fileNum_story_TR_1 and TRFilenum <= cfg.fileNum_story_TR_2:  # we're at a story TR now
                runData.story_data[:, storyTRCount] = maskedData
                if np.any(storyTRCount == cfg.last_tr_in_station.astype(int)):
                    # NOW PREPROCESS AND CLASSIFY
                    runData = preprocessAndPredict(cfg, runData, storyTRCount)
                    text_to_save = '{0:05d}'.format(runData.correct_prob[stationInd])
                    file_name_to_save = getStationClassoutputFilename(cfg.sessionId, cfg.run, stationInd)
                    full_filename_to_save = cfg.intelrt.subject_full_day_path + file_name_to_save
                    fileInterface.putTextFile(full_filename_to_save, text_to_save)
                storyTRCount += 1
            else:
def createRunConfig(session, patterns, runId, scanNum=-1):
    run = StructDict()
    run.runId = runId
    idx = getRunIndex(session, runId)
    if scanNum >= 0:
        run.scanNum = scanNum
    elif session.ScanNums is not None and idx >= 0 and len(
            session.ScanNums) > idx:
        run.scanNum = session.ScanNums[idx]
    else:
        run.scanNum = -1

    run.disdaqs = int(patterns.disdaqs)
    run.nBlocksPerPhase = int(patterns.nBlocksPerPhase)
    run.TRTime = int(patterns.TR)
    run.nTRs = int(patterns.nTRs)
    run.nTRsFix = int(patterns.nTRsFix)

    run.firstVolPhase1 = int(np.min(np.where(patterns.block.squeeze() == 1)))
    run.lastVolPhase1 = int(
        np.max(np.where(patterns.block.squeeze() == patterns.nBlocksPerPhase)))
    if run.lastVolPhase1 != patterns.lastVolPhase1 - 1:
        raise ValidationError(
            "createRunConfig: calculated lastVolPhase1 does not match value "
            "loaded from patternsdesign {} {}".format(run.lastVolPhase1,
                                                      patterns.lastVolPhase1))
    run.nVolsPhase1 = run.lastVolPhase1 - run.firstVolPhase1 + 1
    run.firstVolPhase2 = int(
        np.min(
            np.where(patterns.block.squeeze() == (patterns.nBlocksPerPhase +
                                                  1))))
    if run.firstVolPhase2 != patterns.firstVolPhase2 - 1:
        raise ValidationError(
            "createRunConfig: calculated firstVolPhase2 does not match value "
            "loaded from patternsdesign {} {}".format(run.firstVolPhase2,
                                                      patterns.firstVolPhase2))
    run.lastVolPhase2 = int(np.max(np.where(patterns.type.squeeze() != 0)))
    run.nVolsPhase2 = run.lastVolPhase2 - run.firstVolPhase2 + 1

    sumRegressor = patterns.regressor[0, :] + patterns.regressor[1, :]
    run.firstTestTR = int(np.min(np.where(sumRegressor == 1)))

    run.nVols = patterns.block.shape[1]

    blockGroups = []

    blkGrp1 = createBlockGroupConfig(range(run.firstVolPhase2), patterns)
    blkGrp1.blkGrpId = 1
    blkGrp1.nTRs = run.firstVolPhase2
    blockGroups.append(blkGrp1)

    blkGrp2 = createBlockGroupConfig(range(run.firstVolPhase2, run.nVols),
                                     patterns)
    blkGrp2.blkGrpId = 2
    blkGrp2.nTRs = run.nVols - run.firstVolPhase2
    blockGroups.append(blkGrp2)

    run.blockGroups = blockGroups
    return run
Example #29
    def TrainModel(self, msg):
        """Load block group patterns data from this and the previous run and
        create the ML model for the next run. Save the model to a file.
        """
        reply = super().TrainModel(msg)
        trainStart = time.time()  # start timing

        # load data to train model
        trainCfg = msg.fields.cfg
        bgRef1 = StructDict(trainCfg.blkGrpRefs[0])
        bgRef2 = StructDict(trainCfg.blkGrpRefs[1])
        bg1 = bg2 = None
        try:
            bg1 = self.getPrevBlkGrp(self.id_fields.sessionId, bgRef1.run,
                                     bgRef1.phase)
            bg2 = self.getPrevBlkGrp(self.id_fields.sessionId, bgRef2.run,
                                     bgRef2.phase)
        except Exception as err:
            errorReply = self.createReplyMessage(msg, MsgResult.Error)
            if bg1 is None:
                errorReply.data = "Error: getPrevBlkGrp(%r, %r, %r): %r" %\
                    (self.id_fields.sessionId, bgRef1.run, bgRef1.phase, err)
            else:
                errorReply.data = "Error: getPrevBlkGrp(%r, %r, %r): %r" %\
                    (self.id_fields.sessionId, bgRef2.run, bgRef2.phase, err)
            return errorReply

        trainIdx1 = utils.find(np.any(bg1.patterns.regressor, axis=0))
        trainLabels1 = np.transpose(bg1.patterns.regressor[:, trainIdx1]
                                    )  # find the labels of those indices
        trainPats1 = bg1.patterns.raw_sm_filt_z[
            trainIdx1, :]  # retrieve the patterns of those indices

        trainIdx2 = utils.find(np.any(bg2.patterns.regressor, axis=0))
        trainLabels2 = np.transpose(bg2.patterns.regressor[:, trainIdx2])
        trainPats2 = bg2.patterns.raw_sm_filt_z[trainIdx2, :]

        trainPats = np.concatenate((trainPats1, trainPats2))
        trainLabels = np.concatenate((trainLabels1, trainLabels2))
        trainLabels = trainLabels.astype(np.uint8)

        # train the model
        # sklearn LogisticRegression takes one set of labels and returns one set of weights.
        # The version implemented in Matlab can take multiple sets of labels and return multiple weights.
        # To reproduce that behavior here, we use a LogisticRegression instance for each set of labels (2 in this case)
        lrc1 = LogisticRegression(solver='saga', penalty='l2', max_iter=300)
        lrc2 = LogisticRegression(solver='saga', penalty='l2', max_iter=300)
        lrc1.fit(trainPats, trainLabels[:, 0])
        lrc2.fit(trainPats, trainLabels[:, 1])
        newTrainedModel = utils.MatlabStructDict({}, 'trainedModel')
        newTrainedModel.trainedModel = StructDict({})
        newTrainedModel.trainedModel.weights = np.concatenate(
            (lrc1.coef_.T, lrc2.coef_.T), axis=1)
        newTrainedModel.trainedModel.biases = np.concatenate(
            (lrc1.intercept_, lrc2.intercept_)).reshape(1, 2)
        newTrainedModel.trainPats = trainPats
        newTrainedModel.trainLabels = trainLabels
        newTrainedModel.FWHM = self.session.FWHM
        newTrainedModel.cutoff = self.session.cutoff
        newTrainedModel.gitCodeId = utils.getGitCodeId()

        trainEnd = time.time()  # end timing
        trainingOnlyTime = trainEnd - trainStart

        # print training timing and results
        reply.fields.outputlns.append('Model training completed')
        outStr = 'Model training time: \t{:.3f}'.format(trainingOnlyTime)
        reply.fields.outputlns.append(outStr)
        if newTrainedModel.biases is not None:
            outStr = 'Model biases: \t{:.3f}\t{:.3f}'.format(
                newTrainedModel.biases[0, 0], newTrainedModel.biases[0, 1])
            reply.fields.outputlns.append(outStr)

        # cache the trained model
        self.modelCache[self.id_fields.runId] = newTrainedModel

        if self.session.validate:
            try:
                self.validateModel(newTrainedModel, reply.fields.outputlns)
            except Exception as err:
                # Just log that an error happened during validation
                logging.error("validateModel: %r", err)
                pass
        # write trained model to a file
        filename = getModelFilename(self.id_fields.sessionId,
                                    self.id_fields.runId)
        trainedModel_fn = os.path.join(self.dirs.dataDir, filename)
        try:
            sio.savemat(trainedModel_fn, newTrainedModel, appendmat=False)
        except Exception as err:
            errorReply = self.createReplyMessage(msg, MsgResult.Error)
            errorReply.data = "Error: Unable to save trainedModel %s: %s" % (
                filename, str(err))
            return errorReply
        return reply
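
A worked sketch of the weight-combining step in TrainModel: two independent binary LogisticRegression fits, one per label column, concatenated into a single nVoxel x 2 weight matrix and a 1 x 2 bias row (the data here is synthetic):

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
trainPats = rng.randn(40, 5)             # 40 TRs x 5 voxels
trainLabels = np.zeros((40, 2), dtype=np.uint8)
trainLabels[:20, 0] = 1                  # category A TRs
trainLabels[20:, 1] = 1                  # category B TRs

lrc1 = LogisticRegression(solver='saga', penalty='l2', max_iter=300)
lrc2 = LogisticRegression(solver='saga', penalty='l2', max_iter=300)
lrc1.fit(trainPats, trainLabels[:, 0])
lrc2.fit(trainPats, trainLabels[:, 1])
weights = np.concatenate((lrc1.coef_.T, lrc2.coef_.T), axis=1)
biases = np.concatenate((lrc1.intercept_, lrc2.intercept_)).reshape(1, 2)
print(weights.shape, biases.shape)       # (5, 2) (1, 2)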
Example #30
                           type=str,
                           help='Comma separated list of scan number')
    argParser.add_argument('--run-local',
                           '-l',
                           default=False,
                           action='store_true',
                           help='run client and server together locally')
    argParser.add_argument('--webpipe',
                           '-w',
                           default=None,
                           type=str,
                           help='Named pipe to communicate with webServer')
    argParser.add_argument('--webfilesremote',
                           '-x',
                           default=False,
                           action='store_true',
                           help='dicom files retrieved from remote server')
    args = argParser.parse_args()
    params = StructDict({
        'addr': args.addr,
        'port': args.port,
        'run_local': args.run_local,
        'model': args.model,
        'experiment': args.experiment,
        'runs': args.runs,
        'scans': args.scans,
        'webpipe': args.webpipe,
        'webfilesremote': args.webfilesremote
    })
    ClientMain(params)