Example #1
    def testOnNumpyArray(self):

        na = np.random.randn(4,4)
        d = { 'k1' : 'v1',
              'd2' : {'d2k1':0, 1:190, 'na':na,
                      'd2-3' : {'plop':'plip',435:'fr'}},
              'k3' : 1098,
              }

        sd1 = dictToString(d)
        #print sd1
        naf = {'precision':2, 'max_line_width':100}
        sd2 = dictToString(d, numpyArrayFormat=naf)
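
A minimal standalone sketch of the same pattern, assuming dictToString is importable from pyhrf's tools module (the import path is an assumption) and accepts the numpyArrayFormat keyword used in the test above:

import numpy as np
from pyhrf.tools import dictToString  # assumed location of dictToString

d = {'weights': np.random.randn(3, 3), 'label': 'demo', 'count': 7}

# Default rendering of the nested dict, numpy array included:
print(dictToString(d))

# Constrain how embedded numpy arrays are printed (2 decimals, 100-character
# lines), mirroring the naf dict in the test above:
naf = {'precision': 2, 'max_line_width': 100}
print(dictToString(d, numpyArrayFormat=naf))
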
Example #2
def append_common_treatment_options(parser):
    parser.add_option('-s','--spm-mat-file', metavar='MATFILE', dest='spmFile',
                      default=None,
                      help='SPM.mat from which to extract paradigm data '\
                      '(onsets and stimulus durations) and TR. Note: if '\
                      'the option "-p/--paradigm" is also provided, then'\
                      ' the latter is ignored.')

    paradigms = ['loc_av', 'loc', 'loc_cp_only', 'loc_cpcd', 'language',
                 'loc_ainsi', 'loc_ainsi_cpcd']
    parser.add_option('-p','--paradigm', dest='paradigm', default=paradigms[0],
                      metavar='STRING', type='choice', choices=paradigms,
                      help='Paradigm to use, choices are: '+ \
                          string.join(paradigms,',') + '. Default is %default.'\
                          ' Note: ignored if option "-s/--spm-mat-file" is '\
                          'provided.')

    parser.add_option('-r','--paradigm-csv', dest='paradigm_csv',
                      default=None,
                      metavar='CSVFILE',
                      help='Paradigm CSV file input')
    parser.add_option('-I','--time-repetition', dest='tr',
                        default=None,
                        metavar='FLOAT',type='float',
                        help='Repetition time')


    inputTypes = ['volume', 'surface', 'simulation']
    parser.add_option('-d','--data-type', type='choice', choices=inputTypes,
                      metavar='STRING', dest='inputDataType', default='volume',
                      help='Define the type of input data, choices are: ' + \
                          string.join(inputTypes,',') + '. Default is %default.')

    data_choices = ['default', 'small', 'realistic']
    parser.add_option('-t', choices=data_choices, dest='data_scenario',
                      metavar='STRING', default='default',
                      help='Scenario for default data set: %s.' \
                          %', '.join(data_choices))


    parser.add_option('-v','--verbose',dest='verbose',metavar='INTEGER',
                      type='int',default=0,
                      help=dictToString(pyhrf.verboseLevels))

    parser.add_option('-f','--func-data-file', action='append',
                      dest='func_data_file',
                      metavar='FILE', default=None,
                      help='Functional data file (BOLD signal).')

    parser.add_option('-k','--mask-file',dest='mask_file',
                      metavar='FILE', default=None,
                      help='Functional mask file '\
                          '(n-ary, may be a parcellation).')

    parser.add_option('-g','--mesh-file',dest='mesh_file',
                      metavar='FILE', default=None,
                      help='Mesh file (only for surface analysis)')
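
A hedged sketch of how this helper might be wired into a script (Python 2 / optparse, matching the code above); it assumes append_common_treatment_options and the module-level names it relies on (string, pyhrf, dictToString) are in scope:

from optparse import OptionParser

parser = OptionParser(usage='usage: %prog [options]')
append_common_treatment_options(parser)  # registers -s, -p, -r, -I, -d, -t, -v, -f, -k, -g
(options, args) = parser.parse_args()

# Parsed values are exposed through the dest names declared above:
print('paradigm=%s, data type=%s, verbose=%d'
      % (options.paradigm, options.inputDataType, options.verbose))
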
Example #3
    def get_info(self, long=True):
        s = ''
        s += 'sessionDurations: %s\n' %str(self.sessionDurations)
        allOns = []
        for oc in self.stimOnsets.values(): #parse conditions
            for o in oc: #parse sessions
                allOns.extend(o)

        allOns = np.array(allOns)
        allOns.sort()
        last = allOns.max()
        meanISI = np.diff(allOns).mean()
        stdISI = np.diff(allOns).std()
        s += ' - onsets : ISI=%1.2f(%1.2f)sec' \
             ' - last event: %1.2fsec\n' %(meanISI, stdISI, last)
        for stimName in self.stimOnsets.keys():
            ntrials = [len(o) for o in self.stimOnsets[stimName]]
            s += '     >> %s, trials per session: %s\n' %(stimName,str(ntrials))
            if long:
                s += dictToString(self.stimOnsets[stimName], prefix='     ')
                s += dictToString(self.stimDurations[stimName], prefix='     ')
        return s
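
get_info only reads three attributes of its object, so its expected data layout can be sketched with a stand-in container (the class and values below are illustrative, and the method from Example #3 is assumed to be available as a plain function):

import numpy as np

class FakeParadigm(object):
    pass

p = FakeParadigm()
p.sessionDurations = [120.]                              # one 120 s session
p.stimOnsets = {'audio': [np.array([0., 15., 32.5])],    # onsets per condition,
                'video': [np.array([5., 22., 40.])]}     # one array per session
p.stimDurations = {'audio': [np.array([3., 3., 3.])],
                   'video': [np.array([3., 3., 3.])]}

# long=False skips the per-condition dictToString dumps and reports only the
# session durations, ISI statistics and trial counts.
print(get_info(p, long=False))
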
Example #4
def run_pyhrf_cmd_treatment(cfg_cmd, exec_cmd, default_cfg_file,
                            default_profile_file, label_for_cluster):


    usage = 'usage: %prog [options]'

    description = 'Manage a joint detection-estimation treatment of fMRI data. ' \
                'This command runs the treatment defined in an XML '\
                'parameter file. See the pyhrf_jde_buildcfg command to build a '\
                'template of such a file. If no XML file is found, a '\
                'default example analysis is run.'

    parser = OptionParser(usage=usage, description=description)

    parser.add_option('-c','--input-cfg-file', metavar='XMLFILE', dest='cfgFile',
                    default=default_cfg_file,
                    help='Configuration file: XML file containing parameters'\
                    ' defining input data and analysis to perform.')

    parser.add_option('-r','--roi-data', metavar='PICKLEFILE', dest='roidata',
                    default=None, help='Input fMRI ROI data. The data '\
                    'definition part in the config file is ignored.')

    parser.add_option('-t','--treatment_pck',
                      metavar='PICKLEFILE', dest='treatment_pck',
                      default=None, help='Input treatment as a pickle dump. ' \
                          'The XML cfg file is ignored.')

    parser.add_option('-s','--stop-on-error', dest='stop_on_error',
                      action='store_true',
                    default=False, help='For debug: do not continue if an error' \
                          ' occurs during one ROI analysis.')


    parser.add_option('-v','--verbose',dest='verbose',metavar='INTEGER',
                    type='int',default=0,
                    help=dictToString(pyhrf.verboseLevels))

    parser.add_option('-p','--profile',action='store_true', default=False,
                    help='Enable profiling of treatment. Store profile data in '\
                        '%s. NOTE: not available in parallel mode.'\
                    %default_profile_file)

    parallel_choices = ['LAN','local','cluster']
    parser.add_option('-x','--parallel', choices=parallel_choices,
                    help='Parallel processing. Choices are %s'\
                        %string.join(parallel_choices,', '))


    (options,args) = parser.parse_args()

    pyhrf.verbose.set_verbosity(options.verbose)

    t0 = time.time()

    if options.treatment_pck is not None:
        f = open(options.treatment_pck)
        treatment = cPickle.load(f)
        f.close()
    else:
        if not os.path.exists(options.cfgFile):
            print 'Error: could not find default configuration file "%s"\n'\
                'Consider running "%s" to generate it.' \
                %(options.cfgFile, cfg_cmd)
            sys.exit(1)
        else:
            pyhrf.verbose(1, 'Loading configuration from: "%s" ...' \
                              %options.cfgFile)
            f = open(options.cfgFile, 'r')
            sXml = string.join(f.readlines())
            f.close()
            treatment = xmlio.fromXML(sXml)
            if 0:
                sXml = xmlio.toXML(treatment,
                                   handler=xmlio.xmlnumpy.NumpyXMLHandler())
                f = './treatment_cmd.xml'
                fOut = open(f,'w')
                fOut.write(sXml)
                fOut.close()
            #f = open(fOut, 'w')
            #cPickle.dump(treatment, f)
            #f.close()


    treatment.analyser.set_pass_errors(not options.stop_on_error)

    if options.parallel is not None:

        # tmpDir = tempfile.mkdtemp(prefix='pyhrf',
        #                           dir=pyhrf.cfg['global']['tmp_path'])
        # pyhrf.verbose(1, 'Tmpdir: %s' %tmpDir)

        treatment.run(parallel=options.parallel)

    else:
        if options.roidata is not None:
            #treatment.set_roidata(options.roidata)
            pyhrf.verbose(1, 'Loading ROI data from: "%s" ...' \
                              %options.roidata)

            roidata = cPickle.load(open(options.roidata))
            roidata.verbosity = pyhrf.verbose.verbosity
            if pyhrf.verbose.verbosity > 1:
                print roidata.getSummary()
            #TODO: enable profiling
            pyhrf.verbose(1, 'Launching analysis ...')
            if options.profile:
                cProfile.runctx("result = treatment.analyser(roidata)",
                                globals(),
                                {'treatment':treatment,'roidata': roidata},
                                default_profile_file)
            else:
                result = treatment.analyser(roidata)
            outPath = op.dirname(op.abspath(options.roidata))
            fOut = op.join(outPath,"result_%04d.pck" %roidata.get_roi_id())
            pyhrf.verbose(1, 'Dumping results to %s ...' %fOut)
            f = open(fOut, 'w')
            cPickle.dump(result, f)
            f.close()
        else:
            pyhrf.verbose(1, 'ROI data is none')
            if options.profile:
                cProfile.runctx("treatment.run()", globals(),
                                {'treatment':treatment}, default_profile_file)
            else:
                #print 'treatment:', treatment
                treatment.run()

    pyhrf.verbose(1, 'Estimation done, took %s' %format_duration(time.time() - t0))
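
A hedged sketch of how a command-line entry point might delegate to this driver; the command names, file names and cluster label are placeholders (pyhrf_jde_buildcfg is taken from the description string above, the rest is illustrative):

def main():
    # Placeholder arguments; real pyhrf commands supply their own defaults.
    run_pyhrf_cmd_treatment(cfg_cmd='pyhrf_jde_buildcfg',
                            exec_cmd='pyhrf_jde_estim',
                            default_cfg_file='./detectestim.xml',
                            default_profile_file='./detectestim.prof',
                            label_for_cluster='jde')

if __name__ == '__main__':
    main()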