Example #1
def run(args):
    if args.store is not None and args.output is None:
        raise ValueError("--output is required for result storage")
    if args.data is not None:
        dss = [arg2ds(d) for d in args.data]
        if len(dss):
            # convenience short-cut
            ds = dss[0]
    try:
        import nose.tools as nt
    except ImportError:
        pass
    for expr in args.eval:
        if expr == '-':
            exec sys.stdin
        elif os.path.isfile(expr):
            execfile(expr, globals(), locals())
        else:
            exec expr
    if args.store is not None:
        out = {}
        for var in args.store:
            try:
                out[var] = locals()[var]
            except KeyError:
                warning("'%s' not found in local namespace -- skipped." % var)
        if len(out):
            ds2hdf5(out, args.output, compression=args.hdf5_compression)
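At its core this command executes user-supplied expressions and then harvests selected variables by name from the resulting namespace. A minimal self-contained sketch of that pattern (eval_and_collect is an illustrative name, not part of PyMVPA):

# Sketch: exec user code in a scratch namespace, then collect requested
# variables by name -- mirrors the --eval/--store handling above.
import warnings

def eval_and_collect(expressions, wanted):
    ns = {}
    for expr in expressions:
        exec(expr, ns)          # populate the scratch namespace
    out = {}
    for var in wanted:
        try:
            out[var] = ns[var]
        except KeyError:
            warnings.warn("'%s' not found in namespace -- skipped." % var)
    return out

print(eval_and_collect(["x = 6 * 7", "y = x + 1"], ["x", "z"]))
# -> {'x': 42}, plus a warning about 'z'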
Example #2
def _transform_dss(srcs, mappers, args):
    if __debug__:
        debug('CMDLINE', "loading to-be-transformed data from %s" % srcs)
    dss = [arg2ds(d) for d in srcs]
    verbose(1, "Loaded %i to-be-transformed datasets" % len(dss))
    if __debug__:
        debug('CMDLINE', "transform datasets")
    tdss = [mappers[i].forward(td) for i, td in enumerate(dss)]
    return tdss, dss
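The helper simply pairs each to-be-transformed dataset with its mapper by position. A toy stand-in for the mapper interface (Scale is hypothetical, not a PyMVPA class) showing the same pair-by-index pattern:

# Toy sketch of the pair-by-position forward() pattern
class Scale(object):
    def __init__(self, factor):
        self.factor = factor

    def forward(self, data):
        return [v * self.factor for v in data]

mappers = [Scale(2), Scale(10)]
dss = [[1, 2, 3], [4, 5]]
tdss = [mappers[i].forward(td) for i, td in enumerate(dss)]
print(tdss)  # [[2, 4, 6], [40, 50]]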
Example #3
def run(args):
    print args.data
    dss = [arg2ds(d)[:,:100] for d in args.data]
    verbose(1, "Loaded %i input datasets" % len(dss))
    if __debug__:
        for i, ds in enumerate(dss):
            debug('CMDLINE', "dataset %i: %s" % (i, str(ds)))
    # TODO at this point more checks could be done, e.g. ref_ds > len(dss)
    # assemble parameters
    params = dict([(param, getattr(args, param)) for param in _supported_parameters])
    if __debug__:
        debug('CMDLINE', "configured parameters: '%s'" % params)
    # assemble CAs
    enabled_ca = [ca for ca in _supported_cas if getattr(args, ca)]
    if __debug__:
        debug('CMDLINE', "enabled conditional attributes: '%s'" % enabled_ca)
    hyper = Hyperalignment(enable_ca=enabled_ca,
                           alignment=ProcrusteanMapper(svd='dgesvd',
                                                       space='commonspace'),
                           **params)
    verbose(1, "Running hyperalignment")
    promappers = hyper(dss)
    verbose(2, "Alignment reference is dataset %i" % hyper.ca.chosen_ref_ds)
    verbose(1, "Writing output")
    # save on memory and remove the training data
    del dss
    if args.commonspace:
        if __debug__:
            debug('CMDLINE', "write commonspace as hdf5")
        h5save('%s%s.hdf5' % (args.output_prefix,
                              _output_specs['commonspace']['output_suffix']),
               hyper.commonspace,
               compression=args.hdf5_compression)
    for ca in _supported_cas:
        if __debug__:
            debug('CMDLINE', "check conditional attribute: '%s'" % ca)
        if getattr(args, ca):
            if __debug__:
                debug('CMDLINE', "store conditional attribute: '%s'" % ca)
            np.savetxt('%s%s' % (args.output_prefix,
                                 _supported_cas[ca]['output_suffix']),
                       hyper.ca[ca].value.samples)
    if args.store_transformation:
        for i, pm in enumerate(promappers):
            if __debug__:
                debug('CMDLINE', "store mapper %i: %s" % (i, str(pm)))
            h5save('%s%s.hdf5' % (args.output_prefix, '_map%.3i' % i),
                   pm, compression=args.hdf5_compression)
    if args.transform:
        tdss, dss = _transform_dss(args.transform, promappers, args)
        del dss
        verbose(1, "Store transformed datasets")
        for i, td in enumerate(tdss):
            if __debug__:
                debug('CMDLINE', "store transformed data %i: %s" % (i, str(td)))
            h5save('%s%s.hdf5' % (args.output_prefix, '_transformed%.3i' % i),
                   td, compression=args.hdf5_compression)
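The parameter assembly step is a plain getattr harvest from the argparse namespace. A self-contained sketch (the parameter names are illustrative, not PyMVPA's actual _supported_parameters):

# Sketch: collecting a parameter dict from an argparse namespace by name
from argparse import Namespace

supported = ('ref_ds', 'level2_niter')       # illustrative names
args = Namespace(ref_ds=0, level2_niter=1, unrelated='ignored')
params = dict((p, getattr(args, p)) for p in supported)
print(params)  # {'ref_ds': 0, 'level2_niter': 1}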
Example #4
def run(args):
    ds = arg2ds(args.data)
    verbose(3, "Concatenation yielded %i samples with %i features" % ds.shape)
    if args.numpy_xfm is not None:
        from mvpa2.mappers.fx import FxMapper

        fx, axis = args.numpy_xfm
        mapper = FxMapper(axis, fx)
        ds = ds.get_mapped(mapper)
    info_fx[args.report](ds, args)
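FxMapper collapses the dataset along one axis with an arbitrary function. On the raw sample matrix this is roughly an axis-wise numpy reduction (a sketch only; the real mapper also merges the corresponding sample/feature attributes):

# Rough numpy analogue of an axis-wise FxMapper reduction
import numpy as np

samples = np.arange(12.).reshape(4, 3)                      # 4 samples x 3 features
per_sample_mean = np.mean(samples, axis=1, keepdims=True)   # collapse features
per_feature_mean = np.mean(samples, axis=0, keepdims=True)  # collapse samples
print(per_sample_mean.shape, per_feature_mean.shape)        # (4, 1) (1, 3)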
Example #5
def run(args):
    ds = arg2ds(args.data)
    verbose(3, 'Concatenation yielded %i samples with %i features' % ds.shape)
    # build list of events
    events = []
    timebased_events = False
    if args.event_attrs is not None:
        def_attrs = dict([(k, ds.sa[k].value) for k in args.event_attrs])
        events = find_events(**def_attrs)
    elif args.csv_events is not None:
        if args.csv_events == '-':
            csv = sys.stdin.read()
            import cStringIO
            csv = cStringIO.StringIO(csv)
        else:
            csv = open(args.csv_events, 'rU')
        csvt = _load_csv_table(csv)
        if not len(csvt):
            raise ValueError("no CSV columns found")
        if args.onset_column:
            csvt['onset'] = csvt[args.onset_column]
        nevents = len(csvt[csvt.keys()[0]])
        events = []
        for ev in xrange(nevents):
            events.append(dict([(k, v[ev]) for k, v in csvt.iteritems()]))
    elif args.onsets is not None:
        if not len(args.onsets):
            args.onsets = [i for i in sys.stdin]
        # time or sample-based?
        if args.time_attr is None:
            oconv = int
        else:
            oconv = float
        events = [{'onset': oconv(o)} for o in args.onsets]
    elif args.fsl_ev3 is not None:
        timebased_events = True
        from mvpa2.misc.fsl import FslEV3
        events = []
        for evsrc in args.fsl_ev3:
            events.extend(FslEV3(evsrc).to_events())
    if not len(events):
        raise ValueError("no events defined")
    verbose(2, 'Extracting %i events' % len(events))
    if args.event_compression is None:
        evmap = None
    elif args.event_compression == 'mean':
        evmap = FxMapper('features', np.mean, attrfx=merge2first)
    elif args.event_compression == 'median':
        evmap = FxMapper('features', np.median, attrfx=merge2first)
    elif args.event_compression == 'min':
        evmap = FxMapper('features', np.min, attrfx=merge2first)
    elif args.event_compression == 'max':
        evmap = FxMapper('features', np.max, attrfx=merge2first)
    # convert to event-related ds
    evds = eventrelated_dataset(ds, events, time_attr=args.time_attr,
                                match=args.match_strategy,
                                event_offset=args.offset,
                                event_duration=args.duration,
                                event_mapper=evmap)
    # act on all attribute options
    evds = process_common_dsattr_opts(evds, args)
    # and store
    ds2hdf5(evds, args.output, compression=args.hdf5_compression)
    return evds
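The CSV branch transposes a dict of columns into one dict per event (row). That transposition in isolation:

# Sketch: dict-of-columns -> list of per-event dicts, as in the CSV branch
csvt = {'onset': [2.0, 6.0], 'duration': [1.0, 1.0], 'condition': ['a', 'b']}
nevents = len(csvt[list(csvt)[0]])
events = [dict((k, v[ev]) for k, v in csvt.items()) for ev in range(nevents)]
print(events[0])  # {'onset': 2.0, 'duration': 1.0, 'condition': 'a'}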
Example #6
    # apparently the tail of a do_searchlight() helper -- the searchlight
    # setup and accuracy-map computation preceding these lines are missing
    acc_nii_filename = '{}-acc.nii.gz'.format(output_basename)
    acc_results.to_filename(acc_nii_filename)
    sl_map.samples *= -1
    sl_map.samples += 1
    niftiresults = map2nifti(sl_map,
                             imghdr=glm_dataset.a.imghdr)
    niftiresults.to_filename('{}-err.nii.gz'.format(output_basename))
    # TODO: check p value map
    if with_null_prob:
        nullt_results = map2nifti(sl_map, data=sl.ca.null_t,
                                  imghdr=glm_dataset.a.imghdr)
        nullt_results.to_filename('{}-t.nii.gz'.format(output_basename))
        nullprob_results = map2nifti(sl_map, data=sl.ca.null_prob,
                                     imghdr=glm_dataset.a.imghdr)
        nullprob_results.to_filename('{}-prob.nii.gz'.format(output_basename))
        nullprob_results = map2nifti(sl_map, data=distr_est.cdf(sl_map.samples),
                                     imghdr=glm_dataset.a.imghdr)
        nullprob_results.to_filename('{}-cdf.nii.gz'.format(output_basename))
    return sl_map


if __name__ == '__main__':
    filename = sys.argv[1]
    radius = int(sys.argv[2])
    print filename
    output_basename = '{}_r{}_c-{}'.format(filename, radius, 'linear')
    print output_basename
    ds = arg2ds([filename])
    do_searchlight(ds, radius, output_basename,
                   False)
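The in-place pair `samples *= -1; samples += 1` above turns an accuracy map into an error map (err = 1 - acc) without allocating a temporary array. The same trick on a plain array:

# err = 1 - acc, computed in place
import numpy as np

acc = np.array([0.9, 0.75, 0.5])
err = acc.copy()
err *= -1
err += 1
assert np.allclose(err, 1 - acc)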
Example #7
def run(args):
    if os.path.isfile(args.payload) and args.payload.endswith('.py'):
        measure = script2obj(args.payload)
    elif args.payload == 'cv':
        if args.cv_learner is None or args.cv_partitioner is None:
            raise ValueError('cross-validation payload requires --learner and --partitioner')
        # get CV instance
        measure = get_crossvalidation_instance(
                    args.cv_learner, args.cv_partitioner, args.cv_errorfx,
                    args.cv_sampling_repetitions, args.cv_learner_space,
                    args.cv_balance_training, args.cv_permutations,
                    args.cv_avg_datafold_results, args.cv_prob_tail)
    else:
        raise RuntimeError("this should not happen")
    ds = arg2ds(args.data)
    if args.ds_preproc_fx is not None:
        ds = args.ds_preproc_fx(ds)
    # setup neighborhood
    # XXX add big switch to allow for setting up surface-based neighborhoods
    from mvpa2.misc.neighborhood import IndexQueryEngine
    qe = IndexQueryEngine(**dict(args.neighbors))
    # determine ROIs
    rids = None     # all by default
    aggregate_fx = args.aggregate_fx
    if args.roi_attr is not None:
        # first figure out which roi features should be processed
        if len(args.roi_attr) == 1 and args.roi_attr[0] in ds.fa.keys():
            # name of an attribute -> pull non-zeroes
            rids = ds.fa[args.roi_attr[0]].value.nonzero()[0]
        else:
            # an expression?
            from .cmd_select import _eval_attr_expr
            rids = _eval_attr_expr(args.roi_attr, ds.fa).nonzero()[0]

    seed_ids = None
    if args.scatter_rois is not None:
        # scatter_neighborhoods among available ids if requested
        from mvpa2.misc.neighborhood import scatter_neighborhoods
        attr, nb = args.scatter_rois
        coords = ds.fa[attr].value
        if rids is not None:
            # select only those which were chosen by ROI
            coords = coords[rids]
        _, seed_ids = scatter_neighborhoods(nb, coords)
        if aggregate_fx is None:
            # no custom one given -> use default "fill in" function
            aggregate_fx = _fill_in_scattered_results
            if args.enable_ca is None:
                args.enable_ca = ['roi_feature_ids']
            elif 'roi_feature_ids' not in args.enable_ca:
                args.enable_ca += ['roi_feature_ids']

    if seed_ids is None:
        roi_ids = rids
    else:
        if rids is not None:
            # we had to sub-select by scattering among the available rids,
            # so we need to map back to the original ids
            roi_ids = rids[seed_ids]
        else:
            # scattering happened on entire feature-set
            roi_ids = seed_ids

    verbose(3, 'Attempting %i ROI analyses'
               % (ds.nfeatures if roi_ids is None else len(roi_ids)))

    from mvpa2.measures.searchlight import Searchlight

    sl = Searchlight(measure,
                     queryengine=qe,
                     roi_ids=roi_ids,
                     nproc=args.nproc,
                     results_backend=args.multiproc_backend,
                     results_fx=aggregate_fx,
                     enable_ca=args.enable_ca,
                     disable_ca=args.disable_ca)
    # XXX support me too!
    #                 add_center_fa
    #                 tmp_prefix
    #                 nblocks
    #                 null_dist
    # run
    res = sl(ds)
    if (seed_ids is not None) and ('mapper' in res.a):
        # strip the last mapper link in the chain, which would be the seed ID selection
        res.a['mapper'] = res.a.mapper[:-1]
    # XXX create more output
    # and store
    ds2hdf5(res, args.output, compression=args.hdf5_compression)
    return res
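Two index spaces interact above: rids restricts features via a mask, and seed_ids indexes into that restricted set, so the final ids must be composed as rids[seed_ids]. A self-contained numpy illustration:

# Sketch of the two-stage ROI id selection
import numpy as np

mask = np.array([0, 1, 1, 0, 1, 1])   # fa attribute acting as ROI mask
rids = mask.nonzero()[0]              # -> [1 2 4 5]
seed_ids = np.array([0, 2])           # seeds picked among the masked ids
roi_ids = rids[seed_ids]              # map back to original feature ids
print(roi_ids)                        # [1 4]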
Example #8
def run(args):
    ds = arg2ds(args.data)
    # What?
    if args.samples:
        dumpy = ds.samples
    elif not ((args.sa is None) and (args.fa is None) and (args.da is None)):
        for attr, col in ((args.sa, ds.sa), (args.fa, ds.fa), (args.da, ds.a)):
            if attr is None:
                continue
            try:
                dumpy = col[attr].value
            except KeyError:
                raise ValueError("unknown attribute '%s', known are %s)" % (attr, col.keys()))
    else:
        raise ValueError("no dataset component chosen")
    # How?
    if args.format == "txt":
        if args.output:
            out = open(args.output, "w")
        else:
            out = sys.stdout
        try:
            # trying to write numerical data
            fmt = None
            if np.issubdtype(dumpy.dtype, int):
                fmt = "%i"
            elif np.issubdtype(dumpy.dtype, float):
                fmt = "%G"
            if fmt is None:
                np.savetxt(out, dumpy)
            else:
                np.savetxt(out, dumpy, fmt=fmt)
        except Exception:
            # it could be something 1d that we can try to print
            if hasattr(dumpy, "shape") and len(dumpy.shape) == 1:
                for v in dumpy:
                    print v
            else:
                warning("conversion to plain text is not supported for " "this data type")
                # who knows what it is
                out.write(repr(dumpy))
        if out is not sys.stdout:
            out.close()
    elif args.format == "hdf5":
        from mvpa2.base.hdf5 import h5save

        _check_output(args)
        if not args.output.endswith(".hdf5"):
            args.output += ".hdf5"
        h5save(args.output, dumpy)
    elif args.format == "npy":
        _check_output(args)
        np.save(args.output, dumpy)
    elif args.format == "nifti":
        _check_output(args)
        if args.fa:
            # need to wrap into a length-1 sequence to survive rev-mapping
            # properly
            # TODO maybe we should allow more complex transformations, e.g.
            # 2d features may just need a transpose() to fit into a NIfTI
            dumpy = dumpy[None]
        if args.mapperds is None:
            mapperds = ds
        else:
            mapperds = arg2ds(args.mapperds)
        to_nifti(dumpy, mapperds, args)
    return ds
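The txt branch picks a savetxt format from the array dtype. A standalone sketch of the same dispatch (np.integer/np.floating is the portable spelling on newer numpy):

# Sketch: dtype-based format selection for np.savetxt
import io
import numpy as np

def dump_txt(arr, out):
    fmt = None
    if np.issubdtype(arr.dtype, np.integer):
        fmt = '%i'
    elif np.issubdtype(arr.dtype, np.floating):
        fmt = '%G'
    if fmt is None:
        np.savetxt(out, arr)
    else:
        np.savetxt(out, arr, fmt=fmt)

buf = io.StringIO()
dump_txt(np.array([[1, 2], [3, 4]]), buf)
print(buf.getvalue())   # "1 2" / "3 4"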
Example #9
def run(args):
    ds = arg2ds(args.data)
    verbose(3, 'Concatenation yielded %i samples with %i features' % ds.shape)
    # build list of events
    events = []
    timebased_events = False
    if args.event_attrs is not None:
        def_attrs = dict([(k, ds.sa[k].value) for k in args.event_attrs])
        events = find_events(**def_attrs)
    elif args.csv_events is not None:
        if args.csv_events == '-':
            csv = sys.stdin.read()
            import cStringIO
            csv = cStringIO.StringIO(csv)
        else:
            csv = open(args.csv_events, 'rU')
        csvt = _load_csv_table(csv)
        if not len(csvt):
            raise ValueError("no CSV columns found")
        if args.onset_column:
            csvt['onset'] = csvt[args.onset_column]
        nevents = len(csvt[csvt.keys()[0]])
        events = []
        for ev in xrange(nevents):
            events.append(dict([(k, v[ev]) for k, v in csvt.iteritems()]))
    elif args.onsets is not None:
        if not len(args.onsets):
            args.onsets = [i for i in sys.stdin]
        # time or sample-based?
        if args.time_attr is None:
            oconv = int
        else:
            oconv = float
        events = [{'onset': oconv(o)} for o in args.onsets]
    elif args.fsl_ev3 is not None:
        timebased_events = True
        from mvpa2.misc.fsl import FslEV3
        events = []
        for evsrc in args.fsl_ev3:
            events.extend(FslEV3(evsrc).to_events())
    if not len(events):
        raise ValueError("no events defined")
    verbose(2, 'Extracting %i events' % len(events))
    if args.offset:
        # shift events
        for ev in events:
            ev['onset'] += args.offset
    if args.duration:
        # overwrite duration
        for ev in events:
            ev['duration'] = args.duration
    if args.event_compression is None:
        evmap = None
    elif args.event_compression == 'mean':
        evmap = FxMapper('features', np.mean, attrfx=merge2first)
    elif args.event_compression == 'median':
        evmap = FxMapper('features', np.median, attrfx=merge2first)
    elif args.event_compression == 'min':
        evmap = FxMapper('features', np.min, attrfx=merge2first)
    elif args.event_compression == 'max':
        evmap = FxMapper('features', np.max, attrfx=merge2first)
    # convert to event-related ds
    evds = eventrelated_dataset(ds,
                                events,
                                time_attr=args.time_attr,
                                match=args.match_strategy,
                                event_mapper=evmap)
    # act on all attribute options
    evds = process_common_dsattr_opts(evds, args)
    # and store
    ds2hdf5(evds, args.output, compression=args.hdf5_compression)
    return evds
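Unlike Example #5, this variant applies --offset and --duration by mutating the event dicts before conversion. That post-processing in isolation:

# Sketch: shifting onsets and overriding durations on event dicts
events = [{'onset': 2.0, 'duration': 1.0}, {'onset': 6.0, 'duration': 1.0}]
offset, duration = 0.5, 2.0
for ev in events:
    ev['onset'] += offset       # shift every event
    ev['duration'] = duration   # overwrite its duration
print(events)
# [{'onset': 2.5, 'duration': 2.0}, {'onset': 6.5, 'duration': 2.0}]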
Example #10
def run(args):
    if os.path.isfile(args.payload) and args.payload.endswith('.py'):
        measure = script2obj(args.payload)
    elif args.payload == 'cv':
        if args.cv_learner is None or args.cv_partitioner is None:
            raise ValueError('cross-validation payload requires --learner and --partitioner')
        # get CV instance
        measure = get_crossvalidation_instance(
                    args.cv_learner, args.cv_partitioner, args.cv_errorfx,
                    args.cv_sampling_repetitions, args.cv_learner_space,
                    args.cv_balance_training, args.cv_permutations,
                    args.cv_avg_datafold_results, args.cv_prob_tail)
    else:
        raise RuntimeError("this should not happen")
    ds = arg2ds(args.data)
    if args.ds_preproc_fx is not None:
        ds = args.ds_preproc_fx(ds)
    # setup neighborhood
    # XXX add big switch to allow for setting up surface-based neighborhoods
    from mvpa2.misc.neighborhood import IndexQueryEngine
    qe = IndexQueryEngine(**dict(args.neighbors))
    # determine ROIs
    rids = None     # all by default
    aggregate_fx = args.aggregate_fx
    if args.roi_attr is not None:
        # first figure out which roi features should be processed
        if len(args.roi_attr) == 1 and args.roi_attr[0] in list(ds.fa.keys()):
            # name of an attribute -> pull non-zeroes
            rids = ds.fa[args.roi_attr[0]].value.nonzero()[0]
        else:
            # an expression?
            from .cmd_select import _eval_attr_expr
            rids = _eval_attr_expr(args.roi_attr, ds.fa).nonzero()[0]

    seed_ids = None
    if args.scatter_rois is not None:
        # scatter_neighborhoods among available ids if requested
        from mvpa2.misc.neighborhood import scatter_neighborhoods
        attr, nb = args.scatter_rois
        coords = ds.fa[attr].value
        if rids is not None:
            # select only those which were chosen by ROI
            coords = coords[rids]
        _, seed_ids = scatter_neighborhoods(nb, coords)
        if aggregate_fx is None:
            # no custom one given -> use default "fill in" function
            aggregate_fx = _fill_in_scattered_results
            if args.enable_ca is None:
                args.enable_ca = ['roi_feature_ids']
            elif 'roi_feature_ids' not in args.enable_ca:
                args.enable_ca += ['roi_feature_ids']

    if seed_ids is None:
        roi_ids = rids
    else:
        if rids is not None:
            # we had to sub-select by scattering among the available rids,
            # so we need to map back to the original ids
            roi_ids = rids[seed_ids]
        else:
            # scattering happened on entire feature-set
            roi_ids = seed_ids

    verbose(3, 'Attempting %i ROI analyses'
               % (ds.nfeatures if roi_ids is None else len(roi_ids)))

    from mvpa2.measures.searchlight import Searchlight

    sl = Searchlight(measure,
                     queryengine=qe,
                     roi_ids=roi_ids,
                     nproc=args.nproc,
                     results_backend=args.multiproc_backend,
                     results_fx=aggregate_fx,
                     enable_ca=args.enable_ca,
                     disable_ca=args.disable_ca)
    # XXX support me too!
    #                 add_center_fa
    #                 tmp_prefix
    #                 nblocks
    #                 null_dist
    # run
    res = sl(ds)
    if (seed_ids is not None) and ('mapper' in res.a):
        # strip the last mapper link in the chain, which would be the seed ID selection
        res.a['mapper'] = res.a.mapper[:-1]
    # XXX create more output
    # and store
    ds2hdf5(res, args.output, compression=args.hdf5_compression)
    return res
Example #11
def run(args):
    ds = arg2ds(args.data)
    # What?
    if args.samples:
        dumpy = ds.samples
    elif not ((args.sa is None) and (args.fa is None) and (args.da is None)):
        for attr, col in ((args.sa, ds.sa), (args.fa, ds.fa), (args.da, ds.a)):
            if attr is None:
                continue
            try:
                dumpy = col[attr].value
            except KeyError:
                raise ValueError("unknown attribute '%s', known are %s)" %
                                 (attr, col.keys()))
    else:
        raise ValueError('no dataset component chosen')
    # How?
    if args.format == 'txt':
        if args.output:
            out = open(args.output, 'w')
        else:
            out = sys.stdout
        try:
            # trying to write numerical data
            fmt = None
            if np.issubdtype(dumpy.dtype, int):
                fmt = '%i'
            elif np.issubdtype(dumpy.dtype, float):
                fmt = '%G'
            if fmt is None:
                np.savetxt(out, dumpy)
            else:
                np.savetxt(out, dumpy, fmt=fmt)
        except Exception:
            # it could be something 1d that we can try to print
            if hasattr(dumpy, 'shape') and len(dumpy.shape) == 1:
                for v in dumpy:
                    print v
            else:
                warning("conversion to plain text is not supported for "
                        "this data type")
                # who knows what it is
                out.write(repr(dumpy))
        if out is not sys.stdout:
            out.close()
    elif args.format == 'hdf5':
        from mvpa2.base.hdf5 import h5save
        _check_output(args)
        if not args.output.endswith('.hdf5'):
            args.output += '.hdf5'
        h5save(args.output, dumpy)
    elif args.format == 'npy':
        _check_output(args)
        np.save(args.output, dumpy)
    elif args.format == 'nifti':
        _check_output(args)
        if args.fa:
            # need to wrap into a length-1 sequence to survive rev-mapping
            # properly
            # TODO maybe we should allow more complex transformations, e.g.
            # 2d features may just need a transpose() to fit into a NIfTI
            dumpy = dumpy[None]
        if args.mapperds is None:
            mapperds = ds
        else:
            mapperds = arg2ds(args.mapperds)
        to_nifti(dumpy, mapperds, args)
    return ds
Example #12
def run(args):
    if args.chunks is not None:
        # apply global "chunks" setting
        for cattr in ("detrend_chunks", "zscore_chunks"):
            if getattr(args, cattr) is None:
                # only overwrite if individual option is not given
                setattr(args, cattr, args.chunks)
    ds = arg2ds(args.data)
    if args.poly_detrend is not None:
        if args.detrend_chunks is not None and args.detrend_chunks not in ds.sa:
            raise ValueError("--detrend-chunks attribute '%s' not found in dataset" % args.detrend_chunks)
        from mvpa2.mappers.detrend import poly_detrend

        verbose(1, "Detrend")
        poly_detrend(
            ds,
            polyord=args.poly_detrend,
            chunks_attr=args.detrend_chunks,
            opt_regs=args.detrend_regrs,
            space=args.detrend_coords,
        )
    if args.filter_passband is not None:
        from mvpa2.mappers.filters import iir_filter
        from scipy.signal import butter, buttord

        if args.sampling_rate is None or args.filter_stopband is None:
            raise ValueError("spectral filtering requires specification of " "--filter-stopband and --sampling-rate")
        # determine filter type
        nyquist = args.sampling_rate / 2.0
        if len(args.filter_passband) > 1:
            btype = "bandpass"
            if len(args.filter_passband) != len(args.filter_stopband):
                raise ValueError("passband and stopband specifications have to " "match in size")
            wp = [v / nyquist for v in args.filter_passband]
            ws = [v / nyquist for v in args.filter_stopband]
        elif args.filter_passband[0] < args.filter_stopband[0]:
            btype = "lowpass"
            wp = args.filter_passband[0] / nyquist
            ws = args.filter_stopband[0] / nyquist
        elif args.filter_passband[0] > args.filter_stopband[0]:
            btype = "highpass"
            wp = args.filter_passband[0] / nyquist
            ws = args.filter_stopband[0] / nyquist
        else:
            raise ValueError("invalid specification of Butterworth filter")
        # create filter
        verbose(1, "Spectral filtering (%s)" % (btype,))
        try:
            ord, wn = buttord(wp, ws, args.filter_passloss, args.filter_stopattenuation, analog=False)
            b, a = butter(ord, wn, btype=btype)
        except OverflowError:
            raise ValueError("cannot contruct Butterworth filter for the given " "specification")
        ds = iir_filter(ds, b, a)

    if args.zscore:
        from mvpa2.mappers.zscore import zscore

        verbose(1, "Z-score")
        zscore(ds, chunks_attr=args.zscore_chunks, params=args.zscore_params)
        verbose(3, "Dataset summary %s" % (ds.summary()))
    # invariants?
    if args.strip_invariant_features is not None:
        from mvpa2.datasets.miscfx import remove_invariant_features

        ds = remove_invariant_features(ds)
    # and store
    ds2hdf5(ds, args.output, compression=args.hdf5_compression)
    return ds
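The filter branch normalizes edge frequencies by the Nyquist rate, lets buttord pick the minimal filter order, and designs the coefficients with butter. A self-contained low-pass sketch (cutoffs and attenuations are illustrative, and lfilter only approximates what iir_filter applies per feature):

# Sketch: Butterworth low-pass design as in the branch above
import numpy as np
from scipy.signal import butter, buttord, lfilter

sampling_rate = 100.0                  # Hz, illustrative
nyquist = sampling_rate / 2.0
wp = 10.0 / nyquist                    # normalized passband edge
ws = 15.0 / nyquist                    # normalized stopband edge
order, wn = buttord(wp, ws, gpass=3, gstop=40, analog=False)
b, a = butter(order, wn, btype='lowpass')
filtered = lfilter(b, a, np.random.randn(256))
print(order, filtered.shape)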
Example #13
    # apparently another do_searchlight() tail; the preceding setup is missing
    # TODO: save p value map
    if with_null_prob:
        nullt_results = map2nifti(sl_map,
                                  data=sl.ca.null_t,
                                  imghdr=glm_dataset.a.imghdr)
        nullt_results.to_filename('{}-t.nii.gz'.format(output_basename))
        nullprob_results = map2nifti(sl_map,
                                     data=sl.ca.null_prob,
                                     imghdr=glm_dataset.a.imghdr)
        nullprob_results.to_filename('{}-prob.nii.gz'.format(output_basename))
        nullprob_results = map2nifti(sl_map,
                                     data=distr_est.cdf(sl_map.samples),
                                     imghdr=glm_dataset.a.imghdr)
        nullprob_results.to_filename('{}-cdf.nii.gz'.format(output_basename))


if __name__ == '__main__':
    filename = sys.argv[1]
    radius = int(sys.argv[2])
    print filename
    output_basename = '{}_r{}_c-{}'.format(filename, radius, 'linear')
    print output_basename
    ds = arg2ds([filename])
    do_searchlight(
        ds,
        radius,
        output_basename,
        #False)
        True)
Example #14
def run(args):
    if args.chunks is not None:
        # apply global "chunks" setting
        for cattr in ('detrend_chunks', 'zscore_chunks'):
            if getattr(args, cattr) is None:
                # only overwrite if individual option is not given
                setattr(args, cattr, args.chunks)
    ds = arg2ds(args.data)
    if args.poly_detrend is not None:
        if args.detrend_chunks is not None \
           and args.detrend_chunks not in ds.sa:
            raise ValueError(
                "--detrend-chunks attribute '%s' not found in dataset" %
                args.detrend_chunks)
        from mvpa2.mappers.detrend import poly_detrend
        verbose(1, "Detrend")
        poly_detrend(ds,
                     polyord=args.poly_detrend,
                     chunks_attr=args.detrend_chunks,
                     opt_regs=args.detrend_regrs,
                     space=args.detrend_coords)
    if args.filter_passband is not None:
        from mvpa2.mappers.filters import iir_filter
        from scipy.signal import butter, buttord
        if args.sampling_rate is None or args.filter_stopband is None:
            raise ValueError("spectral filtering requires specification of "
                             "--filter-stopband and --sampling-rate")
        # determine filter type
        nyquist = args.sampling_rate / 2.0
        if len(args.filter_passband) > 1:
            btype = 'bandpass'
            if len(args.filter_passband) != len(args.filter_stopband):
                raise ValueError(
                    "passband and stopband specifications have to "
                    "match in size")
            wp = [v / nyquist for v in args.filter_passband]
            ws = [v / nyquist for v in args.filter_stopband]
        elif args.filter_passband[0] < args.filter_stopband[0]:
            btype = 'lowpass'
            wp = args.filter_passband[0] / nyquist
            ws = args.filter_stopband[0] / nyquist
        elif args.filter_passband[0] > args.filter_stopband[0]:
            btype = 'highpass'
            wp = args.filter_passband[0] / nyquist
            ws = args.filter_stopband[0] / nyquist
        else:
            raise ValueError("invalid specification of Butterworth filter")
        # create filter
        verbose(1, "Spectral filtering (%s)" % (btype, ))
        try:
            ord, wn = buttord(wp,
                              ws,
                              args.filter_passloss,
                              args.filter_stopattenuation,
                              analog=False)
            b, a = butter(ord, wn, btype=btype)
        except OverflowError:
            raise ValueError(
                "cannot contruct Butterworth filter for the given "
                "specification")
        ds = iir_filter(ds, b, a)

    if args.zscore:
        from mvpa2.mappers.zscore import zscore
        verbose(1, "Z-score")
        zscore(ds, chunks_attr=args.zscore_chunks, params=args.zscore_params)
        verbose(3, "Dataset summary %s" % (ds.summary()))
    # invariants?
    if args.strip_invariant_features is not None:
        from mvpa2.datasets.miscfx import remove_invariant_features
        ds = remove_invariant_features(ds)
    # and store
    ds2hdf5(ds, args.output, compression=args.hdf5_compression)
    return ds
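The z-scoring step standardizes each feature, with mean and standard deviation estimated separately per chunk. A numpy-only sketch of that per-chunk standardization (simplified; mvpa2's zscore has more options, e.g. estimating parameters from a subset of samples):

# Sketch: per-chunk z-scoring of a samples matrix
import numpy as np

samples = np.random.randn(8, 3) * 5 + 2
chunks = np.array([0, 0, 0, 0, 1, 1, 1, 1])
for c in np.unique(chunks):
    m = chunks == c
    samples[m] = (samples[m] - samples[m].mean(axis=0)) / samples[m].std(axis=0)
print(samples.mean(axis=0).round(6))   # ~0 for every feature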
Example #15
def run(args):
    print(args.data)
    dss = [arg2ds(d)[:, :100] for d in args.data]
    verbose(1, "Loaded %i input datasets" % len(dss))
    if __debug__:
        for i, ds in enumerate(dss):
            debug('CMDLINE', "dataset %i: %s" % (i, str(ds)))
    # TODO at this point more checks could be done, e.g. ref_ds > len(dss)
    # assemble parameters
    params = dict([(param, getattr(args, param))
                   for param in _supported_parameters])
    if __debug__:
        debug('CMDLINE', "configured parameters: '%s'" % params)
    # assemble CAs
    enabled_ca = [ca for ca in _supported_cas if getattr(args, ca)]
    if __debug__:
        debug('CMDLINE', "enabled conditional attributes: '%s'" % enabled_ca)
    hyper = Hyperalignment(enable_ca=enabled_ca,
                           alignment=ProcrusteanMapper(svd='dgesvd',
                                                       space='commonspace'),
                           **params)
    verbose(1, "Running hyperalignment")
    promappers = hyper(dss)
    verbose(2, "Alignment reference is dataset %i" % hyper.ca.chosen_ref_ds)
    verbose(1, "Writing output")
    # save on memory and remove the training data
    del dss
    if args.commonspace:
        if __debug__:
            debug('CMDLINE', "write commonspace as hdf5")
        h5save('%s%s.hdf5' % (args.output_prefix,
                              _output_specs['commonspace']['output_suffix']),
               hyper.commonspace,
               compression=args.hdf5_compression)
    for ca in _supported_cas:
        if __debug__:
            debug('CMDLINE', "check conditional attribute: '%s'" % ca)
        if getattr(args, ca):
            if __debug__:
                debug('CMDLINE', "store conditional attribute: '%s'" % ca)
            np.savetxt(
                '%s%s' %
                (args.output_prefix, _supported_cas[ca]['output_suffix']),
                hyper.ca[ca].value.samples)
    if args.store_transformation:
        for i, pm in enumerate(promappers):
            if __debug__:
                debug('CMDLINE', "store mapper %i: %s" % (i, str(pm)))
            h5save('%s%s.hdf5' % (args.output_prefix, '_map%.3i' % i),
                   pm,
                   compression=args.hdf5_compression)
    if args.transform:
        tdss, dss = _transform_dss(args.transform, promappers, args)
        del dss
        verbose(1, "Store transformed datasets")
        for i, td in enumerate(tdss):
            if __debug__:
                debug('CMDLINE',
                      "store transformed data %i: %s" % (i, str(td)))
            h5save('%s%s.hdf5' % (args.output_prefix, '_transformed%.3i' % i),
                   td,
                   compression=args.hdf5_compression)
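The ProcrusteanMapper used as the alignment backend solves the orthogonal Procrustes problem: find the orthogonal R minimizing ||X R - Y||, which has the closed form R = U Vt where U S Vt = svd(Xt Y). A numpy sketch of that core computation (the real mapper additionally handles scaling, demeaning, and attribute bookkeeping):

# Orthogonal Procrustes: best orthogonal map from X onto Y
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(20, 5)
true_R = np.linalg.qr(rng.randn(5, 5))[0]   # a random orthogonal matrix
Y = X.dot(true_R)
U, _, Vt = np.linalg.svd(X.T.dot(Y))
R = U.dot(Vt)
print(np.allclose(X.dot(R), Y))             # True: alignment recovered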