def run(args):
    """Create a dataset from one data source and store it as HDF5.

    Exactly one direct data source is honored, checked in this order:
    ``args.txt_data``, ``args.npy_data``, ``args.mri_data``.  If none is
    given, an existing dataset is loaded from ``args.data``; if a direct
    source *and* ``args.data`` are given, the latter is ignored with a
    notice.  Common attribute options and optional FSL McFlirt motion
    parameters are applied before saving to ``args.output`` (a ``.hdf5``
    suffix is appended when missing).

    Raises
    ------
    RuntimeError
      If no data source was specified at all.
    """
    from mvpa2.base.hdf5 import h5save
    ds = None
    if args.txt_data is not None:
        verbose(1, "Load data from TXT file '%s'" % args.txt_data)
        samples = _load_from_txt(args.txt_data)
        ds = Dataset(samples)
    elif args.npy_data is not None:
        verbose(1, "Load data from NPY file '%s'" % args.npy_data)
        samples = _load_from_npy(args.npy_data)
        ds = Dataset(samples)
    elif args.mri_data is not None:
        verbose(1, "Load data from MRI image(s) %s" % args.mri_data)
        from mvpa2.datasets.mri import fmri_dataset
        vol_attr = dict()
        if args.add_vol_attr is not None:
            # XXX add a way to use the mapper of an existing dataset to
            # add a volume attribute without having to load the entire
            # mri data again
            vol_attr = dict(args.add_vol_attr)
            # dict() collapses duplicate keys silently -- warn the user
            if not len(args.add_vol_attr) == len(vol_attr):
                warning("--vol-attr option with duplicate attribute name: "
                        "check arguments!")
            verbose(2, "Add volumetric feature attributes: %s" % vol_attr)
        ds = fmri_dataset(args.mri_data, mask=args.mask, add_fa=vol_attr)
    if ds is None:
        if args.data is None:
            # fixed typo: was 'no data source specific'
            raise RuntimeError('no data source specified')
        else:
            ds = hdf2ds(args.data)[0]
    else:
        if args.data is not None:
            verbose(1, 'ignoring dataset input in favor of other data '
                       'source -- remove either one to disambiguate')
    # act on all attribute options
    ds = process_common_dsattr_opts(ds, args)
    if args.add_fsl_mcpar is not None:
        from mvpa2.misc.fsl.base import McFlirtParams
        mc_par = McFlirtParams(args.add_fsl_mcpar)
        for param in mc_par:
            verbose(2, "Add motion regressor as sample attribute '%s'"
                       % ('mc_' + param))
            ds.sa['mc_' + param] = mc_par[param]
    verbose(3, "Dataset summary %s" % (ds.summary()))
    # and store
    outfilename = args.output
    if not outfilename.endswith('.hdf5'):
        outfilename += '.hdf5'
    verbose(1, "Save dataset to '%s'" % outfilename)
    h5save(outfilename, ds, mkdir=True, compression=args.hdf5_compression)
def run(args):
    """Assemble a dataset from one input source and write it out as HDF5.

    The txt/npy/mri inputs are checked in that order and the first one
    present wins; failing those, a stored dataset given via ``args.data``
    is loaded.  Common attribute options (and, optionally, FSL McFlirt
    motion parameters) are applied before the result is saved under
    ``args.output`` with a ``.hdf5`` suffix enforced.
    """
    from mvpa2.base.hdf5 import h5save
    ds = None
    if args.txt_data is not None:
        verbose(1, "Load data from TXT file '%s'" % args.txt_data)
        ds = Dataset(_load_from_txt(args.txt_data))
    elif args.npy_data is not None:
        verbose(1, "Load data from NPY file '%s'" % args.npy_data)
        ds = Dataset(_load_from_npy(args.npy_data))
    elif args.mri_data is not None:
        verbose(1, "Load data from MRI image(s) %s" % args.mri_data)
        from mvpa2.datasets.mri import fmri_dataset
        fa_attrs = {}
        if args.add_vol_attr is not None:
            # XXX add a way to use the mapper of an existing dataset to
            # add a volume attribute without having to load the entire
            # mri data again
            fa_attrs = dict(args.add_vol_attr)
            # duplicate names collapse in the dict -- tell the user
            if len(fa_attrs) != len(args.add_vol_attr):
                warning("--vol-attr option with duplicate attribute name: "
                        "check arguments!")
            verbose(2, "Add volumetric feature attributes: %s" % fa_attrs)
        ds = fmri_dataset(args.mri_data, mask=args.mask, add_fa=fa_attrs)
    if ds is None:
        # no direct data source -- fall back on a stored dataset
        if args.data is None:
            raise RuntimeError('no data source specific')
        ds = hdf2ds(args.data)[0]
    elif args.data is not None:
        verbose(1, 'ignoring dataset input in favor of other data source'
                   ' -- remove either one to disambiguate')
    # act on all attribute options
    ds = process_common_dsattr_opts(ds, args)
    if args.add_fsl_mcpar is not None:
        from mvpa2.misc.fsl.base import McFlirtParams
        mc = McFlirtParams(args.add_fsl_mcpar)
        for p in mc:
            attr = 'mc_' + p
            verbose(2, "Add motion regressor as sample attribute '%s'" % attr)
            ds.sa[attr] = mc[p]
    verbose(3, "Dataset summary %s" % (ds.summary()))
    # and store
    out_path = args.output
    if not out_path.endswith('.hdf5'):
        out_path += '.hdf5'
    verbose(1, "Save dataset to '%s'" % out_path)
    h5save(out_path, ds, mkdir=True, compression=args.hdf5_compression)
def run(args):
    """Segment a dataset into events and store the result as HDF5.

    Events come from exactly one of (checked in order): sample attributes
    (``args.event_attrs`` via ``find_events``), a CSV table
    (``args.csv_events``, ``-`` meaning stdin), explicit onsets
    (``args.onsets``, read from stdin when the list is empty), or FSL EV3
    files (``args.fsl_ev3``).  The events are cut out of the input dataset
    with ``eventrelated_dataset``, optionally compressed feature-wise
    (mean/median/min/max), run through the common attribute options, and
    saved to ``args.output``.

    Returns
    -------
    Dataset
      The event-related dataset that was stored.

    Raises
    ------
    ValueError
      If no CSV columns are found, no events are defined, or an
      unsupported ``--event-compression`` value is given.
    """
    ds = arg2ds(args.data)
    verbose(3, 'Concatenation yielded %i samples with %i features' % ds.shape)
    # build list of events
    events = []
    if args.event_attrs is not None:
        def_attrs = dict([(k, ds.sa[k].value) for k in args.event_attrs])
        events = find_events(**def_attrs)
    elif args.csv_events is not None:
        # renamed local from 'csv' to avoid shadowing the stdlib module
        if args.csv_events == '-':
            csv_src = sys.stdin.read()
            import cStringIO
            csv_src = cStringIO.StringIO(csv_src)
        else:
            csv_src = open(args.csv_events, 'rU')
        csvt = _load_csv_table(csv_src)
        if not len(csvt):
            raise ValueError("no CSV columns found")
        if args.onset_column:
            csvt['onset'] = csvt[args.onset_column]
        # all columns have equal length -- any one yields the event count
        nevents = len(csvt[csvt.keys()[0]])
        events = []
        for ev in xrange(nevents):
            events.append(dict([(k, v[ev]) for k, v in csvt.iteritems()]))
    elif args.onsets is not None:
        if not len(args.onsets):
            # read one onset per line from stdin
            args.onsets = [i for i in sys.stdin]
        # time or sample-based?
        if args.time_attr is None:
            oconv = int
        else:
            oconv = float
        events = [{'onset': oconv(o)} for o in args.onsets]
    elif args.fsl_ev3 is not None:
        from mvpa2.misc.fsl import FslEV3
        events = []
        for evsrc in args.fsl_ev3:
            events.extend(FslEV3(evsrc).to_events())
    if not len(events):
        raise ValueError("no events defined")
    verbose(2, 'Extracting %i events' % len(events))
    # map supported compression strategies onto feature-wise functions
    compressors = {
        'mean': np.mean,
        'median': np.median,
        'min': np.min,
        'max': np.max,
    }
    if args.event_compression is None:
        evmap = None
    elif args.event_compression in compressors:
        evmap = FxMapper('features', compressors[args.event_compression],
                         attrfx=merge2first)
    else:
        # previously an unrecognized value fell through the if/elif chain
        # and caused a NameError on 'evmap' below -- fail loudly instead
        raise ValueError("unsupported event compression '%s'"
                         % args.event_compression)
    # convert to event-related ds
    evds = eventrelated_dataset(ds, events,
                                time_attr=args.time_attr,
                                match=args.match_strategy,
                                event_offset=args.offset,
                                event_duration=args.duration,
                                event_mapper=evmap)
    # act on all attribute options
    evds = process_common_dsattr_opts(evds, args)
    # and store
    ds2hdf5(evds, args.output, compression=args.hdf5_compression)
    return evds