Example #1
    def __init__(self, env_subject):
        """Initialize the environment path for 'average' subject fsaverage_sym.

        Parameters
        ----------
        env_subject : string
            Path to directory containing subject fsaverage_sym.
        """
        add_subject_path(env_subject)
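For context, a minimal standalone sketch of the same call, assuming neuropythy's FreeSurfer interface (the directory path below is a placeholder, not from the example):

# Minimal usage sketch; the path is a placeholder.
from neuropythy.freesurfer import add_subject_path
add_subject_path('/opt/freesurfer/subjects')  # directory containing fsaverage_sym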
Example #2
    def setUp(self):
        """Remove any existing client cache directory and create a fresh SCO
        client instance."""
        # Set file names for the subject and image group files to upload
        self.SUBJECT_FILE = os.path.join(DATA_DIR, 'subjects/ernie.tar.gz')
        self.IMAGES_ARCHIVE = os.path.join(DATA_DIR, 'images/small-sample.tar')
        # Add the FreeSurfer subject path
        add_subject_path(ENV_DIR)
        # Delete the data store directory if it exists
        if os.path.isdir(CLIENT_DIR):
            shutil.rmtree(CLIENT_DIR)
        # Create a fresh instance of the SCO client
        self.sco = SCOClient(api_url=API_URL, data_dir=CLIENT_DIR)
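A matching tearDown is the natural counterpart; a sketch, assuming the same CLIENT_DIR constant is in scope:

    def tearDown(self):
        """Remove the client cache directory created in setUp (sketch)."""
        if os.path.isdir(CLIENT_DIR):
            shutil.rmtree(CLIENT_DIR)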
Example #3
    def setUp(self):
        """Remove any existing test data store and create a fresh data store
        instance."""
        # Set file names for the subject and image group files to upload
        self.SUBJECT_FILE = os.path.join(DATA_DIR, 'subjects/ernie.tar.gz')
        self.IMAGES_ARCHIVE = os.path.join(DATA_DIR, 'images/small-sample.tar')
        # Add the FreeSurfer subject path
        add_subject_path(ENV_DIR)
        # Delete the data store directory if it exists
        if os.path.isdir(API_DIR):
            shutil.rmtree(API_DIR)
        # Drop the test database
        MongoClient().drop_database('test_sco')
        mongo = MongoDBFactory(db_name='test_sco')
        # Create fresh instances of the SCO data store and engine
        self.db = SCODataStore(mongo, API_DIR)
        self.engine = SCOEngine(mongo)
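Here too a symmetric cleanup is implied; a sketch, assuming the same API_DIR constant and test database name:

    def tearDown(self):
        """Remove the data store directory and drop the test database (sketch)."""
        if os.path.isdir(API_DIR):
            shutil.rmtree(API_DIR)
        MongoClient().drop_database('test_sco')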
Example #4
    def setUp(self):
        """Remove any existing test data store and create a fresh data store
        instance."""
        # Set file names for the subject, image group, and functional data files to upload
        self.SUBJECT_FILE = os.path.join(DATA_DIR, 'subjects/kay2008_subj1.tar.gz')
        self.IMAGES_ARCHIVE = os.path.join(DATA_DIR, 'images/sample_images.tar.gz')
        self.FUNC_DATA = os.path.join(DATA_DIR, 'func/sample.nii')
        # Add the FreeSurfer subject path
        add_subject_path(ENV_DIR)
        # Delete the data store directory if it exists
        if os.path.isdir(API_DIR):
            shutil.rmtree(API_DIR)
        # Drop the test database
        MongoClient().drop_database('test_sco')
        mongo = MongoDBFactory(db_name='test_sco')
        # Load models
        init_registry_from_json(mongo, MODELS_FILE)
        # Create fresh instances of the SCO data store and engine
        self.db = SCODataStore(mongo, API_DIR)
        self.engine = SCOEngine(mongo)
Example #5
def compare_with_Kay2013(
        image_base_path,
        stimuli_idx,
        voxel_idx=None,
        subject='test-sub',
        subject_dir='/home/billbrod/Documents/SCO-test-data/Freesurfer_subjects',
        stimulus_pixels_per_degree=53,
        normalized_pixels_per_degree=12,
        stimulus_aperture_edge_width=0,
        max_eccentricity=7.5,
        **kwargs):
    """Run python SCO and Matlab SOC on the same batch of images

    Arguments
    ---------

    image_base_path: string. This is assumed to either be a directory, in which case the model will
    be run on every image in the directory, or a .mat file containing the image stimuli, in which
    case they will be loaded in and used.

    stimuli_idx: array. Which stimuli from image_base_path we should use. These are assumed
    to be integers that we use as indexes into the stimulus_images (if image_base_path is a .mat
    file) or stimulus_image_filenames (if it's a directory), and we only pass those specified
    images/filenames to the model.

    voxel_idx: array or None, optional. Which voxels to run the model for and create
    predictions. If None or unset, the model is run for all voxels. Otherwise the optional
    calculator sco.anatomy.core.calc_voxel_selector is used to subset the voxels, running only
    those whose indices are included in this array.

    subject_dir: string or None. If not None, this is added to neuropythy.freesurfer's subject
    paths.

    subject: string. The specific subject to run on.
    """
    if subject_dir is not None and subject_dir not in nfs.subject_paths():
        nfs.add_subject_path(subject_dir)
    # if there's just one value and not a list
    if not hasattr(stimuli_idx, '__iter__'):
        stimuli_idx = [stimuli_idx]
    stimuli_idx = np.asarray(stimuli_idx)
    if voxel_idx is not None:
        if not hasattr(voxel_idx, '__iter__'):
            voxel_idx = [voxel_idx]
        # Convert only after the None check: np.asarray(None) is a 0-d object
        # array, which would defeat the `is not None` test above.
        voxel_idx = np.asarray(voxel_idx)
        anat_chain = (
            ('import', anatomy_core.import_benson14_volumes_from_freesurfer),
            ('calc_pRF_centers',
             anatomy_core.calc_pRFs_from_freesurfer_retinotopy_volumes),
            ('calc_voxel_selector', anatomy_core.calc_voxel_selector),
            ('calc_anatomy_default_parameters',
             anatomy_core.calc_anatomy_default_parameters),
            ('calc_pRF_sizes', anatomy_core.calc_Kay2013_pRF_sizes))
        anat_chain = calc_chain(anat_chain)
        if 'voxel_idx' not in kwargs:
            kwargs['voxel_idx'] = voxel_idx
    else:
        anat_chain = calc_anatomy
    if os.path.isdir(image_base_path):
        # Interestingly enough, this works regardless of whether image_base_path ends in os.sep or
        # not.
        stimulus_image_filenames = glob.glob(image_base_path + os.sep + "*")
        # imghdr.what(img) returns something if img is an image file (it returns a string
        # specifying what type of image it is). If it's not an image file, it returns None.
        stimulus_image_filenames = np.asarray(
            [img for img in stimulus_image_filenames if imghdr.what(img)])
        stimulus_image_filenames = stimulus_image_filenames[stimuli_idx]
        if 'stimulus_image_filenames' not in kwargs:
            kwargs['stimulus_image_filenames'] = stimulus_image_filenames
        # here, stimuli_names can simply be the filenames we're using
        stimuli_names = [
            os.path.split(fn)[-1] for fn in stimulus_image_filenames
        ]
        # and we use the default sco_chain (with the possible exception of anat_chain, see above)
        sco_chain = (('calc_anatomy', anat_chain),
                     ('calc_stimulus', calc_stimulus),
                     ('calc_contrast', calc_contrast),
                     ('calc_pRF', calc_pRF),
                     ('calc_normalization', calc_normalization))
    # if it's a .mat file
    elif os.path.splitext(image_base_path)[1] == ".mat":
        # in this case, we assume it's stimuli.mat from
        # http://kendrickkay.net/socmodel/index.html#contentsofstimuli. We want to grab only the
        # 'images' key from this and only images 226 through 260/end (stimulus set 3; note that we
        # have to convert from MATLAB's 1-indexing to python's 0-indexing), since each entry is a
        # single frame and thus easy to handle.
        stimulus_images = sio.loadmat(image_base_path)
        if (stimulus_images['images'].ndim == 2
                and stimulus_images['images'].shape[0] == 1):
            # then we need to unfold the stimulus_images; each stimulus is 3 dimensional (several
            # two dimensional images), so if it's only two dimensional and the first dimension is
            # 1, this is collapsed across that dimension (happens when not all stimuli are the same
            # size).
            stimulus_images = stimulus_images['images'][0, stimuli_idx]
        else:
            # else we can just grab the corresponding images
            stimulus_images = stimulus_images['images'][stimuli_idx]
        # some of the stimuli have multiple frames associated with them; we want to predict all of
        # them separately, but remember that they were grouped together for later visualization. I
        # can't figure out how to get this loop into a list comprehension, so we'll have to deal
        # with the slight slowdown. stimuli_names is an array we create to keep track of these
        # images. We will return it to the calling code and eventually pass it to create_model_df.
        tmp = []
        stimuli_names = []
        for idx, im in zip(stimuli_idx, stimulus_images):
            if len(im.shape) == 3:
                for i in range(im.shape[2]):
                    tmp.append(im[:, :, i])
                    stimuli_names.append("%04d_sub%02d" % (idx, i))
            else:
                tmp.append(im)
                stimuli_names.append("%04d" % idx)
        stimuli_names = np.asarray(stimuli_names)
        stimulus_images = np.asarray(tmp)
        if 'stimulus_images' not in kwargs:
            kwargs.update({'stimulus_images': stimulus_images})
        kwargs.update({'stimulus_image_filenames': None})
        sco_chain = (('calc_anatomy', anat_chain),
                     ('calc_stimulus', calc_stimulus),
                     ('calc_contrast', calc_contrast),
                     ('calc_pRF', calc_pRF),
                     ('calc_normalization', calc_normalization))
    else:
        raise ValueError(
            "Don't know how to handle image_base_path %s; must be a directory or a .mat "
            "file" % image_base_path)
    # This prepares the sco_chain, making it a callable object
    sco_chain = calc_chain(sco_chain)

    # in order to handle the fact that the Kay2013 matlab code only deals with a spatial frequency
    # of 3 cpd, we have to define a new pRF_frequency_preference_function to replace the default.
    def freq_pref(e, s, l):
        # This takes the eccentricity, size, and area, but we don't use any of them, since we
        # just want to use 3 cpd and ignore everything else. The keys and values must be floats.
        return {3.0: 1.0}

    if 'pRF_frequency_preference_function' not in kwargs:
        kwargs['pRF_frequency_preference_function'] = freq_pref
    # And this runs it. To make sure it has the same size as the images used in Kendrick's
    # code, we set the normalized_stimulus_aperture, normalized_aperture_edge_width, and
    # normalized_pixels_per_degree values.
    results = sco_chain(
        subject=subject,
        max_eccentricity=max_eccentricity,
        normalized_stimulus_aperture=max_eccentricity * normalized_pixels_per_degree,
        stimulus_pixels_per_degree=stimulus_pixels_per_degree,
        normalized_pixels_per_degree=normalized_pixels_per_degree,
        stimulus_aperture_edge_width=stimulus_aperture_edge_width,
        **kwargs)
    return results, stimuli_names
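An invocation sketch consistent with the docstring; the .mat path is a placeholder, and the index range follows the 0-indexing note in the comments (MATLAB stimuli 226-260 become python indices 225-259):

# Hypothetical call; the stimuli.mat path is a placeholder.
results, stimuli_names = compare_with_Kay2013(
    '/path/to/stimuli.mat',
    stimuli_idx=np.arange(225, 260),  # stimulus set 3, converted to 0-indexing
    voxel_idx=None,                   # run all voxels
    subject='test-sub')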
Example #6
def surface_to_ribbon_command(args):
    '''
    surface_to_ribbon_command(args) can be given a list of arguments, such as sys.argv[1:]; these
    arguments may include any options and must include exactly one subject id and one output
    filename. Additionally one or two surface input filenames must be given. The surface files are
    projected into the ribbon and written to the output filename. For more information see the
    string stored in surface_to_ribbon_help.
    '''
    # Parse the arguments
    (args, opts) = _surface_to_ribbon_parser(args)
    # First, help?
    if opts['help']:
        print(surface_to_ribbon_help)
        return 1
    # and if we are verbose, let's set up a note function
    verbose = opts['verbose']
    def note(s):
        if verbose: print(s)
        return verbose
    # Add the subjects directory, if there is one
    if 'subjects_dir' in opts and opts['subjects_dir'] is not None:
        add_subject_path(opts['subjects_dir'])
    # figure out our arguments:
    (lhfl, rhfl) = (opts['lh_file'], opts['rh_file'])
    if len(args) == 0:
        raise ValueError('Not enough arguments provided!')
    elif len(args) == 1:
        # the subject must come from the SUBJECT environment variable
        sub = find_subject_path(os.getenv('SUBJECT'))
        outfl = args[0]
    elif len(args) == 2:
        sbpth = find_subject_path(args[0])
        if sbpth is not None:
            sub = sbpth
        else:
            sub = find_subject_path(os.getenv('SUBJECT'))
            if lhfl is not None: rhfl = args[0]
            elif rhfl is not None: lhfl = args[0]
            else: raise ValueError('Given arg is not a subject: %s' % args[0])
        outfl = args[1]
    elif len(args) == 3:
        sbpth0 = find_subject_path(args[0])
        sbpth1 = find_subject_path(args[1])
        if sbpth0 is not None:
            sub = sbpth0
            if lhfl is not None: rhfl = args[1]
            elif rhfl is not None: lhfl = args[1]
            else: raise ValueError('Too many arguments given: %s' % args[1])
        elif sbpth1 is not None:
            sub = sbpth1
            if lhfl is not None: rhfl = args[0]
            elif rhfl is not None: lhfl = args[0]
            else: raise ValueError('Too many arguments given: %s' % args[0])
        else:
            sub = find_subject_path(os.getenv('SUBJECT'))
            if lhfl is not None or rhfl is not None:
                raise ValueError('Too many arguments and no subject given')
            (lhfl, rhfl) = args[:2]  # args is (lh, rh, outfl) here
        outfl = args[2]
    elif len(args) == 4:
        if lhfl is not None or rhfl is not None:
            raise ValueError('Too many arguments and no subject given')
        subidx = next((i for (i,a) in enumerate(args) if find_subject_path(a) is not None), None)
        if subidx is None: raise ValueError('No subject given')
        sub = find_subject_path(args[subidx])
        del args[subidx]
        (lhfl, rhfl, outfl) = args
    else:
        raise ValueError('Too many arguments provided!')
    if sub is None: raise ValueError('No subject specified or found in $SUBJECT')
    if lhfl is None and rhfl is None: raise ValueError('No surfaces provided')
    # check the method
    method = opts['method'].lower()
    if method not in ('weighted', 'max'):
        raise ValueError('Unsupported method: %s' % method)
    # and the datatype
    if opts['dtype'] is None: dtyp = None
    elif opts['dtype'].lower() == 'float': dtyp = np.float32
    elif opts['dtype'].lower() == 'int': dtyp = np.int32
    else: raise ValueError('Type argument must be float or int')
    # Now, load the data:
    note('Reading surfaces...')
    (lhdat, rhdat) = (None, None)
    if lhfl is not None:
        note('   - Reading LH file: %s' % lhfl)
        lhdat = read_surf_file(lhfl)
    if rhfl is not None:
        note('   - Reading RH file: %s' % rhfl)
        rhdat = read_surf_file(rhfl)
    (dat, hemi) = (rhdat, 'rh') if lhdat is None else \
                  (lhdat, 'lh') if rhdat is None else \
                  ((lhdat, rhdat), None)
    note('Generating vertex-to-voxel mapping...')
    sub = freesurfer_subject(sub)
    s2r = cortex_to_ribbon_map(sub, hemi=hemi)
    # okay, make the volume...
    note('Generating volume...')
    vol = cortex_to_ribbon(sub, dat,
                           map=s2r, hemi=hemi, method=method, fill=opts['fill'], dtype=dtyp)
    # and write out the file
    note('Exporting volume file: %s' % outfl)
    vol.to_filename(outfl)
    note('surface_to_ribbon complete!')
    return 0
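Reading off the four-argument branch above, a hypothetical call looks like this (the subject id and file names are placeholders):

# The subject id may appear in any position; the remaining args are taken as
# (lh_file, rh_file, out_file) in order.
surface_to_ribbon_command(['bert', 'lh.eccen.mgz', 'rh.eccen.mgz', 'eccen_ribbon.mgz'])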
Example #7
def calc_arguments(args):
    '''
    calc_arguments is a calculator that parses the command-line arguments for the registration
    command and produces the subject, the model, the log function, and the additional options.
    '''
    (args, opts) = _retinotopy_parser(args)
    # We do some of the options right here...
    if opts['help']:
        print(info, file=sys.stdout)
        sys.exit(1)
    # and if we are verbose, let's set up a note function
    verbose = opts['verbose']
    def note(s):
        if verbose:
            print(s, file=sys.stdout)
            sys.stdout.flush()
        return verbose
    def error(s):
        print(s, file=sys.stderr)
        sys.stderr.flush()
        sys.exit(1)
    if len(args) < 1: error('subject argument is required')
    try: # we try FreeSurfer first:
        import neuropythy.freesurfer as fs
        # Add the subjects directory, if there is one
        if 'subjects_dir' in opts and opts['subjects_dir'] is not None:
            fs.add_subject_path(opts['subjects_dir'])
        # Get the subject now
        sub = fs.subject(args[0])
    except Exception: sub = None
    if sub is None:
        try: # As an alternative, try HCP
            import neuropythy.hcp as hcp
            # Add the subjects directory, if there is one
            if 'subjects_dir' in opts and opts['subjects_dir'] is not None:
                hcp.add_subject_path(opts['subjects_dir'])
            sub = hcp.subject(args[0])
        except Exception: sub = None
    if sub is None: error('Failed to load subject %s' % args[0])
    # and the model
    if len(args) > 1:       mdl_name = args[1]
    elif opts['model_sym']: mdl_name = 'schira'
    else:                   mdl_name = 'benson17'
    try:
        if opts['model_sym']:
            model = {h:retinotopy_model(mdl_name).persist() for h in ['lh', 'rh']}
        else:
            model = {h:retinotopy_model(mdl_name, hemi=h).persist() for h in ['lh', 'rh']}
    except Exception: error('Could not load retinotopy model %s' % mdl_name)

    # Now, we want to run a few filters on the options
    # Parse the simple numbers
    for o in ['weight_min', 'scale', 'max_step_size', 'max_out_eccen',
              'max_in_eccen', 'min_in_eccen', 'field_sign_weight', 'radius_weight']:
        opts[o] = float(opts[o])
    opts['max_steps'] = int(opts['max_steps'])
    # Make a note:
    note('Processing subject: %s' % sub.name)
    del opts['help']
    del opts['verbose']
    del opts['subjects_dir']
    # That's all we need!
    return pimms.merge(opts,
                       {'subject': sub.persist(),
                        'model':   pyr.pmap(model),
                        'options': pyr.pmap(opts),
                        'note':    note,
                        'error':   error})
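A sketch of how the returned map might be consumed; the keys come from the pimms.merge call above, and passing sys.argv[1:] follows the command-line framing of the docstring:

import sys
data = calc_arguments(sys.argv[1:])
(sub, model, note) = (data['subject'], data['model'], data['note'])
note('Loaded model(s) for subject %s' % sub.name)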
Example #8
def register_retinotopy_command(args):
    '''
    register_retinotopy_command(args) can be given a list of arguments, such as sys.argv[1:]; these
    arguments may include any options and must include at least one subject id. All subjects whose
    ids are given are registered to a retinotopy model, and the resulting registration, as well as
    the predictions made by the model in the registration, are exported.
    '''
    # Parse the arguments
    (args, opts) = _retinotopy_parser(args)
    # First, help?
    if opts['help']:
        print(register_retinotopy_help)
        return 1
    # and if we are verbose, let's set up a note function
    verbose = opts['verbose']
    def note(s):
        if verbose: print(s)
        return verbose
    # Add the subjects directory, if there is one
    if 'subjects_dir' in opts and opts['subjects_dir'] is not None:
        add_subject_path(opts['subjects_dir'])
    # Parse the simple numbers
    for o in ['weight_cutoff', 'edge_strength', 'angle_strength', 'func_strength',
              'max_step_size', 'max_out_eccen']:
        opts[o] = float(opts[o])
    opts['max_steps'] = int(opts['max_steps'])
    # These are for now not supported: #TODO
    if opts['angle_math'] or opts['angle_radians'] or opts['eccen_radians']:
        print('Mathematical angles and angles not in degrees are not yet supported.')
        return 1
    # The remainder of the args can wait for now; walk through the subjects:
    tag_key = {'eccen': 'eccentricity', 'angle': 'polar_angle', 'label': 'V123_label'}
    for subnm in args:
        sub = freesurfer_subject(subnm)
        note('Processing subject: %s' % sub.id)
        # we need to register this subject...
        res = {}
        ow = not opts['no_overwrite']
        for h in ['LH','RH']:
            note('   Processing hemisphere: %s' % h)
            hemi = getattr(sub, h)
            # See if we are loading custom values...
            (ang,ecc,wgt) = (None,None,None)
            suffix = '_' + h.lower() + '_file'
            if opts['angle'  + suffix] is not None: ang = _guess_surf_file(opts['angle'  + suffix])
            if opts['eccen'  + suffix] is not None: ecc = _guess_surf_file(opts['eccen'  + suffix])
            if opts['weight' + suffix] is not None: wgt = _guess_surf_file(opts['weight' + suffix])
            # Do the registration
            note('    - Running Registration...')
            res[h] = register_retinotopy(hemi, V123_model(),
                                         polar_angle=ang, eccentricity=ecc, weight=wgt,
                                         weight_cutoff=opts['weight_cutoff'],
                                         partial_voluming_correction=opts['part_vol_correct'],
                                         edge_scale=opts['edge_strength'],
                                         angle_scale=opts['angle_strength'],
                                         functional_scale=opts['func_strength'],
                                         prior=opts['prior'],
                                         max_predicted_eccen=opts['max_out_eccen'],
                                         max_steps=opts['max_steps'],
                                         max_step_size=opts['max_step_size'])
            # Perform the hemi-specific outputs now:
            if not opts['no_reg_export']:
                regnm = '.'.join([h.lower(), opts['registration_name'], 'sphere', 'reg'])
                flnm = (os.path.join(sub.directory, 'surf', regnm) if h == 'LH' else
                        os.path.join(sub.directory, 'xhemi', 'surf', regnm))
                if ow or not os.path.exists(flnm):
                    note('    - Exporting registration file: %s' % flnm)
                    fsio.write_geometry(flnm, res[h].coordinates.T, res[h].faces.T,
                                        'Created by neuropythy (github.com/noahbenson/neuropythy)')
                else:
                    note('    - Skipping registration file: %s (file exists)' % flnm)
            if not opts['no_surf_export']:
                for dim in ['angle', 'eccen', 'label']:
                    flnm = os.path.join(sub.directory, 'surf',
                                        '.'.join([h.lower(), opts[dim + '_tag'], 'mgz']))
                    if ow or not os.path.exists(flnm):
                        note('    - Exporting prediction file: %s' % flnm)
                        img = fsmgh.MGHImage(
                            np.asarray([[res[h].prop(tag_key[dim])]],
                                       dtype=(np.int32 if dim == 'label' else np.float32)),
                            np.eye(4))
                        img.to_filename(flnm)
                    else:
                        note('    - Skipping prediction file: %s (file exists)' % flnm)
        # Do the volume exports here
        if not opts['no_vol_export']:
            note('   Processing volume data...')
            note('    - Calculating cortex-to-ribbon mapping...')
            surf2rib = cortex_to_ribbon_map(sub, hemi=None)
            for dim in ['angle', 'eccen', 'label']:
                flnm = os.path.join(sub.directory, 'mri', opts[dim + '_tag'] + '.mgz')
                if ow or not os.path.exists(flnm):
                    note('    - Generating volume file: %s' % flnm)
                    vol = cortex_to_ribbon(sub,
                                           (res['LH'].prop(tag_key[dim]),
                                            res['RH'].prop(tag_key[dim])),
                                           map=surf2rib,
                                           dtype=(np.int32 if dim == 'label' else np.float32))
                    note('    - Exporting volume file: %s' % flnm)
                    vol.to_filename(flnm)
                else:
                    note('    - Skipping volume file: %s (file exists)' % flnm)
        # That is it for this subject!
        note('   Subject %s finished!' % sub.id)
    # And if we made it here, all was successful.
    return 0
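Following the docstring, the command takes sys.argv[1:] directly, so a script entry point is a one-liner (a sketch):

import sys
if __name__ == '__main__':
    sys.exit(register_retinotopy_command(sys.argv[1:]))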
Example #9
# Imports implied by the calls below (add_subject_path comes from neuropythy's
# FreeSurfer interface, as in the other examples).
import os
import nibabel as nib
import sco
from neuropythy.freesurfer import add_subject_path

opts = {
    'stimulus': image_files,
    'subject': subject_dir,
    'stimulus_edge_value': 0.5,
    'gabor_orientations': 8,
    'pixels_per_degree': d2p,
    'normalized_pixels_per_degree': d2p,
    'max_eccentricity': max_eccen,
    'aperture_edge_width': 0,
    'aperture_radius': max_eccen,
    'output_directory': output_dir,
    'measurements_filename': func_filename
}

add_subject_path(env_dir)

model = sco.build_model('benson17')
data  = model(opts)

print(data['exported_files'])

if generate_cortical_images:
    cortex_idcs = data['cortex_indices']
    measurement_idcs = data['measurement_indices']

    # Get the filename of the predicted results out of the exported_files list:
    pred_filename = os.path.join(output_dir, 'prediction.nii.gz')

    # load the prediction volume with nibabel
    pred_nii = nib.load(pred_filename)
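From here the prediction volume can be read into numpy; get_fdata is standard nibabel, and anything beyond that (e.g., mapping through cortex_idcs) is left open here:

    # Read the prediction volume as a numpy array (sketch).
    pred = pred_nii.get_fdata()
    print(pred.shape)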
Example #10
def benson14_retinotopy_command(*args):
    '''
    benson14_retinotopy_command(args...) runs the benson14_retinotopy command; see
    benson14_retinotopy_help for more information.
    '''
    # Parse the arguments...
    (args, opts) = _benson14_parser(args)
    # help?
    if opts['help']:
        print(benson14_retinotopy_help)
        return 1
    # verbose?
    verbose = opts['verbose']
    def note(s):
        if verbose: print(s)
        return verbose
    # Add the subjects directory, if there is one
    if 'subjects_dir' in opts and opts['subjects_dir'] is not None:
        add_subject_path(opts['subjects_dir'])
    ow = not opts['no_overwrite']
    nse = opts['no_surf_export']
    nve = opts['no_vol_export']
    tr = {'polar_angle':  opts['angle_tag'],
          'eccentricity': opts['eccen_tag'],
          'v123roi':      opts['label_tag']}
    # okay, now go through the subjects...
    for subnm in args:
        note('Processing subject %s:' % subnm)
        sub = freesurfer_subject(subnm)
        note('   - Interpolating template...')
        (lhdat, rhdat) = benson14_retinotopy(sub)
        # Export surfaces
        if nse:
            note('   - Skipping surface export.')
        else:
            note('   - Exporting surfaces:')
            for (t,dat) in lhdat.items():
                flnm = os.path.join(sub.directory, 'surf', 'lh.' + tr[t] + '.mgz')
                if ow or not os.path.exists(flnm):
                    note('    - Exporting LH prediction file: %s' % flnm)
                    img = fsmgh.MGHImage(
                        np.asarray([[dat]], dtype=(np.int32 if t == 'v123roi' else np.float32)),
                        np.eye(4))
                    img.to_filename(flnm)
                else:
                    note('    - Not overwriting existing file: %s' % flnm)
        # Export volumes
        if nve:
            note('   - Skipping volume export.')
        else:
            surf2rib = cortex_to_ribbon_map(sub, hemi=None)
            note('   - Exporting Volumes:')
            for t in lhdat.keys():
                flnm = os.path.join(sub.directory, 'mri', tr[t] + '.mgz')
                if ow or not os.path.exists(flnm):
                    note('    - Preparing volume file: %s' % flnm)
                    vol = cortex_to_ribbon(sub,
                                           (lhdat[t], rhdat[t]),
                                           map=surf2rib,
                                           dtype=(np.int32 if t == 'v123roi' else np.float32))
                    note('    - Exporting volume file: %s' % flnm)
                    vol.to_filename(flnm)
                else:
                    note('    - Not overwriting existing file: %s' % flnm)
        note('   Subject %s finished!' % sub.id)
    return 0
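Since the command takes *args, subject ids are passed directly; a hypothetical call (the subject ids are placeholders):

benson14_retinotopy_command('bert', 'ernie')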
def benson14_retinotopy_command(*args):
    '''
    benson14_retinotopy_command(args...) runs the benson14_retinotopy command; see
    benson14_retinotopy_help for more information.
    '''
    # Parse the arguments...
    (args, opts) = _benson14_parser(args)
    # help?
    if opts['help']:
        print(benson14_retinotopy_help)
        return 1
    # verbose?
    verbose = opts['verbose']
    def note(s):
        if verbose: print(s)
        return verbose
    # Add the subjects directory, if there is one
    if 'subjects_dir' in opts and opts['subjects_dir'] is not None:
        add_subject_path(opts['subjects_dir'])
    ow = not opts['no_overwrite']
    nse = opts['no_surf_export']
    nve = opts['no_vol_export']
    tr = {'polar_angle':  opts['angle_tag'],
          'eccentricity': opts['eccen_tag'],
          'visual_area':  opts['label_tag']}
    # okay, now go through the subjects...
    for subnm in args:
        note('Processing subject %s:' % subnm)
        sub = freesurfer_subject(subnm)
        note('   - Interpolating template...')
        (lhdat, rhdat) = predict_retinotopy(sub, template=opts['template'])
        # Export surfaces
        if nse:
            note('   - Skipping surface export.')
        else:
            note('   - Exporting surfaces:')
            for (t,dat) in lhdat.items():
                flnm = os.path.join(sub.directory, 'surf', 'lh.' + tr[t] + '.mgz')
                if ow or not os.path.exists(flnm):
                    note('    - Exporting LH prediction file: %s' % flnm)
                    img = fsmgh.MGHImage(
                        np.asarray([[dat]], dtype=(np.int32 if t == 'visual_area' else np.float32)),
                        np.eye(4))
                    img.to_filename(flnm)
                else:
                    note('    - Not overwriting existing file: %s' % flnm)
            for (t,dat) in rhdat.items():
                flnm = os.path.join(sub.directory, 'surf', 'rh.' + tr[t] + '.mgz')
                if ow or not os.path.exists(flnm):
                    note('    - Exporting RH prediction file: %s' % flnm)
                    img = fsmgh.MGHImage(
                        np.asarray([[dat]], dtype=(np.int32 if t == 'visual_area' else np.float32)),
                        np.eye(4))
                    img.to_filename(flnm)
                else:
                    note('    - Not overwriting existing file: %s' % flnm)
        # Export volumes
        if nve:
            note('   - Skipping volume export.')
        else:
            surf2rib = cortex_to_ribbon_map(sub, hemi=None)
            note('   - Exporting Volumes:')
            for t in lhdat.keys():
                flnm = os.path.join(sub.directory, 'mri', tr[t] + '.mgz')
                if ow or not os.path.exists(flnm):
                    note('    - Preparing volume file: %s' % flnm)
                    vol = cortex_to_ribbon(sub,
                                           (lhdat[t], rhdat[t]),
                                           map=surf2rib,
                                           method=('max' if t == 'visual_area' else 'weighted'),
                                           dtype=(np.int32 if t == 'visual_area' else np.float32))
                    note('    - Exporting volume file: %s' % flnm)
                    vol.to_filename(flnm)
                else:
                    note('    - Not overwriting existing file: %s' % flnm)
        note('   Subject %s finished!' % sub.id)
    return 0
            
def register_retinotopy_command(args):
    '''
    register_retinotopy_command(args) can be given a list of arguments, such as sys.argv[1:]; these
    arguments may include any options and must include at least one subject id. All subjects whose
    ids are given are registered to a retinotopy model, and the resulting registration, as well as
    the predictions made by the model in the registration, are exported.
    '''
    # Parse the arguments
    (args, opts) = _retinotopy_parser(args)
    # First, help?
    if opts['help']:
        print(register_retinotopy_help)
        return 1
    # and if we are verbose, let's set up a note function
    verbose = opts['verbose']

    def note(s):
        if verbose: print(s)
        return verbose

    # Add the subjects directory, if there is one
    if 'subjects_dir' in opts and opts['subjects_dir'] is not None:
        add_subject_path(opts['subjects_dir'])
    # Parse the simple numbers
    for o in [
            'weight_cutoff', 'edge_strength', 'angle_strength',
            'func_strength', 'max_step_size', 'max_out_eccen'
    ]:
        opts[o] = float(opts[o])
    opts['max_steps'] = int(opts['max_steps'])
    # These are for now not supported: #TODO
    if opts['angle_math'] or opts['angle_radians'] or opts['eccen_radians']:
        print('Mathematical angles and angles not in degrees are not yet supported.')
        return 1
    # The remainder of the args can wait for now; walk through the subjects:
    tag_key = {
        'eccen': 'eccentricity',
        'angle': 'polar_angle',
        'label': 'visual_area'
    }
    for subnm in args:
        sub = freesurfer_subject(subnm)
        note('Processing subject: %s' % sub.id)
        # we need to register this subject...
        res = {}
        ow = not opts['no_overwrite']
        for h in ['LH', 'RH']:
            note('   Processing hemisphere: %s' % h)
            hemi = getattr(sub, h)
            # See if we are loading custom values...
            (ang, ecc, wgt) = (None, None, None)
            suffix = '_' + h.lower() + '_file'
            if opts['angle' + suffix] is not None:
                ang = _guess_surf_file(opts['angle' + suffix])
            if opts['eccen' + suffix] is not None:
                ecc = _guess_surf_file(opts['eccen' + suffix])
            if opts['weight' + suffix] is not None:
                wgt = _guess_surf_file(opts['weight' + suffix])
            # Do the registration
            note('    - Running Registration...')
            res[h] = register_retinotopy(
                hemi,
                retinotopy_model(),
                polar_angle=ang,
                eccentricity=ecc,
                weight=wgt,
                weight_cutoff=opts['weight_cutoff'],
                partial_voluming_correction=opts['part_vol_correct'],
                edge_scale=opts['edge_strength'],
                angle_scale=opts['angle_strength'],
                functional_scale=opts['func_strength'],
                prior=opts['prior'],
                max_predicted_eccen=opts['max_out_eccen'],
                max_steps=opts['max_steps'],
                max_step_size=opts['max_step_size'])
            # Perform the hemi-specific outputs now:
            if not opts['no_reg_export']:
                regnm = '.'.join([h.lower(), opts['registration_name'], 'sphere', 'reg'])
                flnm = (os.path.join(sub.directory, 'surf', regnm) if h == 'LH'
                        else os.path.join(sub.directory, 'xhemi', 'surf', regnm))
                if ow or not os.path.exists(flnm):
                    note('    - Exporting registration file: %s' % flnm)
                    fsio.write_geometry(
                        flnm, res[h].coordinates.T, res[h].faces.T,
                        'Created by neuropythy (github.com/noahbenson/neuropythy)'
                    )
                else:
                    note('    - Skipping registration file: %s (file exists)' %
                         flnm)
            if not opts['no_surf_export']:
                for dim in ['angle', 'eccen', 'label']:
                    flnm = os.path.join(
                        sub.directory, 'surf',
                        '.'.join([h.lower(), opts[dim + '_tag'], 'mgz']))
                    if ow or not os.path.exists(flnm):
                        note('    - Exporting prediction file: %s' % flnm)
                        img = fsmgh.MGHImage(
                            np.asarray([[res[h].prop(tag_key[dim])]],
                                       dtype=(np.int32 if dim == 'label' else np.float32)),
                            np.eye(4))
                        img.to_filename(flnm)
                    else:
                        note('    - Skipping prediction file: %s (file exists)'
                             % flnm)
        # Do the volume exports here
        if not opts['no_vol_export']:
            note('   Processing volume data...')
            note('    - Calculating cortex-to-ribbon mapping...')
            surf2rib = cortex_to_ribbon_map(sub, hemi=None)
            for dim in ['angle', 'eccen', 'label']:
                flnm = os.path.join(sub.directory, 'mri',
                                    opts[dim + '_tag'] + '.mgz')
                if ow or not os.path.exists(flnm):
                    note('    - Generating volume file: %s' % flnm)
                    vol = cortex_to_ribbon(
                        sub,
                        (res['LH'].prop(tag_key[dim]), res['RH'].prop(tag_key[dim])),
                        map=surf2rib,
                        method=('max' if dim == 'label' else 'weighted'),
                        dtype=(np.int32 if dim == 'label' else np.float32))
                    note('    - Exporting volume file: %s' % flnm)
                    vol.to_filename(flnm)
                else:
                    note('    - Skipping volume file: %s (file exists)' % flnm)
        # That is it for this subject!
        note('   Subject %s finished!' % sub.id)
    # And if we made it here, all was successful.
    return 0