Example #1
 def handle_inst(inst, k=None):
     if k: dirstack.append(k)
     if pimms.is_map(inst): handle_file(inst)
     elif isinstance(inst, (list, tuple)):
         if len(inst) == 0 or not pimms.is_map(inst[0]):
             handle_dir(inst)
         else:
             handle_file(inst)
     else:
         raise ValueError('Illegal instruction type: %s' % (inst, ))
     if k: dirstack.pop()
     return None
Example #2
 def entry_meta_data(mds):
     '''
     lblidx.entry_meta_data is a list of meta-data maps, one for each of the labels in the given
       label index object.
     '''
     if mds is None: return None
     if is_dataframe(mds):
         mds = {k:mds[k].values for k in mds.columns}
     elif pimms.is_map(mds):
         ks = list(mds.keys())
         mds = [{k:v for (k,v) in zip(ks,vs)} for vs in np.transpose(list(mds.values()))]
     elif not pimms.is_array(mds) or not all(pimms.is_map(u) for u in mds):
         raise ValueError('unbalanced or non-map entry meta-data')
     return pimms.persist(mds)
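The column-map branch above transposes a mapping of per-key vectors into one meta-data map per label entry. A minimal standalone sketch of that same transposition, with hypothetical meta-data keys and no pimms dependency:

import numpy as np

# hypothetical meta-data given as columns: one vector of values per key
mds = {'abbreviation': ['V1', 'V2'], 'fullname': ['primary visual', 'secondary visual']}
ks = list(mds.keys())
# transpose the columns into one small map per entry, as in the pimms.is_map branch above
rows = [{k: v for (k, v) in zip(ks, vs)} for vs in np.transpose(list(mds.values()))]
print(rows)  # -> [{'abbreviation': 'V1', 'fullname': 'primary visual'}, {'abbreviation': 'V2', ...}]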
Example #3
def _parse_field_argument(instruct, faces, edges, coords):
    _java = java_link()
    if pimms.is_str(instruct):
        insttype = instruct
        instargs = []
    elif hasattr(instruct, '__iter__'):
        insttype = instruct[0]
        instargs = instruct[1:]
    else:
        raise RuntimeError(
            'potential field instruction must be list/tuple-like or a string')
    # look this type up in the types data:
    insttype = insttype.lower()
    if insttype not in _parse_field_data_types:
        raise RuntimeError('Unrecognized field data type: ' + insttype)
    instdata = _parse_field_data_types[insttype]
    # if the data is a dictionary, we must parse on the next arg
    if pimms.is_map(instdata):
        shape_name = instargs[0].lower()
        instargs = instargs[1:]
        if shape_name not in instdata:
            raise RuntimeError('Shape ' + shape_name +
                               ' not supported for type ' + insttype)
        instdata = instdata[shape_name]
    # okay, we have a list of instructions... find the java method we are going to call...
    java_method = getattr(_java.jvm.nben.mesh.registration.Fields, instdata[0])
    # and parse the arguments into a list...
    java_args = [
        _parse_field_function_argument(a, instargs, faces, edges, coords)
        for a in instdata[1:]
    ]
    # and call the function...
    return java_method(*java_args)
Example #4
 def parameters(params):
     '''
     rsp.parameters is a pimms.itable object of the divisive normalization parameters (each row
     of the itable is one parameterization) used in the calculation for the image array response
     object rsp. The parameters may be given to a response object as a list of maps or as an
     itable.
     '''
     if pimms.is_itable(params):
         return params
     elif pimms.is_map(params):
         tbl = pimms.itable(params)
         # we want this to fail if it can't be transformed to rows
         try:
             tbl.rows
         except Exception:
             raise ValueError('map could not be cast to itable')
         return tbl
     else:
         tbl = {}
         try:
             p0 = params[0]
             tbl = {k: [v] for (k, v) in p0.items()}
             for p in params[1:]:
                 if len(p) != len(p0): raise ValueError()
                 for (k, v) in p.items():
                     tbl[k].append(v)
             tbl = pimms.itable(tbl)
             tbl.rows
         except Exception:
             raise ValueError(
                 'parameters must be an itable, a map of columns, or a list of '
                 + 'parameter maps')
         return tbl
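In the fall-through branch above, a list of parameterization maps is folded into per-key columns before being handed to pimms.itable. A rough standalone illustration of that accumulation, using hypothetical parameter names and plain dicts:

# hypothetical list of parameterization maps, one map per row of the table
params = [{'sigma': 0.5, 'gain': 1.0}, {'sigma': 0.7, 'gain': 2.0}]
p0 = params[0]
tbl = {k: [v] for (k, v) in p0.items()}
for p in params[1:]:
    if len(p) != len(p0): raise ValueError('rows must share the same keys')
    for (k, v) in p.items():
        tbl[k].append(v)
print(tbl)  # -> {'sigma': [0.5, 0.7], 'gain': [1.0, 2.0]}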
Example #5
def to_credentials(arg):
    '''
    to_credentials(arg) converts arg into a pair (key, secret) if arg can be coerced into such a
      pair and otherwise raises an error.
    
    Possible inputs include:
      * A tuple (key, secret)
      * A mapping with the keys 'key' and 'secret'
      * The name of a file that can load credentials via the load_credentials() function
      * A string that separates the key and secret by ':', e.g., 'mykey:mysecret'
      * A string that separates the key and secret by a "\n", e.g., "mykey\nmysecret"
    '''
    if pimms.is_str(arg):
        try:
            return load_credentials(arg)
        except Exception:
            pass
        try:
            return str_to_credentials(arg)
        except Exception:
            raise ValueError(
                'String "%s" is neither a file containing credentials nor a valid'
                ' credentials string itself.' % arg)
    elif pimms.is_map(arg) and 'key' in arg and 'secret' in arg:
        return (arg['key'], arg['secret'])
    elif pimms.is_vector(arg, str) and len(arg) == 2:
        return tuple(arg)
    else:
        raise ValueError(
            'given argument cannot be coerced to credentials: %s' % arg)
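Assuming to_credentials is in scope (imported from the module above), the documented input forms should all coerce to the same (key, secret) pair; the filename form is omitted here because it depends on load_credentials and an actual file. The key and secret values are placeholders:

print(to_credentials(('mykey', 'mysecret')))                    # tuple form
print(to_credentials({'key': 'mykey', 'secret': 'mysecret'}))   # mapping form
print(to_credentials('mykey:mysecret'))                         # 'key:secret' string form
print(to_credentials('mykey\nmysecret'))                        # "key\nsecret" string form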
Example #6
def to_image_spec(img, **kw):
    '''
    to_image_spec(img) yields a dictionary of meta-data for the given nibabel image object img.
    to_image_spec(hdr) yields the equivalent meta-data for the given nibabel image header.

    Note that img may also be a mapping object, in which case it is returned verbatim.
    '''
    if pimms.is_vector(img, 'int') and is_tuple(img) and len(img) < 5:
        r = image_array_to_spec(np.zeros(img))
    elif pimms.is_map(img):
        r = img
    elif is_image_header(img):
        r = image_header_to_spec(img)
    elif is_image(img):
        r = image_to_spec(img)
    elif is_image_array(img):
        r = image_array_to_spec(img)
    else:
        raise ValueError('cannot convert object of type %s to image-spec' %
                         type(img))
    if len(kw) > 0: r = {k: v for m in (r, kw) for (k, v) in six.iteritems(m)}
    # normalize the entries
    for (k, aliases) in six.iteritems(imspec_aliases):
        if k in r: continue
        for al in aliases:
            if al in r:
                val = r[al]
                r = pimms.assoc(pimms.dissoc(r, al), k, val)
                break
    return r
Example #7
def face_vmag(hemi, retinotopy='any', to=None, **kw):
    '''
    face_vmag(mesh) yields the visual magnification based on the projection of individual faces on
      the cortical surface into the visual field.
    face_vmag(mdat) uses the given magnification data mdat (as returned from mag_data()); if valid
      magnification data is passed then all options related to the mag_data() function are ignored.

    All options accepted by mag_data() are accepted by face_vmag().

    The additional optional arguments are also accepted:
      * to (default: None) specifies that the resulting data should be transformed in some way;
        these transformations are:
          * None or 'data': returns the full magnification data without transformation;
          * 'faces': returns a property of the visual magnification value of each face;
          * 'vertices': returns a property of the visual magnification value of each vertex, as
            determined by averaging the magnification values of the faces adjacent to each vertex.
    '''
    mdat = mag_data(hemi, retinotopy=retinotopy, **kw)
    if pimms.is_vector(mdat): return tuple([face_vmag(m, to=to) for m in mdat])
    elif pimms.is_map(mdat.keys(), 'int'):
        return pimms.lazy_map({k: curry(lambda k: face_vmag(mdat[k], to=to), k)
                               for k in six.iterkeys(mdat)})
    #TODO: implement the face_vmag calculation using mdat
    # convert to the appropriate type according to the to param
    raise NotImplementedError()
Example #8
 def instructions(inst):
     '''
     filemap.instructions is the map of load/save instructions for the given filemap.
     '''
     if not pimms.is_map(inst) and not isinstance(inst, list):
         raise ValueError('instructions must be a map or a list')
     return pimms.persist(inst)
Example #9
def is_image_spec(imspec):
    '''
    is_image_spec(imspec) yields True if imspec is a map with the keys 'affine' and 'image_shape',
      otherwise yields False.
    '''
    return (pimms.is_map(imspec)
            and imspec_lookup(imspec, 'affine') is not None
            and imspec_lookup(imspec, 'image_shape') is not None)
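As a quick sanity check, assuming is_image_spec and its imspec_lookup helper are in scope, a plain mapping qualifies as an image-spec once it carries both required entries:

import numpy as np

spec = {'affine': np.eye(4), 'image_shape': (64, 64, 32)}   # hypothetical minimal spec
print(is_image_spec(spec))                   # expected: True
print(is_image_spec({'affine': np.eye(4)}))  # expected: False (no image_shape)
print(is_image_spec('not a map'))            # expected: False (not a mapping)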
Example #10
 def flag_words(u):
     '''
     clp.flag_words yields the persistent map of optional flag words recognized by the given
       command-line parser clp.
     '''
     if pimms.is_pmap(u): return u
     elif pimms.is_map(u): return pyr.pmap(u)
     else: raise ValueError('flag_words must be a mapping')
Example #11
 def default_values(dv):
     '''
     clp.default_values yields the persistent map of default values for the given command-line
       parser clp.
     '''
     if pimms.is_pmap(dv): return dv
     elif pimms.is_map(dv): return pyr.pmap(dv)
     else: raise ValueError('default_values must be a mapping')
Example #12
 def option_characters(u):
     '''
     clp.option_characters yields the persistent map of optional characters recognized by the
       given command-line parser clp.
     '''
     if pimms.is_pmap(u): return u
     elif pimms.is_map(u): return pyr.pmap(u)
     else: raise ValueError('option_characters must be a mapping')
Example #13
 def path_parameters(pp):
     '''
     filemap.path_parameters is a map of parameters for the filemap's path.
     '''
     if pp is None: return pyr.m()
     elif not pimms.is_map(pp):
         raise ValueError('path parameters must be a mapping')
     else:
         return pimms.persist(pp)
Example #14
def to_image(img, image_type=None, meta_data=None, **kwargs):
    '''
    to_image(array) yields a Nifti1Image of the given array with default meta-data.
    to_image(array, image_type) yields an image object of the given type; image_type may either be
      an image class or a class name (see supported types below).
    to_image((array, meta_data)) uses the given mapping of meta-data to fill in the image's
      meta-data; note that meta_data may simply be an affine transformation matrix.
    to_image((array, affine, meta_data)) uses the given affine specifically (the given affine
      overrides any affine included in the meta_data).

    Note that the array may optionally be an image itself, in which case its meta-data is used as a
    starting point for the new meta-data. Any meta-data passed as a tuple overwrites this meta-data,
    and any meta-data passed as an optional argument overwrites this meta-data in turn.

    The first optional argument, image_type, is interpreted as an image type if possible, but if a
    meta-data mapping is passed as the first optional argument then it is used as the meta-data
    instead; otherwise, the optional third argument is named meta_data, and any additional keyword
    arguments passed to to_image are merged into this meta_data object left-to-right (i.e., keyword
    arguments overwrite the meta_data keys).
    '''
    # quick cleanup of args:
    if meta_data is not None and not pimms.is_map(meta_data):
        meta_data = to_image_meta_data(meta_data)
    meta_data = pimms.merge({} if meta_data is None else meta_data, kwargs)
    if image_type is None: image_type = 'nifti1'
    # deduce image type
    image_type = to_image_type(image_type)
    # okay, next, parse the image argument itself:
    if is_tuple(img):
        if len(img) == 1: (img, aff, mdat) = (img[0], None, None)
        elif len(img) == 2: (img, aff, mdat) = (img[0], None, img[1])
        elif len(img) == 3: (img, aff, mdat) = img
        else:
            raise ValueError(
                'cannot parse more than 3 elements from image tuple')
    else:
        (aff, mdat) = (None, None)
    # see if the img argument is an image object
    try:
        (img, aff0, mdat0) = (img.dataobj, img.affine, to_image_meta_data(img))
    except Exception:
        (aff0, mdat0) = (None, {})
    # check that the affine wasn't given as the meta-data (e.g. (img,aff) instead of (img,mdat))
    if aff is None and mdat is not None:
        try:
            (aff, mdat) = (to_affine(mdat, 3), {})
        except Exception:
            pass
    # parse the meta-data that has been given
    mdat = dict(pimms.merge(mdat0, {} if mdat is None else mdat, meta_data))
    # if there is an explicit affine, we put it into mdat now
    if aff is not None: mdat['affine'] = to_affine(aff, 3)
    if aff0 is not None and 'affine' not in mdat:
        mdat['affine'] = to_affine(aff0, 3)
    # okay, we create the image now:
    return image_type.create(img, meta_data=mdat)
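Assuming to_image and its helpers are importable as in the module above, the documented call forms might be exercised like this; the array contents and affine are placeholders:

import numpy as np

arr = np.zeros((16, 16, 16))   # placeholder image array
aff = np.eye(4)                # placeholder affine

im1 = to_image(arr)                              # array only, default meta-data
im2 = to_image((arr, aff))                       # (array, affine) tuple form
im3 = to_image(arr, meta_data={'affine': aff})   # affine supplied through the meta-data map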
Example #15
 def area_name_to_id(vai):
     '''
     mdl.area_name_to_id is a persistent map whose keys are area names (such as 'V1' or 'hV4')
     and whose values are the area id (a number greater than 0) for that area.
     mdl.area_name_to_id is a parameter which may be provided as a list of area names, in which
     case the first is assumed to be area 1, the next area 2, etc.
     '''
     if vai is None: return None
     if not pimms.is_map(vai): return pyr.pmap({nm:(ii+1) for (ii,nm) in enumerate(vai)})
     elif pimms.is_pmap(vai): return vai
     else: return pyr.pmap(vai)
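Assuming the function above can be called directly (outside of its pimms parameter decorator), the list form is converted into a persistent name-to-id map with ids assigned in order starting at 1:

print(area_name_to_id(['V1', 'V2', 'V3', 'hV4']))
# expected: pmap({'V1': 1, 'V2': 2, 'V3': 3, 'hV4': 4})
print(area_name_to_id({'V1': 1, 'V2': 2}))   # a plain dict of name -> id is simply converted to a pmap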
Example #16
def label_index(dat, *args, **kw):
    '''
    label_index(idx_map) converts the given map- or dict-like object idx_map into a label index by
      assuming that the keys are label ids and the values are label names or tuples of label names
      and (r,g,b,a) colors.
    label_index(ids, names) uses the given ids and names to make the label index.
    label_index(ids, names, colors) additionally uses the given colors.

    Note that if there is not a label with id 0 then such a label is automatically created with the
    name 'none', the rgba color [0,0,0,0], and no entry meta-data. As a general rule, the label 0
    should be used to indicate that a label is missing.

    The optional arguments meta_data and entry_meta_data may specify both the meta-data for the
    label-index object itself as well as the meta-data for the individual entries.
    '''
    md = kw.pop('meta_data', {})
    mds = kw.pop('entry_meta_data', None)
    if len(kw) > 0: raise ValueError('unrecognized optional argument(s) given to label_index')
    if len(args) == 0:
        if pimms.is_map(dat):
            (ids,nms,clrs) = ([],[],[])
            for (k,v) in six.iteritems(dat):
                if pimms.is_scalar(v): c = None
                else: (v,c) = v
                if pimms.is_str(k):
                    ids.append(v)
                    nms.append(k)
                else:
                    ids.append(k)
                    nms.append(v)
                if c is not None: clrs.append(c)
        elif is_dataframe(dat):
            if dat.index.name.lower() == 'id': ids = dat.index.values
            else: ids = dat['id'].values
            nms = dat['name'].values
            if 'color' in dat: clrs = np.array(list(map(list, dat['color'].values)))
            elif all(k in dat for k in ['r','g','b']):
                ks = ['r','g','b']
                if 'a' in dat: ks.append('a')
                clrs = np.array([[r[k] for k in ks] for (ii,r) in dat.iterrows()])
            else: clrs = []
        elif pimms.is_vector(dat, 'int'):
            ids = np.unique(dat)
            nms = ['label%d'%k for k in ids]
            clrs = []
        else: raise ValueError('label_index(idx_map) given non-map argument')
    elif len(args) == 1: (ids,nms,clrs) = (dat, args[0], [])
    elif len(args) == 2: (ids,nms,clrs) = (dat, args[0], args[1])
    else: raise ValueError('Too many arguments given to label_index()')
    if clrs is None or len(clrs) == 0: clrs = None
    elif len(clrs) != len(ids): raise ValueError('color-count must match id-count')
    # okay, make the label index
    return LabelIndex(ids, nms, colors=clrs, meta_data=md, entry_meta_data=mds)
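Assuming label_index and the LabelIndex class are in scope, the map form and the ids-and-names form described in the docstring might look like the following; the names and colors are illustrative only:

idx = label_index({0: ('none', (0, 0, 0, 0)),
                   1: ('V1',   (1, 0, 0, 1)),
                   2: ('V2',   (0, 1, 0, 1))})   # id -> (name, rgba) map form

idx2 = label_index([1, 2], ['V1', 'V2'])         # ids-and-names form; colors omitted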
Example #17
def to_image_meta_data(img):
    '''
    to_image_meta_data(img) yields a dictionary of meta-data for the given nibabel image object img.
    to_image_meta_data(hdr) yields the equivalent meta-data for the given nibabel image header.

    Note that img may also be a mapping object, in which case it is returned verbatim.
    '''
    if pimms.is_map(img): return img
    try:              hdr = img.header
    except Exception: hdr = img
    intype = to_image_type(hdr)
    return intype.meta_data(hdr)
Example #18
 def supplemental_paths(sp):
     '''
     filemap.supplemental_paths is a map of additional paths provided to the filemap object.
     '''
     if not pimms.is_map(sp):
         raise ValueError('supplemental_paths must be a map')
     rr = {}
     for (nm, pth) in six.iteritems(sp):
         pth = FileMap.valid_path(pth)
         if pth is None:
             raise ValueError(
                 'supplemental paths must be directories or tarballs')
         rr[nm] = pth
     return pimms.persist(rr)
Example #19
def _osf_tree(proj, path=None, base='osfstorage'):
    if path is None: path = (osf_basepath % (proj, base))
    else: path = (osf_basepath % (proj, base)) + path.lstrip('/')
    dat = json.loads(url_download(path, None))
    if 'data' not in dat:
        raise ValueError('Cannot detect kind of url for ' + path)
    dat = dat['data']
    if pimms.is_map(dat): return dat['links']['download']
    res = {
        r['name']: (u['links']['download'] if r['kind'] == 'file' else curry(
            lambda r: _osf_tree(proj, r, base), r['path']))
        for u in dat for r in [u['attributes']]
    }
    return pimms.lazy_map(res)
Example #20
def is_mag_data(mdat):
    '''
    is_mag_data(dat) yields True if the given data is a valid set of magnification data and False
      otherwise.

    Note that this does not return True for all valid return values of the mag_data() function:
    specifically, if the mag_data() function yields a list of mag-data maps or a lazy-map of the
    mag-data maps split out by visual area, then this will return False. This function only returns
    True for a map of mag data itself.
    '''
    if not pimms.is_map(mdat): return False
    for k in ['surface_coordinates', 'visual_coordinates', 'mesh', 'submesh', 'mask',
              'retinotopy_data', 'masked_data', 'surface_areas', 'visual_areas']:
        if k not in mdat: return False
    return True
Example #21
def to_pseudo_path(obj):
    '''
    to_pseudo_path(obj) yields a pseudo-path object that has been coerced from the given obj or
      raises an exception. If the obj is a pseudo-path already, it is returned unchanged.
    '''
    if is_pseudo_path(obj): return obj
    elif pimms.is_str(obj): return pseudo_path(obj)
    elif pimms.is_vector(obj):
        if len(obj) > 0 and pimms.is_map(obj[-1]):
            (obj, kw) = (obj[:-1], obj[-1])
        else:
            kw = {}
        return pseudo_path(*obj, **kw)
    else:
        raise ValueError('cannot coerce given object to a pseudo-path: %s' %
                         obj)
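Assuming to_pseudo_path and pseudo_path are in scope, the string form and the vector-with-options form might be used as below; the paths are placeholders, and the cache_path option name is an assumption about pseudo_path's keyword arguments:

pp1 = to_pseudo_path('/tmp/some_directory')   # plain path string
# a vector of pseudo_path arguments whose last element is a keyword-options map
pp2 = to_pseudo_path(('/tmp/some_directory', {'cache_path': '/tmp/np_cache'}))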
Example #22
def saverc(filename, dat, overwrite=False):
    '''
    saverc(filename, d) saves the given configuration dictionary d to the given filename in JSON
      format. If d is not a dictionary or if filename already exists or cannot be created, an error
      is raised. This function does not create directories.

    The optional argument overwrite (default: False) may be passed as True to overwrite files that
    already exist.
    '''
    filename = os.path.expanduser(os.path.expandvars(filename))
    if not overwrite and os.path.isfile(filename):
        raise ValueError('Given filename %s already exists' % filename)
    if not pimms.is_map(dat):
        try: dat = dict(dat)
        except Exception: raise ValueError('Given config data must be a dictionary')
    with open(filename, 'w') as fl:
        json.dump(dat, fl, sort_keys=True)
    return filename
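A minimal usage sketch, assuming saverc is in scope; it writes a small configuration dict to a fresh JSON file and shows that a second write requires overwrite=True:

import os, tempfile

cfg = {'data_cache_root': '~/Temp/np_cache'}    # hypothetical config entries
fname = os.path.join(tempfile.mkdtemp(), 'testrc.json')
saverc(fname, cfg)                   # first write succeeds
saverc(fname, cfg, overwrite=True)   # without overwrite=True this second call would raise ValueError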
Example #23
def to_label_index(obj):
    '''
    to_label_index(obj) attempts to coerce the given object into a label index object; if obj is
      already a label index object, then obj itself is returned. If obj cannot be coerced into a
      label index, then an error is raised.

    The obj argument can be any of the following:
      * a label index
      * a label list (i.e., an integer vector)
      * a tuple of arguments, potentially ending with a kw-options map, that can be passed to the
        label_index function successfully.
    '''
    if   is_label_index(obj): return obj
    elif pimms.is_vector(obj, 'int'): return label_index(obj)
    elif is_dataframe(obj): return label_index(obj)
    elif is_tuple(obj):
        if len(obj) > 1 and pimms.is_map(obj[-1]): return label_index(*obj[:-1], **obj[-1])
        else: return label_index(*obj)
    else: raise ValueError('could not parse to_label_index parameter: %s' % obj)
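Assuming to_label_index and label_index are in scope, the integer-vector form and the tuple form ending in a keyword-options map both coerce to a label index; the values below are placeholders:

li1 = to_label_index([0, 1, 1, 2, 2, 2])   # integer label vector
# tuple of label_index arguments ending with a keyword-options map
li2 = to_label_index(([1, 2], ['V1', 'V2'], {'meta_data': {'source': 'example'}}))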
Example #24
def _osf_tree(proj, path=None, base='osfstorage'):
    if path is None: path = (osf_basepath % (proj, base))
    else: path = (osf_basepath % (proj, base)) + path.lstrip('/')
    dat = json.loads(url_download(path, None))
    if 'data' not in dat:
        raise ValueError('Cannot detect kind of url for ' + path)
    res = {}
    if pimms.is_map(dat['data']): return dat['data']['links']['download']
    while dat is not None:
        links = dat.get('links', {})
        dat = dat['data']
        for u in dat:
            for r in [u['attributes']]:
                res[r['name']] = (
                    u['links']['download'] if r['kind'] == 'file' else curry(
                        lambda r: _osf_tree(proj, r, base), r['path']))
        nxt = links.get('next', None)
        if nxt is not None: dat = json.loads(url_download(nxt, None))
        else: dat = None
    return pimms.lazy_map(res)
Example #25
def import_stimuli(stimulus, gamma_correction_function):
    '''
    import_stimuli is a calculation that ensures that the stimulus images to be used in the sco
    calculation are properly imported.

    Required afferent values:
      @ stimulus May be either a dict or list of image matrices or a list of image filenames.

    Optional afferent values:
      @ gamma_correction_function May specify how gamma should be corrected; this
        should usually be provided via the gamma argument (see calc_gamma_correction and gamma).

    Efferent output values:
      @ stimulus_map Will be a persistent dict whose keys are the image identifiers and whose
        values are the image matrices of the imported stimuli prior to normalization or any
        processing.
      @ stimulus_ordering Will be a persistent vector of the keys of stimulus_map in the order
        provided.
    '''
    # Make this into a map so that we have ids and images/filenames
    if not pimms.is_map(stimulus):
        # first task: turn this into a map
        if pimms.is_str(stimulus):
            order = [stimulus]                   # record the single id before wrapping it in a map
            stimulus = {stimulus: stimulus}
        elif hasattr(stimulus, '__iter__'):
            pat = '%%0%dd' % (int(np.log10(len(stimulus))) + 1)
            order = [(pat % i) for i in range(len(stimulus))]
            stimulus = {(pat % i):s for (i,s) in enumerate(stimulus)}
        else:
            raise ValueError('stimulus is not iterable nor a filename')
    else:
        order = stimulus.keys()
    # we can use the stimulus_importer function no matter what the stimulus arguments are
    stim_map = {k: import_stimulus(v, gamma_correction_function)
                for (k,v) in six.iteritems(stimulus)}
    for u in six.itervalues(stim_map):
        u.setflags(write=False)
    return {'stimulus_map': pyr.pmap(stim_map), 'stimulus_ordering': pyr.pvector(order)}
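When a bare list of stimuli is given, the branch above generates zero-padded string ids wide enough for the list length. A standalone sketch of just that naming pattern, with hypothetical filenames:

import numpy as np

stimulus = ['face.png', 'house.png', 'scene.png']   # hypothetical filenames
pat = '%%0%dd' % (int(np.log10(len(stimulus))) + 1)
order = [(pat % i) for i in range(len(stimulus))]
stim_map = {(pat % i): s for (i, s) in enumerate(stimulus)}
print(order)     # -> ['0', '1', '2']
print(stim_map)  # -> {'0': 'face.png', '1': 'house.png', '2': 'scene.png'}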
Example #26
def build_model(model_name, force_exports=False):
    '''
    sco.build_model(name) builds an SCO model according to the given name and returns it. Valid
      model names correspond to valid SCO plans; see the sco.sco_plans dict.

    The following options may be given:
      * force_exports (default: False) specifies whether exporting functions included in the
        model should be automatically run when the model is built. If this is set to True then
        all standard exports will complete before the model is returned.
    '''
    import sco.util, pimms
    if pimms.is_str(model_name):
        model_name = model_name.lower()
        _plans = model_data()
        if model_name not in _plans:
            raise ValueError('Unknown model: %s' % model_name)
        mdl = _plans[model_name]
        dat = mdl.nodes
        if force_exports:
            dat = dat.set('require_exports', sco.util.require_exports)
        return pimms.plan(dat)
    elif pimms.is_plan(model_name):
        if force_exports:
            dat = model_name.nodes
            dat = dat.set('require_exports', sco.util.require_exports)
            return pimms.plan(dat)
        else:
            return model_name
    elif pimms.is_map(model_name):
        dat = model_name
        if force_exports:
            dat = dat.set('require_exports', sco.util.require_exports)
        return pimms.plan(dat)
    else:
        raise ValueError(
            'Unrecognized object type given as first argument to build_model'
        )
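Assuming the sco package and its model_data() plans are available as above, building a model by name might look like this; the plan name 'benson17' is a placeholder, and pimms.is_plan is the same predicate used in the function itself:

import pimms

mdl = build_model('benson17')               # look up the named plan and return it
print(pimms.is_plan(mdl))                   # expected: True
mdl_x = build_model('benson17', force_exports=True)   # also attaches the standard export node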
Example #27
def die(*args):
    print(*args)
    sys.exit(1)


if not os.path.isdir(bids_dir):
    die('no BIDS directory found!')

try:
    with open(config_file, 'r') as fl:
        conf = json.load(fl)
except Exception:
    die("Could not read config.json!")

if not pimms.is_map(conf):
    die("config.json must contain a single dictionary")
if 'subjectName' not in conf or not pimms.is_str(conf['subjectName']):
    die('config.json does not contain a valid "subjectName" entry')
if 'sessionName' not in conf or not pimms.is_str(conf['sessionName']):
    die('config.json does not contain a valid "sessionName" entry')

# we just have to find the relevant files then echo them for the calling script; in the case of the
# config file, we write out a new one in the /running directory
sub = conf['subjectName']
ses = conf['sessionName']
opts = conf.get('options', {})
with open(opts_file, 'w') as fl:
    json.dump(opts, fl)

# find the relevant files in the BIDS dir; first, the BOLD image is easy to find:
Example #28
def rtmag_potential(hemi, retinotopy=Ellipsis, mask=Ellipsis, weight=Ellipsis,
                    surface='midgray', min_weight=Ellipsis, min_eccentricity=0.75,
                    visual_area=None, map_visual_areas=Ellipsis,
                    visual_area_field_signs=Ellipsis,
                    measurement_uncertainty=0.4, measurement_knob=1,
                    magnification_knob=2, fieldsign_knob=8, edge_knob=0, rt_knob=0):
    from neuropythy.vision.retinotopy import clean_retinotopy_potential
    import neuropythy.optimize as op
    f_ret = clean_retinotopy_potential(hemi, retinotopy=retinotopy, mask=mask, weight=weight,
                                       surface=surface, min_weight=min_weight,
                                       min_eccentricity=min_eccentricity,
                                       visual_area=visual_area, map_visual_areas=map_visual_areas,
                                       visual_area_field_signs=visual_area_field_signs,
                                       measurement_uncertainty=measurement_uncertainty,
                                       measurement_knob=measurement_knob,
                                       magnification_knob=magnification_knob,
                                       fieldsign_knob=fieldsign_knob, edge_knob=edge_knob)
    # process a few additional arguments:
    if   visual_area_field_signs is None:     visual_area_field_signs = {}
    elif visual_area_field_signs is Ellipsis: visual_area_field_signs = {1:-1, 2:1, 3:-1, 4:1}
    # this may be a lazy map of visual areas; we want to operate on all of them lazily, so wrap the
    # rest of this model up in a function:
    def make_potential(va):
        global_field_sign = None if va is None else visual_area_field_signs.get(va)
        f_r = f_ret if va is None else f_ret[va]
        # The initial parameter vector is stored in the meta-data:
        X0 = f_r.meta_data['X0']
        # A few other handy pieces of data we can extract:
        fieldsign = visual_area_field_signs.get(va)
        submesh = f_r.meta_data['mesh']
        sxyz = submesh.coordinates
        n = submesh.vertex_count
        (u,v) = submesh.tess.indexed_edges
        selen = submesh.edge_lengths
        sarea = submesh.face_areas
        m = submesh.tess.edge_count
        fs = submesh.tess.indexed_faces
        neis = submesh.tess.indexed_neighborhoods
        fangs = submesh.face_angles
        # we're adding r and t (radial and tangential visual magnification) pseudo-parameters to
        # each vertex; r and t are derived from the position of other vertices; our first step is
        # to derive these values; for this we start with the parameters themselves:
        (x,y) = [op.identity[np.arange(k, 2*n, 2)] for k in (0,1)]
        # okay, we need to setup a bunch of least-squares solutions, one for each vertex:
        nneis = np.asarray([len(nn) for nn in neis])
        maxneis = np.max(nneis)
        thts = op.atan2(y, x)
        eccs = op.compose(op.piecewise(op.identity, ((-1e-9, 1e-9), 1)),
                          op.sqrt(x**2 + y**2))
        coss = x/eccs
        sins = y/eccs
        # organize neighbors:
        # neis becomes a list of rows of 1st neighbor, second neighbor etc. with -1 indicating none
        neis = np.transpose([nei + (-1,)*(maxneis - len(nei)) for nei in neis])
        qnei = (neis > -1) # mark where there are actually neighbors
        neis[~qnei] = 0 # we want the -1s (now 0s) to behave okay when passed to a potential index
        # okay, walk through the neighbors setting up the least squares
        (r, t) = (None, None)
        for (k,q,nei) in zip(range(len(neis)), qnei.astype('float'), neis):
            xx = x[nei] - x
            yy = y[nei] - y
            sd = np.sum((sxyz[:,nei].T - sxyz[:,k])**2, axis=1)
            (xx, yy) = (xx*coss + yy*sins, yy*coss - xx*sins)
            xterm = (op.abs(xx) * q)
            yterm = (op.abs(yy) * q)
            r = xterm if r is None else (r + xterm)
            t = yterm if t is None else (t + yterm)
        (r, t) = [uu * zinv(nneis) for uu in (r, t)]
        # for neighboring edges, we want r and t to be similar to each other
        f_rtsmooth = op.sum((r[v]-r[u])**2 + (t[v]-t[u])**2) / m
        # we also want r and t to predict the radial and tangential magnification of the node, so
        # we want to make sure that edges are the right distances away from each other based on the
        # surface edge lengths and the distance around the vertex at the center
        # for this we'll want some constant info about the surface edges/angles
        # okay, in terms of the visual field coordinates of the parameters, we will want to know
        # the angular position of each node
        # organize face info
        mnden   = 0.0001
        (e,qs,qt) = np.transpose([(i,e[0],e[1]) for (i,e) in enumerate(submesh.tess.edge_faces)
                                  if len(e) == 2 and selen[i] > mnden
                                  if sarea[e[0]] > mnden and sarea[e[1]] > mnden])
        (fis,q) = np.unique(np.concatenate([qs,qt]), return_inverse=True)
        (qs,qt)   = np.reshape(q, (2,-1))
        o       = len(fis)
        faces   = fs[:,fis]
        fangs   = fangs[:,fis]
        varea   = op.signed_face_areas(faces)
        srfangmtx = sps.csr_matrix(
            (fangs.flatten(),
             (faces.flatten(), np.concatenate([np.arange(o), np.arange(o), np.arange(o)]))),
            (n, o))
        srfangtot = flattest(srfangmtx.sum(axis=1))
        # normalize this angle matrix by the total and put it back in the same order as faces
        srfangmtx = zdivide(srfangmtx, srfangtot / (np.pi*2)).tocsr().T
        nrmsrfang = np.array([sps.find(srfangmtx[k])[2][np.argsort(fs[:,k])] for k in range(o)]).T
        # okay, now compare these to the actual angles;
        # we also want to know, for each edge, the angle relative to the radial axis; let's start
        # by organizing the faces into the units we compute over:
        (fa,fb,fc) = [np.concatenate([faces[k], faces[(k+1)%3], faces[(k+2)%3]]) for k in range(3)]
        atht = thts[fa]
        # we only have to worry about the (a,b) and (a,c) edges now; from the perspective of a...
        bphi = op.atan2(y[fb] - y[fa], x[fb] - x[fa]) - atht
        cphi = op.atan2(y[fc] - y[fa], x[fc] - x[fa]) - atht
        ((bcos,bsin),(ccos,csin)) = bccssn = [(op.cos(q),op.sin(q)) for q in (bphi,cphi)]
        # the distance should be predicted by surface edge length times ellipse-magnification
        # prediction; we have made uphi and vphi so that radial axis is x axis and tan axis is y
        (ra,ta) = (op.abs(r[fa]), op.abs(t[fa]))
        bslen = np.sqrt(np.sum((sxyz[:,fb] - sxyz[:,fa])**2, axis=0))
        cslen = np.sqrt(np.sum((sxyz[:,fc] - sxyz[:,fa])**2, axis=0))
        bpre_x = bcos * ra * bslen
        bpre_y = bsin * ta * bslen
        cpre_x = ccos * ra * cslen
        cpre_y = csin * ta * cslen
        # if there's a global field sign, we want to invert these predictions when the measured
        # angle is the wrong sign
        if global_field_sign is not None:
            varea_f = varea[np.concatenate([np.arange(o) for _ in range(3)])] * global_field_sign
            fspos = (op.sign(varea_f) + 1)/2
            fsneg = 1 - fspos
            (bpre_x,bpre_y,cpre_x,cpre_y) = (
                bpre_x*fspos - cpre_x*fsneg, bpre_y*fspos - cpre_y*fsneg,
                cpre_x*fspos - bpre_x*fsneg, cpre_y*fspos - bpre_y*fsneg)
        (ax,ay,bx,by,cx,cy) = [x[fa],y[fa],x[fb],y[fb],x[fc],y[fc]]
        (cost,sint) = [op.cos(atht), op.sin(atht)]
        (bpre_x, bpre_y) = (bpre_x*cost - bpre_y*sint + ax, bpre_x*sint + bpre_y*cost + ay)
        (cpre_x, cpre_y) = (cpre_x*cost - cpre_y*sint + ax, cpre_x*sint + cpre_y*cost + ay)
        # okay, we can compare the positions now...
        f_rt = op.sum((bpre_x-bx)**2 + (bpre_y-by)**2 + (cpre_x-cx)**2 + (cpre_y-cy)**2) * 0.5/o
        f_vmag = f_rtsmooth # + f_rt #TODO: the rt part of this needs to be debugged
        wgt = 0 if rt_knob is None else 2.0**rt_knob
        f = f_r if rt_knob is None else (f_r + f_vmag) if rt_knob == 0 else (f_r + wgt*f_vmag)
        md = pimms.merge(f_r.meta_data,
                         dict(f_retinotopy=f_r, f_vmag=f_vmag, f_rtsmooth=f_rtsmooth, f_rt=f_rt))
        object.__setattr__(f, 'meta_data', md)
        return f
    if pimms.is_map(f_ret):
        return pimms.lazy_map({va: curry(make_potential, va) for va in six.iterkeys(f_ret)})
    else: return make_potential(None)
Example #29
def import_measurements(measurements_filename=None):
    '''
    import_measurements is a calculator that imports measured data from a given filename or pair
    of filenames (in the case of surface modalities), and converts them into a matrix of
    measurement values that is the same size as the matrix 'predictions' produced by the sco plan.

    Required afferent values:
      @ measurements_filename Must be either the filename of a volume of measurements or a tuple of
        (lh_filename, rh_filename) if surface files are provided.

    Provided efferent values:
      @ measurements Will be an (n x m) matrix of the measured values whose rows correspond to the
        anatomical ids and whose columns correspond to the images.
      @ measurement_indices Will be a list, one per measurement voxel/vertex whose data appears in
        the measurements matrix, of the voxel-index triple (i,j,k) or the vertex id of each
        measurement; in the latter case, right-hemisphere ids will overlap with left-hemisphere ids,
        and the measurement_hemispheres value must be used to distinguish them.
      @ measurement_coordinates Will be a list of coordinates, one per element of the measurements
        matrix; if the imported measurements were from surface files, then this is None. Note that
        when the measurements filename refers to a NifTI file, this *always* uses the qform affine
        transform to produce coordinates.
      @ measurement_hemispheres Will be a list whose values are all either +1 or -1, one per row of
        the measurements matrix if the measurements that are imported come from surface files; if
        they do not, then this will be None. For surface files, +1 and -1 indicate LH and RH,
        respectively.
    '''
    if measurements_filename is None: return (None, None, None, None)
    meas = None
    idcs = None
    crds = None
    hems = None
    if len(measurements_filename) == 2:
        if pimms.is_map(measurements_filename):
            try:
                measurements_filename = (measurements_filename['lh'],
                                         measurements_filename['rh'])
            except Exception:
                measurements_filename = (measurements_filename['LH'],
                                         measurements_filename['RH'])
        meas = [None, None]
        idcs = [None, None]
        hems = [None, None]
        for (fnm, hsgn, hidx) in zip(measurements_filename, [1, -1], [0, 1]):
            vol = import_mri(fnm)
            vol = vol if len(vol) == n else vol.T
            if len(vol.shape) != 2:
                raise ValueError(
                    'measurement surf file %s must have 2 non-unit dimensions'
                    % fnm)
            vstd = np.std(vol, axis=1)
            ii = np.where(np.isfinite(vstd) * ~np.isclose(vstd, 0))[0]
            if len(ii) == 0:
                raise ValueError(
                    'measurement surf file %s contained no valid rows' % fnm)
            meas[hidx] = vol
            idcs[hidx] = ii
            hems[hidx] = np.full(len(vol), hsgn, dtype=np.int)
        if not np.array_equal(hsz[0][1:], hsz[1][1:]):
            raise ValueError(
                '(LH,RH) measurements dims must be the same: (%d, %d)' %
                tuple(hsz))
        (meas, idcs,
         hems) = [np.concatenate(x, axis=0) for x in [meas, idcs, hems]]
    else:
        img = import_mri(measurements_filename, 'object')
        vol = img.dataobj.get_unscaled()
        if len(vol.shape) != 4:
            raise ValueError('measurement volume files must have 4 dimensions')
        h = img.header
        tx = img.affine if isinstance(img, fsmgh.MGHImage) else h.get_qform()
        # we need to find the valid voxels; these are the ones that have non-zero variance and
        # that contain no NaNs or infinite values
        vstd = np.std(vol, axis=-1)
        idcs = np.where(np.isfinite(vstd) * ~np.isclose(vstd, 0))
        meas = vol[idcs]
        idcs = np.asarray(idcs, dtype=np.int).T
        crds = apply_affine(tx, np.asarray(idcs, dtype=np.float))
    for x in [meas, idcs, crds, hems]:
        if x is not None: x.setflags(write=False)
    return (meas, idcs, crds, hems)