Example 1
def _load(pdir, flnm, loadfn, *argmaps, **kwargs):
    # merge the argument maps (rightmost wins) and fill in the filename template
    args = pimms.merge(*(argmaps + (kwargs,)))
    flnm = flnm.format(**args)
    try:
        lpth = pdir.local_path(flnm)
        # an explicit 'load' instruction overrides the default load function
        loadfn = args['load'] if 'load' in args else loadfn
        dat = loadfn(lpth, args)
    except Exception:
        dat = None
    # check for miss instructions if needed
    if dat is None and 'miss' in args: miss = args['miss']
    else: miss = None
    if pimms.is_str(miss) and miss.lower() in ('error', 'raise', 'exception'):
        raise ValueError('File %s failed to load' % flnm)
    elif miss is not None:
        dat = miss(flnm, args)
    return dat
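The miss instruction gives callers control over load failures. A minimal usage sketch, assuming pdir is a neuropythy pseudo-path object with a local_path method (the loader and filenames here are hypothetical):

def load_text(path, args):
    with open(path, 'r') as f:
        return f.read()

# yields None quietly if the file cannot be loaded...
dat = _load(pdir, '{sub}_notes.txt', load_text, {'sub': 'S1201'})
# ...but raises instead when a miss='error' instruction is given:
dat = _load(pdir, '{sub}_notes.txt', load_text, {'sub': 'S1201'}, miss='error')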
Example 2
def to_image(img, image_type=None, meta_data=None, **kwargs):
    '''
    to_image(array) yields a Nifti1Image of the given array with default meta-data.
    to_image(array, image_type) yields an image object of the given type; image_type may either be
      an image class or a class name (see supported types below).
    to_image((array, meta_data)) uses the given mapping of meta-data to fill in the image's
      meta-data; note that meta_data may simply be an affine transformation matrix.
    to_image((array, affine, meta_data)) uses the given affine specifically (the given affine
      overrides any affine included in the meta_data).

    Note that the array may optionally be an image itself, in which case its meta-data is used as a
    starting point for the new meta-data. Any meta-data passed as a tuple overwrites this meta-data,
    and any meta-data passed as an optional argument overwrites this meta-data in turn.

    The first optional argument, image_type, is interpreted as an image type if possible; but if
    a meta-data mapping is passed as the first optional argument, it is used as the meta-data
    instead. Otherwise, the optional third argument is named meta_data, and any additional keyword
    arguments passed to to_image are merged into this meta_data object left-to-right (i.e.,
    keyword arguments overwrite the meta_data keys).
    '''
    # quick cleanup of args:
    # interpret a non-mapping meta_data argument (e.g. an affine) as meta-data, guarding None:
    if meta_data is not None and not pimms.is_map(meta_data):
        meta_data = to_image_meta_data(meta_data)
    meta_data = pimms.merge({} if meta_data is None else meta_data, kwargs)
    if image_type is None: image_type = 'nifti1'
    # deduce image type
    image_type = to_image_type(image_type)
    # okay, next, parse the image argument itself:
    if is_tuple(img):
        if len(img) == 1: (img, aff, mdat) = (img[0], None, None)
        elif len(img) == 2: (img, aff, mdat) = (img[0], None, img[1])
        elif len(img) == 3: (img, aff, mdat) = img
        else:
            raise ValueError(
                'cannot parse more than 3 elements from image tuple')
    else:
        (aff, mdat) = (None, None)
    # see if the img argument is an image object
    try:
        (img, aff0, mdat0) = (img.dataobj, img.affine, to_image_meta_data(img))
    except Exception:
        (aff0, mdat0) = (None, {})
    # check that the affine wasn't given as the meta-data (e.g. (img,aff) instead of (img,mdat))
    if aff is None and mdat is not None:
        try:
            (aff, mdat) = (to_affine(mdat, 3), {})
        except Exception:
            pass
    # parse the meta-data that has been given
    mdat = dict(pimms.merge(mdat0, {} if mdat is None else mdat, meta_data))
    # if there is an explicit affine, we put it into mdat now
    if aff is not None: mdat['affine'] = to_affine(aff, 3)
    if aff0 is not None and 'affine' not in mdat:
        mdat['affine'] = to_affine(aff0, 3)
    # okay, we create the image now:
    return image_type.create(img, meta_data=mdat)
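A few hedged usage sketches of the calling conventions described in the docstring (the array and affines below are invented):

import numpy as np

arr = np.zeros((16, 16, 16))
img1 = to_image(arr)                              # Nifti1Image with default meta-data
img2 = to_image((arr, np.eye(4)))                 # the affine is detected inside the tuple
img3 = to_image((arr, np.eye(4), {}), 'nifti1')   # explicit affine, meta-data, and type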
Example 3
def images_from_filemap(fmap):
    '''
    images_from_filemap(fmap) yields a persistent map of the MRImages tracked by the given filemap;
      in FreeSurfer subjects these are renamed and converted from their typical FreeSurfer names
      (such as 'ribbon') to forms that conform to the neuropythy naming conventions (such as
      'lh_gray_mask'). To access the data by their original names, use the filemap itself.
    '''
    imgmap = fmap.data_tree.image

    def img_loader(k):
        return lambda: imgmap[k]

    imgs = {k: img_loader(k) for k in six.iterkeys(imgmap)}

    def _make_mask(val, eq=True):
        rib = imgmap['ribbon']
        # realize the (possibly lazy) data array before comparing against the label value
        ribdat = np.asarray(rib.dataobj)
        arr = (ribdat == val) if eq else (ribdat != val)
        arr.setflags(write=False)
        return type(rib)(arr, rib.affine, rib.header)

    imgs['lh_gray_mask'] = lambda: _make_mask(3)
    imgs['lh_white_mask'] = lambda: _make_mask(2)
    imgs['rh_gray_mask'] = lambda: _make_mask(42)
    imgs['rh_white_mask'] = lambda: _make_mask(41)
    imgs['brain_mask'] = lambda: _make_mask(0, False)
    # merge in with the typical images
    return pimms.merge(fmap.data_tree.image, pimms.lazy_map(imgs))
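The returned map is lazy: each mask is built only when its key is first accessed. A small sketch, assuming fmap is a neuropythy filemap whose data tree includes a FreeSurfer ribbon image (label values 2/3 and 41/42 are the standard LH/RH white and gray ribbon codes used above):

imgs = images_from_filemap(fmap)
gm = imgs['lh_gray_mask']   # computed on first access as ribbon == 3
bm = imgs['brain_mask']     # ribbon != 0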
Example 4
def update_hemi(subname, hemis, hname):
    # get the original hemisphere...
    hemi = hemis[hname]
    stup = {'sub': subname, 'hemi': hname}
    pdat = {}
    sprops = (BensonWinawer2018Dataset.fsaverage_properties
              if subname == 'fsaverage' else
              BensonWinawer2018Dataset.subject_properties)
    # okay, now we want to load a bunch of data; start with properties
    for (propname, filename) in six.iteritems(sprops):
        filename = os.path.join(cache_directory, filename.format(stup))
        if not os.path.isfile(filename): continue
        # visual-area and polar-angle files get specialized loaders
        if   propname.endswith('visual_area'): ldr = _load_ints
        elif propname.endswith('polar_angle'): ldr = _load_angle
        else:                                  ldr = nyio.load
        pdat[propname] = curry(ldr, filename)
    # we can add this already...
    hemi = hemi.with_prop(pimms.lazy_map(pdat))
    # next, we want to grab the registrations...
    rdat = {}
    sregs = (BensonWinawer2018Dataset.fsaverage_registrations
             if subname == 'fsaverage' else
             BensonWinawer2018Dataset.subject_registrations)
    for (rname, filename) in six.iteritems(sregs):
        filename = os.path.join(cache_directory, filename.format(stup))
        if not os.path.isfile(filename): continue
        rdat[rname] = curry(nyio.load, filename, 'freesurfer_geometry')
    hemi = hemi.copy(_registrations=pimms.merge(
        hemi.registrations, pimms.lazy_map(rdat)))
    # that's all
    return hemi
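The curry/pimms.lazy_map pairing above is the deferred-loading idiom used throughout these examples: every map value is a zero-argument callable, so no file is read until its property is first requested. A minimal sketch of the pattern (load_mgz and the filename are invented; curry is the helper whose core appears in Example 15):

import pimms

def load_mgz(path):
    print('loading %s' % path)  # stand-in for a real file read
    return path

lazy = pimms.lazy_map({'curvature': curry(load_mgz, 'lh.curv.mgz')})
lazy['curvature']  # load_mgz runs here, on first access, and the result is cached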
Example 5
def _parse_path(flnm, path, spaths, path_parameters, inst):
    flnm = flnm.format(**pimms.merge(path_parameters, inst))
    p0 = None
    for k in six.iterkeys(spaths):
        if flnm.startswith(k + ':'):
            (flnm, p0) = (flnm[(len(k) + 1):], k)
            break
    return (p0, flnm)
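A quick sketch of the prefix handling: a filename may name one of the supplementary paths with a 'key:' prefix, which gets split off and returned separately (the values below are invented):

spaths = {'osf': '<pseudo-path>'}
_parse_path('osf:retinotopy/{sub}.mgz', None, spaths, {'sub': 'S1201'}, {})
#=> ('osf', 'retinotopy/S1201.mgz')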
Example 6
def with_meta(self, *args, **kwargs):
    '''
    obj.with_meta(...) collapses the given arguments with pimms.merge into the object's current
    meta_data map and yields a new object with the new meta-data.
    '''
    md = pimms.merge(self.meta_data, *(args + (kwargs,)))
    if md is self.meta_data: return self
    else: return self.copy(meta_data=md)
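pimms.merge is right-biased, so arguments passed to with_meta overwrite keys already present in the object's meta-data, and later arguments overwrite earlier ones. A hedged sketch (obj stands for any neuropythy object exposing this method):

obj2 = obj.with_meta({'source': 'scanner'}, source='manual')
obj2.meta_data['source']
#=> 'manual'   # the keyword map is merged last, so it wins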
Example 7
def __init__(self, meta_data=None, create_directories=True, create_mode=0o755):
    if meta_data is None: meta_data = BensonWinawer2018Dataset.prf_meta_data
    else: meta_data = pimms.merge(BensonWinawer2018Dataset.prf_meta_data, meta_data)
    Dataset.__init__(self, 'benson_winawer_2018',
                     meta_data=meta_data,
                     custom_directory=config['benson_winawer_2018_path'],
                     create_directories=create_directories,
                     create_mode=create_mode)
Example 8
def to_image(img, image_type=None, spec=None, **kwargs):
    '''
    to_image(array) yields a Nifti1Image of the given array with default meta-data spec.
    to_image(array, image_type) yields an image object of the given type; image_type may either be
      an image class or a class name (see supported types below).
    to_image((array, spec)) uses the given mapping of meta-data (spec) to construct the image-spec;
      note that spec may simply be an affine transformation matrix or may be an image.
    to_image((array, affine, spec)) uses the given affine specifically (the given affine
      overrides any affine included in the spec meta-data).
    to_image(imspec) constructs an image with the properties specified in the given imspec; the
      special optional argument fill (default: 0.0) can be set to something else to specify what the
      default cell value should be.

    Note that the array may optionally be an image itself, in which case its spec is used as a
    starting point for the new spec. Any spec-data passed as a tuple overwrites this spec-data,
    and any spec-data passed as an optional argument overwrites this spec-data in turn.

    The first optional argument is interpreted as an image type if possible; but if a spec-data
    mapping or equivalent (e.g., an image header or affine) is passed as the first optional
    argument, it is used as such. Otherwise, the optional third argument is named spec, and any
    additional keyword arguments passed to to_image are merged into this spec object left-to-right
    (i.e., keyword arguments overwrite the spec keys).

    If no affine is given and the image object given is an array, then a FreeSurfer-like transform
    that places the origin at the center of the image is used.
    '''
    # make sure we return unchanged if no change requested
    if is_image(img) and image_type is None and spec is None and len(kwargs) == 0: return img
    elif is_image_spec(img):
        fill = kwargs.pop('fill', 0.0)
        return to_image(image_spec_to_image(img, fill=fill),
                        image_type=image_type, spec=spec, **kwargs)
    # quick cleanup of args:
    # we have a variety of things that go into spec; in order (where later overwrites earlier):
    # (1) img spec, (2) image_type map (if not an image type) (3) spec, (4) kw args
    # see if image_type is actually an image type (might be a spec/image)...
    if pimms.is_str(image_type) or isinstance(image_type, type):
        (image_type, s2) = (to_image_type(image_type), {})
    else: 
        (image_type, s2) = (None, {} if image_type is None else to_image_spec(image_type))
    if image_type is None: image_type = image_types_by_name['nifti1']
    s3 = {} if spec is None else to_image_spec(spec)
    # okay, next, parse the image argument itself:
    if is_tuple(img):
        if   len(img) == 1: (img,aff,s1) = (img[0], None, {})
        elif len(img) == 2: (img,aff,s1) = (img[0], None, img[1])
        elif len(img) == 3: (img,aff,s1) = img
        else: raise ValueError('cannot parse more than 3 elements from image tuple')
        # check that the affine wasn't given as the meta-data (e.g. (img,aff) instead of (img,mdat))
        if aff is None and s1 is not None:
            try:    (aff, s1) = (to_affine(s1, 3), {})
            except Exception: pass
    else: (aff,s1) = (None, {})
    s0 = to_image_spec(img)
    spec = pimms.merge(s0, s1, s2, s3, kwargs)
    if aff is not None: spec = pimms.assoc(spec, affine=to_affine(aff, 3))
    # okay, we create the image now:
    return image_type.create(img, meta_data=spec)
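A sketch of how the merge order plays out (the spec mapping below is invented, and to_image_spec is assumed to accept a plain mapping; the explicit tuple affine is applied last, so it beats the affine in the spec):

import numpy as np

arr = np.zeros((16, 16, 16))
img = to_image((arr, np.eye(4)), spec={'affine': np.diag([2., 2., 2., 1.])})
# the resulting image carries the identity affine from the tuple, not the scaled one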
Example 9
def calc_arguments(args):
    '''
    calc_arguments is a calculator that parses the command-line arguments for the registration
    command and produces the subject, the model, the log function, and the additional options.
    '''
    (args, opts) = _retinotopy_parser(args)
    # We do some of the options right here...
    if opts['help']:
        print(info, file=sys.stdout)
        sys.exit(1)
    # and if we are verbose, lets setup a note function
    verbose = opts['verbose']
    def note(s):
        if verbose:
            print(s, file=sys.stdout)
            sys.stdout.flush()
        return verbose
    def error(s):
        print(s, file=sys.stderr)
        sys.stderr.flush()
        sys.exit(1)
    if len(args) < 1: error('subject argument is required')
    # Add the subjects directory, if there is one
    if 'subjects_dir' in opts and opts['subjects_dir'] is not None:
        add_subject_path(opts['subjects_dir'])
    # Get the subject now
    try: sub = subject(args[0])
    except Exception: error('Failed to load subject %s' % args[0])
    # and the model
    if len(args) > 1:       mdl_name = args[1]
    elif opts['model_sym']: mdl_name = 'schira'
    else:                   mdl_name = 'benson17'
    try:
        if opts['model_sym']:
            model = {h:retinotopy_model(mdl_name).persist() for h in ['lh', 'rh']}
        else:
            model = {h:retinotopy_model(mdl_name, hemi=h).persist() for h in ['lh', 'rh']}
    except Exception: error('Could not load retinotopy model %s' % mdl_name)

    # Now, we want to run a few filters on the options
    # Parse the simple numbers
    for o in ['weight_min', 'scale', 'max_step_size', 'max_out_eccen',
              'max_in_eccen', 'min_in_eccen', 'field_sign_weight', 'radius_weight']:
        opts[o] = float(opts[o])
    opts['max_steps'] = int(opts['max_steps'])
    # Make a note:
    note('Processing subject: %s' % sub.name)
    del opts['help']
    del opts['verbose']
    del opts['subjects_dir']
    # That's all we need!
    return pimms.merge(opts,
                       {'subject': sub.persist(),
                        'model':   pyr.pmap(model),
                        'options': pyr.pmap(opts),
                        'note':    note,
                        'error':   error})
Example 10
    def create(self, arr, meta_data={}, **kwargs):
        '''
        itype.create(dataobj) yields an image of the given image type itype that represents the
          given data object dataobj.
        itype.create(dataobj, meta_data) uses the given meta/header data to create the image.

        Any number of keyword arguments may also be appended to the call; these are merged into the
        meta_data argument.
        '''
        return self.to_image(arr, hdat=pimms.merge(meta_data, kwargs))
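Since the keyword arguments are merged after meta_data, per-call keywords override keys in the mapping; a brief sketch (itype stands for an image-type object from this module, and the meta-data keys are assumptions):

import numpy as np

arr = np.zeros((8, 8, 8))
img = itype.create(arr, {'affine': np.eye(4)}, affine=np.diag([2., 2., 2., 1.]))
# the keyword affine (the scaled one) is the value that survives the merge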
Example 11
def minimize(self, x0, **kwargs):
    '''
    pf.minimize(x0) minimizes the given potential function starting at the given point x0; any
      additional options are passed along to scipy.optimize.minimize.
    '''
    x0 = np.asarray(x0)
    kwargs = pimms.merge({'jac': self.jac(), 'method': 'CG'}, kwargs)
    res = spopt.minimize(self.fun(), x0.flatten(), **kwargs)
    res.x = np.reshape(res.x, x0.shape)
    return res
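Because the defaults are merged ahead of the caller's options, both the method and the jacobian can be overridden per call; a sketch assuming pf is a potential-function object from this library and x0 is a numeric array:

res = pf.minimize(x0)                      # uses jac=pf.jac() and method='CG'
res = pf.minimize(x0, method='L-BFGS-B')   # caller's method overwrites the 'CG' default
print(res.x.shape)                         # reshaped to match the input x0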
Example 12
def _load(pdir, flnm, loadfn, *argmaps, **kwargs):
    # merge the argument maps up front (rightmost wins) so args exists even if loading fails
    args = pimms.merge(*argmaps, **kwargs)
    try:
        lpth = pdir.local_path(flnm)
        # explicit 'load' and 'filt' instructions override the defaults
        loadfn = args['load'] if 'load' in args else loadfn
        filtfn = args['filt'] if 'filt' in args else lambda x, y: x
        dat = loadfn(lpth, args)
        dat = filtfn(dat, args)
    except Exception:
        dat = None
    # check for miss instructions if needed
    if dat is None and 'miss' in args: miss = args['miss']
    else: miss = None
    if pimms.is_str(miss) and miss.lower() in ('error', 'raise', 'exception'):
        raise ValueError('File %s failed to load' % flnm)
    elif miss is not None:
        dat = miss(flnm, args)
    return dat
Example 13
def cortex_from_filemap(fmap, name, affine=None):
    '''
    cortex_from_filemap(fmap, name) yields a cortex object from the given filemap; the chirality
    ('lh' or 'rh') is deduced from the first two characters of the given name, and the optional
    argument affine is passed along to the cortex object.
    '''
    chirality = name[:2].lower()
    # get the relevant hemi-data
    hdat = fmap.data_tree.hemi[name]
    # we need the tessellation at build-time, so let's create that now:
    tris = hdat.tess['white']
    # this tells us the max number of vertices
    n = np.max(tris) + 1

    # Properties: we want to merge a bunch of things together...
    # for labels, weights, annots, we need to worry about loading alts:
    def _load_with_alt(k, s0, sa, trfn):
        if s0 is not None:
            try:
                u = s0.get(k, None)
            except Exception:
                u = None
        else:
            u = None
        if u is None and sa is not None:
            try:
                u = sa.get(k, None)
            except Exception:
                u = None
        if u is None:
            raise ValueError('Exception while loading property %s' % k)
        else:
            return u if trfn is None else trfn(u)

    def _lbltr(ll):
        l = np.zeros(n, dtype='bool')
        l[ll[0]] = True
        l.setflags(write=False)
        return l

    def _wgttr(ll):
        w = np.zeros(n, dtype='float')
        w[ll[0]] = ll[1]
        w.setflags(write=False)
        return w

    def _anotr(ll):
        ll[0].setflags(write=False)
        return ll[0]

    p = {}
    from itertools import chain
    l = hdat.label if hasattr(hdat, 'label') else {}
    al = hdat.alt_label if hasattr(hdat, 'alt_label') else {}
    for k in set(chain(six.iterkeys(l), six.iterkeys(al))):
        p[k + '_label'] = curry(_load_with_alt, k, l, al, _lbltr)
    w = hdat.weight if hasattr(hdat, 'weight') else {}
    aw = hdat.alt_weight if hasattr(hdat, 'alt_weight') else {}
    for k in set(chain(six.iterkeys(w), six.iterkeys(aw))):
        p[k + '_weight'] = curry(_load_with_alt, k, w, aw, _wgttr)
    a = hdat.annot if hasattr(hdat, 'annot') else {}
    aa = hdat.alt_annot if hasattr(hdat, 'alt_annot') else {}
    for k in set(chain(six.iterkeys(a), six.iterkeys(aa))):
        p[k] = curry(_load_with_alt, k, a, aa, _anotr)
    props = pimms.merge(hdat.property, pimms.lazy_map(p))
    tess = geo.Tesselation(tris, properties=props)
    # if this is a subject that exists in the library, we may want to add some files:
    if name is None:
        pd = fmap.pseudo_paths[None]._path_data
        name = pd['pathmod'].split(fmap.actual_path)[1]
    regs = hdat.registration
    # Okay, make the cortex object!
    md = {'file_map': fmap}
    if name is not None: md['subject_id'] = name
    return mri.Cortex(chirality,
                      tess,
                      hdat.surface,
                      regs,
                      affine=affine,
                      meta_data=md).persist()
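A hedged usage sketch; fmap stands for a neuropythy filemap whose data_tree contains a hemisphere entry named 'lh' (the attribute names on the resulting cortex are assumptions):

ctx = cortex_from_filemap(fmap, 'lh')      # chirality 'lh' is deduced from the name
lbls = ctx.tess.properties                 # includes the lazily built *_label/*_weight entries
fmap is ctx.meta_data['file_map']
#=> True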
Example 14
 def make_potential(va):
     global_field_sign = None if va is None else visual_area_field_signs.get(va)
     f_r = f_ret if va is None else f_ret[va]
     # The initial parameter vector is stored in the meta-data:
     X0 = f_r.meta_data['X0']
     # A few other handy pieces of data we can extract:
     fieldsign = visual_area_field_signs.get(va)
     submesh = f_r.meta_data['mesh']
     sxyz = submesh.coordinates
     n = submesh.vertex_count
     (u,v) = submesh.tess.indexed_edges
     selen = submesh.edge_lengths
     sarea = submesh.face_areas
     m = submesh.tess.edge_count
     fs = submesh.tess.indexed_faces
     neis = submesh.tess.indexed_neighborhoods
     fangs = submesh.face_angles
     # we're adding r and t (radial and tangential visual magnification) pseudo-parameters to
     # each vertex; r and t are derived from the position of other vertices; our first step is
     # to derive these values; for this we start with the parameters themselves:
     (x,y) = [op.identity[np.arange(k, 2*n, 2)] for k in (0,1)]
     # okay, we need to setup a bunch of least-squares solutions, one for each vertex:
     nneis = np.asarray([len(nn) for nn in neis])
     maxneis = np.max(nneis)
     thts = op.atan2(y, x)
     eccs = op.compose(op.piecewise(op.identity, ((-1e-9, 1e-9), 1)),
                       op.sqrt(x**2 + y**2))
     coss = x/eccs
     sins = y/eccs
     # organize neighbors:
     # neis becomes a list of rows of 1st neighbor, second neighbor etc. with -1 indicating none
     neis = np.transpose([nei + (-1,)*(maxneis - len(nei)) for nei in neis])
     qnei = (neis > -1) # mark where there are actually neighbors
     neis[~qnei] = 0 # we want the -1s (now 0s) to behave okay when passed to a potential index
     # okay, walk through the neighbors setting up the least squares
     (r, t) = (None, None)
     for (k,q,nei) in zip(range(len(neis)), qnei.astype('float'), neis):
         xx = x[nei] - x
         yy = y[nei] - y
         sd = np.sum((sxyz[:,nei].T - sxyz[:,k])**2, axis=1)
         (xx, yy) = (xx*coss + yy*sins, yy*coss - xx*sins)
         xterm = (op.abs(xx) * q)
         yterm = (op.abs(yy) * q)
         r = xterm if r is None else (r + xterm)
         t = yterm if t is None else (t + yterm)
     (r, t) = [uu * zinv(nneis) for uu in (r, t)]
     # for neighboring edges, we want r and t to be similar to each other
     f_rtsmooth = op.sum((r[v]-r[u])**2 + (t[v]-t[u])**2) / m
     # we also want r and t to predict the radial and tangential magnification of the node, so
     # we want to make sure that edges are the right distances away from each other based on the
     # surface edge lengths and the distance around the vertex at the center
     # for this we'll want some constant info about the surface edges/angles
     # okay, in terms of the visual field coordinates of the parameters, we will want to know
     # the angular position of each node
     # organize face info
     mnden   = 0.0001
     (e,qs,qt) = np.transpose([(i,e[0],e[1]) for (i,e) in enumerate(submesh.tess.edge_faces)
                               if len(e) == 2 and selen[i] > mnden
                               if sarea[e[0]] > mnden and sarea[e[1]] > mnden])
     (fis,q) = np.unique(np.concatenate([qs,qt]), return_inverse=True)
     (qs,qt)   = np.reshape(q, (2,-1))
     o       = len(fis)
     faces   = fs[:,fis]
     fangs   = fangs[:,fis]
     varea   = op.signed_face_areas(faces)
     srfangmtx = sps.csr_matrix(
         (fangs.flatten(),
          (faces.flatten(), np.concatenate([np.arange(o), np.arange(o), np.arange(o)]))),
         (n, o))
     srfangtot = flattest(srfangmtx.sum(axis=1))
     # normalize this angle matrix by the total and put it back in the same order as faces
     srfangmtx = zdivide(srfangmtx, srfangtot / (np.pi*2)).tocsr().T
     nrmsrfang = np.array([sps.find(srfangmtx[k])[2][np.argsort(fs[:,k])] for k in range(o)]).T
     # okay, now compare these to the actual angles;
     # we also want to know, for each edge, the angle relative to the radial axis; let's start
     # by organizing the faces into the units we compute over:
     (fa,fb,fc) = [np.concatenate([faces[k], faces[(k+1)%3], faces[(k+2)%3]]) for k in range(3)]
     atht = thts[fa]
     # we only have to worry about the (a,b) and (a,c) edges now; from the perspective of a...
     bphi = op.atan2(y[fb] - y[fa], x[fb] - x[fa]) - atht
     cphi = op.atan2(y[fc] - y[fa], x[fc] - x[fa]) - atht
     ((bcos,bsin),(ccos,csin)) = bccssn = [(op.cos(q),op.sin(q)) for q in (bphi,cphi)]
     # the distance should be predicted by surface edge length times ellipse-magnification
     # prediction; we have made uphi and vphi so that radial axis is x axis and tan axis is y
     (ra,ta) = (op.abs(r[fa]), op.abs(t[fa]))
     bslen = np.sqrt(np.sum((sxyz[:,fb] - sxyz[:,fa])**2, axis=0))
     cslen = np.sqrt(np.sum((sxyz[:,fc] - sxyz[:,fa])**2, axis=0))
     bpre_x = bcos * ra * bslen
     bpre_y = bsin * ta * bslen
     cpre_x = ccos * ra * cslen
     cpre_y = csin * ta * cslen
     # if there's a global field sign, we want to invert these predictions when the measured
     # angle is the wrong sign
     if global_field_sign is not None:
         varea_f = varea[np.concatenate([np.arange(o) for _ in range(3)])] * global_field_sign
         fspos = (op.sign(varea_f) + 1)/2
         fsneg = 1 - fspos
         (bpre_x,bpre_y,cpre_x,cpre_y) = (
             bpre_x*fspos - cpre_x*fsneg, bpre_y*fspos - cpre_y*fsneg,
             cpre_x*fspos - bpre_x*fsneg, cpre_y*fspos - bpre_y*fsneg)
     (ax,ay,bx,by,cx,cy) = [x[fa],y[fa],x[fb],y[fb],x[fc],y[fc]]
     (cost,sint) = [op.cos(atht), op.sin(atht)]
     (bpre_x, bpre_y) = (bpre_x*cost - bpre_y*sint + ax, bpre_x*sint + bpre_y*cost + ay)
     (cpre_x, cpre_y) = (cpre_x*cost - cpre_y*sint + ax, cpre_x*sint + cpre_y*cost + ay)
     # okay, we can compare the positions now...
     f_rt = op.sum((bpre_x-bx)**2 + (bpre_y-by)**2 + (cpre_x-cx)**2 + (cpre_y-cy)**2) * 0.5/o
     f_vmag = f_rtsmooth # + f_rt #TODO: the rt part of this needs to be debugged
     wgt = 0 if rt_knob is None else 2.0**rt_knob
     f = f_r if rt_knob is None else (f_r + f_vmag) if rt_knob == 0 else (f_r + wgt*f_vmag)
     md = pimms.merge(f_r.meta_data,
                      dict(f_retinotopy=f_r, f_vmag=f_vmag, f_rtsmooth=f_rtsmooth, f_rt=f_rt))
     object.__setattr__(f, 'meta_data', md)
     return f
Example 15
def curried_f(*args, **kwargs):
    return f(*(args0 + args), **pimms.merge(kwargs0, kwargs))
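This fragment is the inner closure of the curry helper used throughout these examples; a minimal self-contained sketch of the enclosing function, under the assumption that it simply captures the pre-supplied arguments:

import pimms

def curry(f, *args0, **kwargs0):
    '''curry(f, a, b, k=v) yields a function g such that g(c, k2=v2) calls
       f(a, b, c, k=v, k2=v2); later keyword arguments overwrite earlier ones.'''
    def curried_f(*args, **kwargs):
        return f(*(args0 + args), **pimms.merge(kwargs0, kwargs))
    return curried_f

add_to = curry(lambda a, b: a + b, 10)
add_to(5)
#=> 15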
Example 16
class BensonWinawer2018Dataset(Dataset):
    '''
    neuropythy.data['benson_winawer_2018'] is a Dataset containing the publicly provided data from
    the following publication:

    Benson NC, Winawer J (2018) Bayesian analysis of retinotopic maps. BioRxiv. DOI:10.1101/325597

    These data include 8 FreeSurfer subjects each with a set of measured and inferred retinotopic
    maps. These data are provided as follows:
    
    dset = neuropythy.data['benson_winawer_2018']
    sorted(dset.subjects.keys())
    #=> ['S1201', 'S1202', 'S1203', 'S1204', 'S1205', 'S1206', 'S1207', 'S1208']

    dset.subjects['S1202']
    #=> Subject(<S1202>, <'/Users/nben/.cache/benson_winawer_2018/freesurfer_subjects/S1202'>)

    dset.subjects['S1202'].lh
    #=> Cortex(<LH>, <301348 faces>, <150676 vertices>)

    sorted(dset.subjects['S1201'].lh.properties.keys())
    #=> ['convexity', 'curvature', 'index', 'inf-prf00_eccentricity', 'inf-prf00_polar_angle',
    #=>  'inf-prf_radius', 'inf-prf_visual_area', 'label', 'midgray_surface_area',
    #=>  'pial_surface_area', 'prf00_eccentricity', 'prf00_polar_angle', 'prf00_radius',
    #=>  'prf00_variance_explained', 'prf01_eccentricity', 'prf01_polar_angle', 'prf01_radius',
    #=>  'prf01_variance_explained', 'prf02_eccentricity', 'prf02_polar_angle', 'prf02_radius',
    #=>  'prf02_variance_explained', 'prf03_eccentricity', 'prf03_polar_angle', 'prf03_radius',
    #=>  'prf03_variance_explained', 'prf04_eccentricity', 'prf04_polar_angle', 'prf04_radius',
    #=>  'prf04_variance_explained', 'prf05_eccentricity', 'prf05_polar_angle', 'prf05_radius',
    #=>  'prf05_variance_explained', 'prf06_eccentricity', 'prf06_polar_angle', 'prf06_radius',
    #=>  'prf06_variance_explained', 'prf07_eccentricity', 'prf07_polar_angle', 'prf07_radius',
    #=>  'prf07_variance_explained', 'prf08_eccentricity', 'prf08_polar_angle', 'prf08_radius',
    #=>  'prf08_variance_explained', 'prf09_eccentricity', 'prf09_polar_angle', 'prf09_radius',
    #=>  'prf09_variance_explained', 'prf10_eccentricity', 'prf10_polar_angle', 'prf10_radius',
    #=>  'prf10_variance_explained', 'prf11_eccentricity', 'prf11_polar_angle', 'prf11_radius',
    #=>  'prf11_variance_explained', 'prf12_eccentricity', 'prf12_polar_angle', 'prf12_radius',
    #=>  'prf12_variance_explained', 'prf13_eccentricity', 'prf13_polar_angle', 'prf13_radius',
    #=>  'prf13_variance_explained', 'prf14_eccentricity', 'prf14_polar_angle', 'prf14_radius',
    #=>  'prf14_variance_explained', 'prf15_eccentricity', 'prf15_polar_angle', 'prf15_radius',
    #=>  'prf15_variance_explained', 'prf16_eccentricity', 'prf16_polar_angle', 'prf16_radius',
    #=>  'prf16_variance_explained', 'prf17_eccentricity', 'prf17_polar_angle', 'prf17_radius',
    #=>  'prf17_variance_explained', 'prf18_eccentricity', 'prf18_polar_angle', 'prf18_radius',
    #=>  'prf18_variance_explained', 'prf19_eccentricity', 'prf19_polar_angle', 'prf19_radius',
    #=>  'prf19_variance_explained', 'prf20_eccentricity', 'prf20_polar_angle', 'prf20_radius',
    #=>  'prf20_variance_explained', 'prf21_eccentricity', 'prf21_polar_angle', 'prf21_radius',
    #=>  'prf21_variance_explained', 'prf_eccentricity', 'prf_polar_angle', 'prf_radius',
    #=>  'prf_variance_explained', 'prior-prf_eccentricity', 'prior-prf_polar_angle',
    #=>  'prior-prf_radius', 'prior-prf_visual_area', 'thickness', 'volume', 'white_surface_area',
    #=>  'wide-prf_eccentricity', 'wide-prf_polar_angle', 'wide-prf_radius',
    #=>  'wide-prf_variance_explained']

    sorted(dset.meta_data.keys())
    #=> ['prf', 'prf00', 'prf01', 'prf02', 'prf03', 'prf04', 'prf05', 'prf06', 'prf07', 'prf08',
    #=>  'prf09', 'prf10', 'prf11', 'prf12', 'prf13', 'prf14', 'prf15', 'prf16', 'prf17', 'prf18',
    #=>  'prf19', 'prf20', 'prf21']

    dset.meta_data['prf04']
    #=> {'scan_seconds': 576, 'name': 'training14', 'id': 14, 'scans': 3}

    # Note that the following lines will take a while to calculate/load from cache due to the
    # size of the data; additionally, the generated cache file is ~1GB.

    dset.v123_table
    #=> itable(('inf_x', 'label', 'radius', 'eccentricity', 'inf_radius', 'hemi', 'x', 'subject',
    #=>         'y', 'inf_y', 'inf_eccentricity', 'midgray_surface_area', 'inf_polar_angle',
    #=>         'polar_angle', 'inf_visual_area', 'pial_surface_area', 'dataset_id', 'dataset_name',
    #=>         'variance_explained', 'white_surface_area'),
    #=>        <50278184 rows>)

    dset.v123_table[100]
    #=> {'inf_x':      4.0969896, 'label': 100, 'radius': 2.096148,    'eccentricity': 3.3061967,
    #=>  'inf_radius': 1.5482311, 'hemi': 'lh', 'x': 0.032615896, 'subject': 'S1208', 'y': -3.30603,
    #=>  'inf_y': -2.0219889, 'inf_eccentricity': 4.5687814, 'midgray_surface_area': 1.072214,
    #=>  'inf_polar_angle': 116.267746, 'polar_angle': 179.43477, 'inf_visual_area': 3,
    #=>  'pial_surface_area': 1.3856983, 'dataset_id': 10, 'dataset_name': 'prf10',
    #=>  'variance_explained': 0.054989286, 'white_surface_area': 0.75872976}
    '''
    dataset_urls = {'analyses':              'https://osf.io/cpfa8/download',
                    'retinotopy':            'https://osf.io/m4k8q/download',
                    #'wang2015':              'https://osf.io/rx9ca/download',
                    'freesurfer_subjects':   'https://osf.io/pu9js/download'}
    prf_meta_data = pyr.m(prf00=pyr.m(id=0,  name='validation', scans=6,  scan_seconds=192*6),
                          prf01=pyr.m(id=1,  name='training01', scans=1,  scan_seconds=192),
                          prf02=pyr.m(id=2,  name='training02', scans=1,  scan_seconds=192),
                          prf03=pyr.m(id=3,  name='training03', scans=1,  scan_seconds=192),
                          prf04=pyr.m(id=4,  name='training04', scans=1,  scan_seconds=192),
                          prf05=pyr.m(id=5,  name='training05', scans=1,  scan_seconds=192),
                          prf06=pyr.m(id=6,  name='training06', scans=1,  scan_seconds=192),
                          prf07=pyr.m(id=7,  name='training07', scans=2,  scan_seconds=192*2),
                          prf08=pyr.m(id=8,  name='training08', scans=2,  scan_seconds=192*2),
                          prf09=pyr.m(id=9,  name='training09', scans=2,  scan_seconds=192*2),
                          prf10=pyr.m(id=10, name='training10', scans=2,  scan_seconds=192*2),
                          prf11=pyr.m(id=11, name='training11', scans=2,  scan_seconds=192*2),
                          prf12=pyr.m(id=12, name='training12', scans=3,  scan_seconds=192*3),
                          prf13=pyr.m(id=13, name='training13', scans=3,  scan_seconds=192*3),
                          prf14=pyr.m(id=14, name='training14', scans=3,  scan_seconds=192*3),
                          prf15=pyr.m(id=15, name='training15', scans=3,  scan_seconds=192*3),
                          prf16=pyr.m(id=16, name='training16', scans=4,  scan_seconds=192*4),
                          prf17=pyr.m(id=17, name='training17', scans=4,  scan_seconds=192*4),
                          prf18=pyr.m(id=18, name='training18', scans=4,  scan_seconds=192*4),
                          prf19=pyr.m(id=19, name='training19', scans=5,  scan_seconds=192*5),
                          prf20=pyr.m(id=20, name='training20', scans=5,  scan_seconds=192*5),
                          prf21=pyr.m(id=21, name='training21', scans=6,  scan_seconds=192*6),
                          prf  =pyr.m(id=99, name='full',       scans=12, scan_seconds=192*12))

    def __init__(self, meta_data=None, create_directories=True, create_mode=0o755):
        if meta_data is None: meta_data = BensonWinawer2018Dataset.prf_meta_data
        else: meta_data = pimms.merge(BensonWinawer2018Dataset.prf_meta_data, meta_data)
        Dataset.__init__(self, 'benson_winawer_2018',
                         meta_data=meta_data,
                         custom_directory=config['benson_winawer_2018_path'],
                         create_directories=create_directories,
                         create_mode=create_mode)
    @staticmethod
    def download(path, create_directories=True, mode=0o755, overwrite=False):
        '''
        BensonWinawer2018Dataset.download(path) downloads the Benson and Winawer (2018) dataset into
          the directory given by path. If the dataset is already found there, then it will not be
          overwritten.

        The following optional parameters may be provided:
          * create_directories (default: True) may be set to False to indicate that the path should
            not be created if it does not already exist.
          * mode (default: 0o755) specifies the permissions that should be used if the directory is
            created.
          * overwrite (default: False) may be set to True to indicate that the dataset should be
            overwritten if it is already found.
        '''
        dataset_urls = BensonWinawer2018Dataset.dataset_urls
        if not os.path.isdir(path):
            if not create_directories: raise ValueError('Path given to download() does not exist')
            else: os.makedirs(path, mode)
        if not overwrite:
            if all(os.path.isdir(os.path.join(path, x)) for x in six.iterkeys(dataset_urls)):
                return path
            elif any(os.path.isdir(os.path.join(path, x)) for x in six.iterkeys(dataset_urls)):
                raise ValueError('some but not all of dataset already downloaded')
        # okay, go through the urls...
        logging.info('neuropythy: Downloading Benson and Winawer (2018) data from osf.io...')
        for (dirname, durl) in six.iteritems(dataset_urls):
            # download the url...
            tgz_file = os.path.join(path, dirname + '.tar.gz')
            logging.info('            Fetching "%s"', tgz_file)
            with urllib.request.urlopen(durl) as response:
                with open(tgz_file, 'wb') as fl:
                    shutil.copyfileobj(response, fl)
            if not tarfile.is_tarfile(tgz_file):
                raise ValueError('Error when downloading %s: not a tar file' % tgz_file)
            # now unzip it...
            logging.info('            Extracting "%s"', tgz_file)
            with tarfile.open(tgz_file, 'r:gz') as tar:
                tar.extractall(path)
            # and delete the tar.gz file
            os.remove(tgz_file)
        # That's all!
        return path

    subject_properties = pimms.merge(
        # retinotopy data
        {('%s_%s' % (dset, pname)): os.path.join(
            'retinotopy', '{0[sub]}',
            ('{0[hemi]}' + 
             (('_%02d:%02d_' % (dsmeta['id'], dsmeta['scans'])) if dsmeta['id'] != 99 else '_') + 
             pname_file + '.mgz'))
         for (dset, dsmeta)     in six.iteritems(prf_meta_data)
         for (pname,pname_file) in six.iteritems({'polar_angle':'angle', 'eccentricity':'eccen',
                                                  'radius':'prfsz', 'variance_explained':'vexpl'})},
        # wide-field dataset (will be ignored for subjects other than S1201)
        {('wide-prf_%s' % pname): os.path.join(
            'retinotopy', '{0[sub]}',
            ('{0[hemi]}_widef_' + pname_file + '.mgz'))
         for (pname,pname_file) in six.iteritems({'polar_angle':'angle', 'eccentricity':'eccen',
                                                  'radius':'prfsz', 'variance_explained':'vexpl'})},
        # analyses data
        {('inf-%s_%s' % (dset, pname)): os.path.join(
            'analyses', '{0[sub]}',
            ('{0[hemi]}' + 
             ('.%02d.%s_steps=02500_scale=20.00_clip=12_prior=retinotopy.mgz' % (dsmeta['id'],
                                                                                 pname_file))))
         for (dset, dsmeta)     in six.iteritems(prf_meta_data)
         for (pname,pname_file) in six.iteritems({'polar_angle':'angle', 'eccentricity':'eccen',
                                                  'radius':'sigma', 'visual_area':'varea'})},
        # Benson14 data
        {('prior-prf_%s' % pname): os.path.join(
            'analyses', '{0[sub]}', ('{0[hemi]}.benson14_' + pname_file + '.mgz'))
         for (pname, pname_file) in six.iteritems({'polar_angle':'angle', 'eccentricity':'eccen',
                                                   'radius':'sigma', 'visual_area':'varea'})})
    subject_registrations = pyr.pmap(
        {('%s_retinotopy' % dset): os.path.join(
            'analyses', '{0[sub]}',
            ('{0[hemi]}' + 
             ('.%02d.retinotopy_steps=02500_scale=20.0_clip=12_prior=retinotopy.sphere.reg' %
                 dsmeta['id'])))
         for (dset, dsmeta) in six.iteritems(prf_meta_data)})

    @staticmethod
    def load_subject(cache_directory, sid):
        '''
        BensonWinawer2018Dataset.load_subject(dir, subjid) loads the given subject ID from the given
          Benson and Winawer (2018) cache directory. This directory must contain the relevant
          freesurfer_subjects/, retinotopy/, and analyses/, directories (they should be
          auto-downloaded if accessed via the databases interface).
        '''
        if pimms.is_int(sid): sid = 'S12%02d' % sid
        sub = freesurfer_subject(os.path.join(cache_directory, 'freesurfer_subjects', sid))
        # okay, we need functions that will lazily extract a hemisphere then load the retinotopy,
        # analyses, and atlas data onto it (also lazily)
        def update_hemi(subname, hemis, hname):
            # get the original hemisphere...
            hemi = hemis[hname]
            stup = {'sub':subname, 'hemi':hname}
            pdat = {}
            # okay, now we want to load a bunch of data; start with properties
            for (propname, filename) in six.iteritems(BensonWinawer2018Dataset.subject_properties):
                filename = os.path.join(cache_directory, filename.format(stup))
                if not os.path.isfile(filename): continue
                pdat[propname] = curry(nyio.load, filename)
            # we can add this already...
            hemi = hemi.with_prop(pimms.lazy_map(pdat))
            # next, we want to grab the registrations...
            rdat = {}
            for (rname, filename) in six.iteritems(BensonWinawer2018Dataset.subject_registrations):
                filename = os.path.join(cache_directory, filename.format(stup))
                if not os.path.isfile(filename): continue
                rdat[rname] = curry(nyio.load, filename, 'freesurfer_geometry')
            hemi = hemi.copy(_registrations=pimms.merge(hemi.registrations, pimms.lazy_map(rdat)))
            # that's all
            return hemi
        # okay, update the hemi's map with a curried version of the above and return...
        hemis = reduce(lambda h,hname: h.set(hname, curry(update_hemi, sub.name, sub.hemis, hname)),
                       ['lh','rh'],
                       sub.hemis)
        return sub.copy(hemis=hemis)
    @pimms.value
    def subjects(cache_directory, create_directories, create_mode):
        '''
        dataset.subjects is a lazy persistent map of all the subjects that are part of the
        Benson and Winawer (2018) dataset.
        '''
        # make sure the data are downloaded
        BensonWinawer2018Dataset.download(cache_directory, create_directories=create_directories,
                                          mode=create_mode, overwrite=False)
        # okay, next we want to setup the subjects
        return pimms.lazy_map({s:curry(BensonWinawer2018Dataset.load_subject, cache_directory, s)
                               for s in [('S12%02d' % s) for s in range(1,9)]})
    @pimms.value
    def v123_table(cache_directory, subjects):
        '''
        dataset.v123_table is a pimms ITable object for the BensonWinawer2018Dataset; the table
        contains all relevant pRF data for all cortical surface vertices in the 8 subjects included
        in the paper Benson and Winawer (2018).
        '''
        # First, see if there's a cache file
        cachefl = os.path.join(cache_directory, 'v123_table.p')
        if os.path.isfile(cachefl):
            try: return pimms.load(cachefl)
            except Exception:
                msg = 'neuropythy: Could not load existing v123_table cache file: %s' % cachefl
                warnings.warn(msg)
        # go through, building up arrays of arrays that we will concatenate at the end
        data = AutoDict()
        data.on_miss = lambda:[] # we want it to auto-produce lists...
        # non-retinotopy props we want to add to the data...
        props = ['midgray_surface_area', 'pial_surface_area', 'white_surface_area', 'label']
        for (sid,sub) in six.iteritems(subjects):
            for hname in ['lh','rh']:
                hemi = sub.hemis[hname]
                for (dskey,dsdata) in six.iteritems(BensonWinawer2018Dataset.prf_meta_data):
                    dsid = 99 if dskey == 'prf' else int(dskey[3:])
                    # okay, let's get the raw data we need to process...
                    ang = hemi.prop(dskey + '_polar_angle')
                    ecc = hemi.prop(dskey + '_eccentricity')
                    # and the inferred data...
                    iang = hemi.prop('inf-' + dskey + '_polar_angle')
                    iecc = hemi.prop('inf-' + dskey + '_eccentricity')
                    ilbl = hemi.prop('inf-' + dskey + '_visual_area')
                    # process both of these (get x/y basically)
                    (x, y)  = as_retinotopy({'polar_angle':ang,  'eccentricity':ecc},
                                            'geographical')
                    (ix,iy) = as_retinotopy({'polar_angle':iang, 'eccentricity':iecc},
                                            'geographical')
                    # find the relevant vertices
                    ii = np.where((iecc < 12) & np.sum([ilbl == k for k in (1,2,3)], axis=0))[0]
                    # now add the relevant properties...
                    for p in props: data[p].append(hemi.prop(p)[ii])
                    for p0 in ['polar_angle', 'eccentricity', 'radius', 'variance_explained']:
                        p = dskey + '_' + p0
                        data[p0].append(hemi.prop(p)[ii])
                    for (p,u) in zip(['x', 'y'], [x,y]):
                        data[p].append(u[ii])
                    for p0 in ['_polar_angle', '_eccentricity', '_radius', '_visual_area']:
                        p = 'inf-' + dskey + p0
                        data['inf' + p0].append(hemi.prop(p)[ii])
                    for (p0,u) in zip(['inf_x', 'inf_y'], [ix,iy]):
                        data[p0].append(u[ii])
                    # we also want repeated properties for some things
                    extras = {'subject':sid, 'hemi':hname, 'dataset_id':dsid, 'dataset_name':dskey}
                    for (p,v) in six.iteritems(extras): data[p].append(np.full(len(ii), v))
        # concatenate everything
        data = pimms.itable({k:np.concatenate(v) for (k,v) in six.iteritems(data)})
        if not os.path.isfile(cachefl):
            # try to write out the cache file
            try: pimms.save(cachefl, data)
            except Exception: pass
        return data
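A short usage sketch following the access pattern shown in the class docstring (the ny.data registry line assumes neuropythy's standard dataset interface):

import neuropythy as ny

dset = ny.data['benson_winawer_2018']   # downloads into the cache directory on first use
sub  = dset.subjects['S1201']           # subjects load lazily
lh   = sub.lh
lh.prop('prf_polar_angle')              # measured polar angle from the full pRF dataset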
Example 17
def _immeta(meta, **kw):
    # fldat is captured from the enclosing scope; each merged meta map is recorded there
    meta = pimms.merge(meta, kw)
    fldat.append(meta)
    return meta