Example #1
 def model(mdl):
     '''
     rrm.model is the retinotopy model object for the RegisteredRetinotopyModel object rrm.
     '''
     if not isinstance(mdl, RetinotopyModel):
         raise ValueError('given parameter model must be a RetinotopyModel instance')
     return pimms.persist(mdl)
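Each of these validators funnels its input through pimms.persist, which freezes mutable containers into persistent equivalents. A minimal sketch of that behavior, assuming only that pimms is installed (the frozen-set behavior is asserted in the persist tests of Example #22 below):

import pimms

cfg = pimms.persist({'scale': [1.0, 2.0], 'tags': set(['a', 'b'])})
print(pimms.is_pmap(cfg))          # True: the dict becomes a persistent map
print(type(cfg['tags']).__name__)  # 'frozenset': sets are frozen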
Example #2
 def instructions(inst):
     '''
     filemap.instructions is the map of load/save instructions for the given filemap.
     '''
     if not pimms.is_map(inst) and not isinstance(inst, list):
         raise ValueError('instructions must be a map or a list')
     return pimms.persist(inst)
Example #3
 def map_projection(mp):
     '''
     rrm.map_projection is the MapProjection object for the RegisteredRetinotopyModel object rrm.
     '''
     if not isinstance(mp, geo.MapProjection):
         raise ValueError('given parameter map_projection must be a MapProjection instance')
     return pimms.persist(mp)
Example #4
 def generate_DROI_details(self,
                           eccentricity_range=None,
                           angles=None,
                           min_variance_explained=0,
                           method=None):
     '''
     generate_DROI_details() yields a set of lazily computed DROI detailed analyses; these
     analyses are used to generate the DROI table(s).
     '''
     import six
     from neuropythy.util import curry
     f = curry(
         VisualPerformanceFieldsDataset._generate_subject_DROI_details,
         self.subjects)
     m = {
         sid: pimms.lmap({
             h: curry(f,
                      sid,
                      h,
                      eccentricity_range=eccentricity_range,
                      angles=angles,
                      min_variance_explained=min_variance_explained,
                      method=method)
             for h in ['lh', 'rh']
         })
         for sid in six.iterkeys(self.subjects)
     }
     return pimms.persist(m)
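The map returned above is doubly lazy: per subject and per hemisphere. A hypothetical access pattern (the dataset variable vpf and subject id sid are made up):

# details = vpf.generate_DROI_details(eccentricity_range=(1, 6))
# lh_detail = details[sid]['lh']   # the detailed analysis runs only on first access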
Example #5
 def meta_data(self, img):
     from neuropythy.hcp import cifti_axis_spec
     d = {}
     axdat = cifti_axis_spec(img)
     d['axes_data'] = axdat
     d['image_shape'] = tuple([ax.get('size', None) for ax in axdat])
     #d['voxel_type'] = img.get_data_dtype()
     return pimms.persist(d)
Example #6
 def path_parameters(pp):
     '''
     filemap.path_parameters is a map of parameters for the filemap's path.
     '''
     if pp is None: return pyr.m()
     elif not pimms.is_map(pp):
         raise ValueError('path parameters must be a mapping')
     else:
         return pimms.persist(pp)
Example #7
def calc_clean_maps(raw_retinotopy,
                    hemisphere_data,
                    label_data,
                    worklog,
                    no_clean=False):
    '''
    calc_clean_maps calculates cleaned retinotopic maps.

    Afferent parameters:
      @ no_clean 
        May be set to True to indicate that cleaning of the retinotopic maps should be skipped;
        downstream calculations such as that of cortical magnification will instead be performed on
        the raw and not the clean retinotopy.
    '''
    from neuropythy.vision import (clean_retinotopy, as_retinotopy)
    if no_clean:
        worklog('Skipping retinotopic map cleaning...')
        cl = {
            h: {
                'polar_angle': raw_retinotopy['angle'][h],
                'eccentricity': raw_retinotopy['eccen'][h],
                'visual_area': label_data[h]
            }
            for h in six.iterkeys(hemisphere_data)
        }
        return {'clean_retinotopy': pimms.persist(cl)}
    worklog('Calculating cleaned retinotopic maps...')
    wl = worklog.indent()
    res = {}
    for (h, hem) in six.iteritems(hemisphere_data):
        wl('%s' % h)
        # collect the retinotopy and run the cleaning
        ret = {
            'polar_angle': raw_retinotopy['angle'][h],
            'eccentricity': raw_retinotopy['eccen'][h],
            'variance_explained': raw_retinotopy['weight'][h]
        }
        if label_data and h in label_data: ret['visual_area'] = label_data[h]
        cl = clean_retinotopy(hem, retinotopy=ret)
        res[h] = {'polar_angle': cl[0], 'eccentricity': cl[1]}
        if label_data and h in label_data:
            res[h]['visual_area'] = label_data[h]
    # That's all...
    return {'clean_retinotopy': pimms.persist(res)}
Example #8
def calc_labels(subject, hemisphere_tags, hemisphere_data, labels=None):
    '''
    calc_labels finds the available label data for the subject on which the retinotopy operations
    are being performed.

    Afferent parameters:
      @ labels 
        The filenames of the files containing label data for the subject's hemispheres. Label data
        can be provided in mgz, annot, or curv files containing visual area labels, one per vertex.
        The labels argument may be specified as a comma-separated list of filenames (in the same
        order as the hemispheres, which are lh then rh by default) or as a single template filename
        that may contain the character * as a stand-in for the hemisphere tag. For example,
        '/data/*.v123_labels.mgz' would look for the file /data/lh.v123_labels.mgz for the 'lh'
        hemisphere and for /data/rh_LR32k.v123_labels.mgz for the 'rh_LR32k' hemisphere.
        Note that labels are not required--without labels, no field-sign minimization is performed,
        so retinotopic cleaning may be less reliable. Note that additionally, labels may be
        preceded by the relevant tag; so instead of '/data/*.v123_labels.mgz' with, as in the 
        example, hemispheres 'lh,rh_LR32k', one could use the arguments
        'lh:/data/lh.v123_labels.mgz,rh:/data/rh_LR32k.v123_labels.mgz' (for labels) and
        'lh,rh:rh_LR32k' for hemispheres.
    '''
    lbls = {}
    # no labels argument is fine--no labels are used
    if labels is None: return {'label_data': pyr.m()}
    if not pimms.is_str(labels):
        raise ValueError('could not understand non-string labels')
    # first, it might just be a template pattern
    fls = {}
    if '*' in labels:
        sparts = labels.split('*')
        for h in hemisphere_tags:
            flnm = h.join(sparts)
            fls[h] = os.path.expanduser(os.path.expandvars(flnm))
    else:
        # okay, separate commas...
        lsplit = labels.split(',')
        for (k, l) in enumerate(lsplit):
            if ':' in l: (tag, name) = l.split(':')
            elif k < len(hemisphere_tags):
                (tag, name) = (hemisphere_tags[k], l)
            else:
                raise ValueError('could not match labels to hemispheres')
            if tag not in hemisphere_data:
                raise ValueError('Tag %s (in labels arg) does not exist' %
                                 (tag, ))
            fls[tag] = os.path.expanduser(os.path.expandvars(name))
    for (tag, name) in six.iteritems(fls):
        if not os.path.isfile(name):
            raise ValueError('Labels filename %s not found' % (name, ))
        hem = hemisphere_data[tag]
        tmp = nyio.load(name)
        if not pimms.is_vector(tmp) or len(tmp) != hem.vertex_count:
            raise ValueError('Labels file %s does not contain label data' %
                             (name, ))
        lbls[tag] = np.asarray(tmp)
    return {'label_data': pimms.persist(lbls)}
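For reference, the three accepted forms of the labels argument described in the docstring, with made-up paths:

# 1) template pattern; '*' is replaced by each hemisphere tag:
labels = '/data/*.v123_labels.mgz'
# 2) comma-separated list, matched to the hemispheres in order (lh, then rh by default):
labels = '/data/lh.v123_labels.mgz,/data/rh.v123_labels.mgz'
# 3) explicitly tagged entries:
labels = 'lh:/data/lh.v123_labels.mgz,rh:/data/rh_LR32k.v123_labels.mgz'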
Example #9
def subject_from_filemap(fmap,
                         name=None,
                         meta_data=None,
                         check_path=True,
                         default_alignment='MSMAll'):
    '''
    subject_from_filemap(fmap) yields an HCP subject from the given filemap.
    '''
    # start by making a pseudo-dir:
    if check_path and not is_hcp_subject_path(fmap.pseudo_paths[None]):
        raise ValueError('given path does not appear to hold an HCP subject')
    # we need to go ahead and load the ribbon...
    rib = fmap.data_tree.image['ribbon']
    vox2nat = rib.affine
    # make images and hems
    imgs = images_from_filemap(fmap)
    # many hemispheres to create:
    hems = pimms.lazy_map({
        h: curry(cortex_from_filemap, fmap, h)
        for h in [
            'lh_native_MSMAll', 'rh_native_MSMAll', 'lh_nat32k_MSMAll',
            'rh_nat32k_MSMAll', 'lh_nat59k_MSMAll', 'rh_nat59k_MSMAll',
            'lh_LR32k_MSMAll', 'rh_LR32k_MSMAll', 'lh_LR59k_MSMAll',
            'rh_LR59k_MSMAll', 'lh_LR164k_MSMAll', 'rh_LR164k_MSMAll',
            'lh_native_MSMSulc', 'rh_native_MSMSulc', 'lh_nat32k_MSMSulc',
            'rh_nat32k_MSMSulc', 'lh_nat59k_MSMSulc', 'rh_nat59k_MSMSulc',
            'lh_LR32k_MSMSulc', 'rh_LR32k_MSMSulc', 'lh_LR59k_MSMSulc',
            'rh_LR59k_MSMSulc', 'lh_LR164k_MSMSulc', 'rh_LR164k_MSMSulc',
            'lh_native_FS', 'rh_native_FS', 'lh_nat32k_FS', 'rh_nat32k_FS',
            'lh_nat59k_FS', 'rh_nat59k_FS', 'lh_LR32k_FS', 'rh_LR32k_FS',
            'lh_LR59k_FS', 'rh_LR59k_FS', 'lh_LR164k_FS', 'rh_LR164k_FS'
        ]
    })
    # now, setup the default alignment aliases:
    if default_alignment is not None:
        for h in [
                'lh_native', 'rh_native', 'lh_nat32k', 'rh_nat32k',
                'lh_nat59k', 'rh_nat59k', 'lh_LR32k', 'rh_LR32k', 'lh_LR59k',
                'rh_LR59k', 'lh_LR164k', 'rh_LR164k'
        ]:
            hems = hems.set(
                h, curry(lambda h: hems[h + '_' + default_alignment], h))
        hems = hems.set('lh', lambda: hems['lh_native'])
        hems = hems.set('rh', lambda: hems['rh_native'])
    meta_data = pimms.persist({} if meta_data is None else meta_data)
    meta_data = meta_data.set('raw_images', fmap.data_tree.raw_image)
    if default_alignment is not None:
        meta_data = meta_data.set('default_alignment', default_alignment)
    return mri.Subject(name=name,
                       pseudo_path=fmap.pseudo_paths[None],
                       hemis=hems,
                       images=imgs,
                       meta_data=meta_data).persist()
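The alias loop above makes the unsuffixed hemisphere names resolve lazily to the default alignment. A sketch, assuming fmap is a valid HCP filemap:

# sub = subject_from_filemap(fmap, name='100610')
# sub.hemis['lh']         # alias for sub.hemis['lh_native']
# sub.hemis['lh_native']  # alias for sub.hemis['lh_native_MSMAll'] when
#                         # default_alignment == 'MSMAll'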
Example #10
 def supplemental_paths(sp):
     '''
     filemap.supplemental_paths is a map of additional paths provided to the filemap object.
     '''
     if not pimms.is_map(sp):
         raise ValueError('supplemental_paths must be a map')
     rr = {}
     for (nm, pth) in six.iteritems(sp):
         pth = FileMap.valid_path(pth)
         if pth is None:
             raise ValueError(
                 'supplemental paths must be directories or tarballs')
         rr[nm] = pth
     return pimms.persist(rr)
Example #11
 def entry_meta_data(mds):
     '''
     lblidx.entry_meta_data is lists of meta-data maps for each of the labels in the given label
       index object.
     '''
     if mds is None: return None
     if is_dataframe(mds):
         mds = {k:mds[k].values for k in mds.columns}
     elif pimms.is_map(mds):
         ks = list(mds.keys())
         mds = [{k:v for (k,v) in zip(ks,vs)} for vs in np.transpose(list(mds.values()))]
     elif not pimms.is_array(mds) or not all(pimms.is_map(u) for u in mds):
         raise ValueError('unbalanced or non-map entry meta-data')
     return pimms.persist(mds)
Example #12
    def generate_DROI_summary(DROI_table, angles=None, eccens=None):
        '''
        generate_DROI_summary(table) converts the DROI table into a summary.
        '''
        import neuropythy as ny, numpy as np
        if angles is None: angles = VisualPerformanceFieldsDataset.roi_angles
        elif angles in ['fine', 'all']:
            angles = VisualPerformanceFieldsDataset.roi_angles_fine
        # in eccens, by default, we exclude the foveal (0-1 degree) and peripheral (6-7 degree)
        # eccentricity bands.
        if eccens is None:
            eccens = VisualPerformanceFieldsDataset.roi_eccens[1:-1]
        emns = [ee[0] for ee in eccens]
        emxs = [ee[1] for ee in eccens]

        def _dfsel(df, k, ang, emns=[1, 2, 3, 4, 5], emxs=[2, 3, 4, 5, 6]):
            tbls = [
                ny.util.dataframe_select(df,
                                         angle_delta_deg=ang,
                                         min_eccentricity_deg=mn,
                                         max_eccentricity_deg=mx)
                for (mn, mx) in zip(emns, emxs)
            ]
            tbls = [tbl[['sid', 'hemisphere', k]] for tbl in tbls]
            tbl = tbls[0]
            for t in tbls[1:]:
                tt = tbl.merge(t, on=['sid', 'hemisphere'])
                tt[k] = tt[k + '_x'] + tt[k + '_y']
                tbl = tt[['sid', 'hemisphere', k]]
            tl = tbl.loc[tbl['hemisphere'] == 'lh']
            tr = tbl.loc[tbl['hemisphere'] == 'rh']
            tt = tl.merge(tr, on='sid')
            tt = tt.sort_values('sid')
            return tt[k + '_x'].values + tt[k + '_y'].values

        dat = {
            para: {
                k: pimms.imm_array([
                    _dfsel(df, k, ang, emns=emns, emxs=emxs) for ang in angles
                ])
                for k in
                ['surface_area_mm2', 'mean_thickness_mm', 'volume_mm3']
            }
            for para in [
                'horizontal', 'vertical', 'dorsal', 'ventral', 'hdorsal',
                'hventral', 'dorsal_v1', 'ventral_v1', 'dorsal_v2',
                'ventral_v2'
            ] for df in [ny.util.dataframe_select(DROI_table, boundary=para)]
        }
        return pimms.persist(dat)
Example #13
 def DROI_details(subjects):
     '''
     DROI_details is a nested-dictionary structure of the various DROI details of each subject
     and hemisphere.
     '''
     import neuropythy as ny, os, six
     from neuropythy.util import curry
     f = curry(
         VisualPerformanceFieldsDataset._generate_subject_DROI_details,
         subjects)
     m = {
         sid: pimms.lmap({h: curry(f, sid, h)
                          for h in ['lh', 'rh']})
         for sid in six.iterkeys(subjects)
     }
     return pimms.persist(m)
Example #14
    def asymmetry(DROI_summary):
        '''
        asymmetry is a nested dictionary structure containing the surface-area asymmetry estimates
        for each subject. The value asymmetry[k][a][sno] is the percent asymmetry between the axes
        defined by comparison name k ('HVA' for HM:VM asymmetry, 'VMA' for LVM:UVM asymmetry,
        'HVA_cumulative' for cumulative HM:VM asymmetry, or 'VMA_cumulative' for cumulative LVM:UVM
        asymmetry), subject number sno (0-180 for the HCP subject whose ID is subject_list[sno]),
        and angle-distance a (10, 20, 30, 40, or 50 indicating the angle-wedge size in degrees of
        polar angle).

        Asymmetry is defined as (value1 - value2) / mean(value1, value2) where value1 and value2 are
        either the horizontal and vertical ROI surface areas respectively or the lower-vertical
        (dorsal) and upper-vertical (ventral) ROI surface areas respectively. The values reported
        in this data structure are percent asymmetry: difference / mean * 100.
        '''
        import neuropythy as ny, six, numpy as np
        quant = 'surface_area_mm2'
        dat = {}
        for (k, (k1, k2)) in zip(['HVA', 'VMA'], [('horizontal', 'vertical'),
                                                  ('dorsal', 'ventral')]):
            for iscum in [True, False]:
                # Grab and prep the data.
                ys1 = np.asarray(DROI_summary[k1][quant])
                ys2 = np.asarray(DROI_summary[k2][quant])
                if not iscum:
                    res = []
                    for ys in (ys1, ys2):
                        (cum, yr) = (0, [])
                        for yy in ys:
                            yr.append(yy - cum)
                            cum = yy
                        res.append(yr)
                    (ys1, ys2) = [np.asarray(u) for u in res]
                # Calculate the asymmetries.
                asym = []
                for (y1, y2) in zip(ys1, ys2):
                    mu = np.nanmean([y1, y2], axis=0)
                    dy = y1 - y2
                    asym.append(dy / mu * 100)
                # Append the data
                dat[k + '_cumulative' if iscum else k] = pimms.imm_array(asym)
        return pimms.persist(dat)
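A worked instance of the percent-asymmetry formula used above:

import numpy as np

(v1, v2) = (120.0, 100.0)  # e.g., horizontal vs. vertical ROI surface area in mm^2
hva = (v1 - v2) / np.nanmean([v1, v2]) * 100
# hva is approximately 18.18: the horizontal ROI is ~18% larger than the vertical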
Example #15
 def parameters(params):
     '''
     mdl.parameters is a persistent map of the parameters for the given SchiraModel object mdl.
     '''
     if not pimms.is_pmap(params): params = pyr.pmap(params)
     # do the translations that we need...
     scale = params['scale']
     if pimms.is_number(scale):
         params = params.set('scale', (scale, scale))
     elif not is_tuple(scale):
         params = params.set('scale', tuple(scale))
     shear = params['shear']
     if pimms.is_number(shear) and np.isclose(shear, 0):
         params = params.set('shear', ((1, 0), (0, 1)))
     elif shear[0][0] != 1 or shear[1][1] != 1:
         raise RuntimeError('shear matrix diagonal elements must be 1!')
     elif not is_tuple(shear) or not all(is_tuple(s) for s in shear):
         params = params.set('shear', tuple([tuple(s) for s in shear]))
     center = params['center']
     if pimms.is_number(center) and np.isclose(center, 0):
         params = params.set('center', (0.0, 0.0))
     return pimms.persist(params, depth=None)
Example #16
    def boundary_distances(pseudo_path, subject_list, inferred_maps):
        '''
        boundary_distances is a nested-dictionary structure containing distances between
        each vertex and a V1 boundary. If x is boundary_distances[sid][h][b][k] then x is
        the distance between the k'th vertex and boundary b ("ventral", "dorsal", or
        "horizontal") in the h hemisphere ("lh" or "rh") of the subject with ID sid.
        '''
        import os, six
        from neuropythy.util import curry
        from neuropythy import load

        def _load_distances(sid, h):
            flnm = pseudo_path.local_path('distances', '%s_%s.mgz' % (sid, h))
            (v, d, hz) = load(flnm).T  # avoid shadowing the hemisphere argument h
            return pimms.persist({'ventral': v, 'dorsal': d, 'horizontal': hz})

        return pimms.persist({
            sid: pimms.lmap(
                {h: curry(_load_distances, sid, h)
                 for h in ['lh', 'rh']})
            for sid in subject_list
        })
Example #17
    def inferred_maps(pseudo_path, subject_list):
        '''
        inferred_maps is a nested-dictionary structure containing the retinotopic maps inferred by
        using Bayesian inference on the retinotopic maps of the subjects in the HCP 7T Retinotopy
        Dataset.
        '''
        import os, six
        from neuropythy.util import curry
        from neuropythy import load
        inffiles = VisualPerformanceFieldsDataset.inferred_map_files

        def _load_infmaps(sid, h, patt):
            flnm = pseudo_path.local_path('inferred_maps', patt % (sid, h))
            return load(flnm)

        return pimms.persist({
            sid: {
                h: pimms.lmap({('inf_' + k): curry(_load_infmaps, sid, h, v)
                               for (k, v) in six.iteritems(inffiles)})
                for h in ['lh', 'rh']
            }
            for sid in subject_list
        })
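A hypothetical access pattern for the nested structure returned above (the exact 'inf_'-prefixed keys depend on inferred_map_files):

# infmaps = inferred_maps(pseudo_path, [100610])
# lh_maps = infmaps[100610]['lh']   # a lazy map; each file loads on first access
# angle = lh_maps['inf_angle']      # hypothetical key: 'inf_' + a key of inferred_map_files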
Example #18
def calc_images(subject, atlas_properties, image_template, worklog):
    '''
    calc_images is a calculator that converts the atlas properties into a similar nested map of
    atlas images.

    Afferent parameters:
      @ image_template 
        The image template object or file to be used as a template for the output image. This
        can be None (in which case 'brain' is used) or a string that identifies an image of the
        subject. Or it can be a valid filename of an image.

    Efferent values:
      @ atlas_images 
        A nested lazy-map structure that contains the 3D images for each of the relevant atlases
        and properties.
    '''
    from neuropythy import image_clear
    # parse the image template
    if image_template is None: image_template = subject.images['brain']
    elif pimms.is_str(image_template):
        if image_template in subject.images:
            image_template = subject.images[image_template]
        else:
            try:
                image_template = ny.load(image_template, to='image')
            except Exception:
                # image_template is still the failed filename here
                raise ValueError('Could not load or find image template: %s' %
                                 (image_template, ))
    if image_template is None:
        raise ValueError('could not deduce an image template')
    image_template = image_clear(image_template)
    # having the image template, make the addresses:
    hemis = list(
        six.iterkeys(
            next(six.itervalues(next(six.itervalues(atlas_properties))))))
    addr = pimms.lmap({
        h: curry(lambda h: subject.hemis[h].image_address(image_template), h)
        for h in hemis
    })
    worklog('Preparing images...')

    def _make_images(vd, m):
        worklog('Constructing %s images...' % (m, ))
        dat = {h: vd[h][m] for h in six.iterkeys(vd)}
        lk = next(
            iter(
                sorted(k for k in six.iterkeys(dat)
                       if k.lower().startswith('lh'))), None)
        rk = next(
            iter(
                sorted(k for k in six.iterkeys(dat)
                       if k.lower().startswith('rh'))), None)
        idat = (dat[lk] if lk is not None else None,
                dat[rk] if rk is not None else None)
        if np.array_equal(idat, (None, None)): return None
        aa = (addr.get(lk, None), addr.get(rk, None))
        im = subject.cortex_to_image(idat,
                                     image_template,
                                     hemi=(lk, rk),
                                     address=aa)
        return im

    ims = {
        atl: {
            v: pimms.lmap(
                {m: curry(_make_images, vd, m)
                 for m in six.iterkeys(hd)})
            for (v, vd) in six.iteritems(pps)
            for hd in [next(six.itervalues(vd))]
        }
        for (atl, pps) in six.iteritems(atlas_properties)
    }
    return {
        'atlas_images': pimms.persist(ims),
        'image_template_object': image_template
    }
Example #19
def calc_atlas_projections(subject_cortices,
                           atlas_cortices,
                           atlas_map,
                           worklog,
                           atlases=Ellipsis):
    '''
    calc_atlas_projections calculates the lazy map of atlas projections.

    Afferent parameters:
      @ atlases 
        The atlases that should be applied to the subject. This can be specified as a list/tuple of
        atlas names or as a string where the atlas names are separated by whitespace, commas, or
        semicolons. For example, to specify the 'benson14' atlas as well as the 'wang15' atlas, then
        ('benson14', 'wang15'), 'benson14 wang15' or 'benson14,wang15' would all be acceptable. To
        specify an atlas version, separate the atlas-name and the version with a colon (:), such as
        'benson14:2.5'. If no version is provided, then the highest version found is used. If
        atlases is set to None or Ellipsis (the default), this is equivalent to 'benson14,wang15'.

    Efferent values:
      @ atlas_properties 
        The atlas properties is a nested pimms lazy map whose key-path are like those of the
        atlas_map afferent parameter but which contains only those atlases requested via the atlases
        afferent parameter and whose deepest values are interpolated property vectors for the 
        target subject.
      @ atlas_version_tags 
        Each atlas can be specified as <atlas> or <atlas>:<version>; if the version is specified,
        then the version tag string (e.g., '.v1_5') is included in this dictionary; if only <atlas>
        was specified then this string is ''. If <atlas>: is specified, then the version string for
        whichever atlas was used is included.
    '''
    # Parse the atlases argument first:
    if atlases is Ellipsis: atlases = ('benson14', 'wang15')
    if pimms.is_str(atlases):
        atlases = tuple(re.split(r'([,;]|\s)+', atlases)[::2])

    def _atlas_to_atlver(atl):
        atl0 = atl
        if not pimms.is_vector(atl):
            if ':' in atl:
                atl = atl.split(':')
                if len(atl) != 2:
                    raise ValueError('Cannot parse atlas spec: %s' % atl0)
            else:
                atl = [atl, None]
        if len(atl) != 2:
            raise ValueError('Improperly specified atlas: %s' % atl0)
        if pimms.is_str(atl[1]):
            if len(atl[1]) == 0: atl = (atl[0], None)
            else:
                if atl[1][0] == 'v': atl[1] = atl[1][1:]
                try:
                    atl = (atl[0],
                           tuple([int(x)
                                  for x in re.split(r'[-_.]+', atl[1])]))
                except Exception:
                    raise ValueError(
                        'Could not parse atlas version string: %s' % atl[1])
        elif pimms.is_int(atl[1]):
            atl = (atl[0], (atl[1], ))
        elif pimms.is_real(atl[1]):
            atl = (atl[0], (
                int(atl[1]),
                int(10 * (atl[1] - int(atl[1]))),
            ))
        elif pimms.is_vector(atl[1], int):
            atl = (atl[0], tuple(atl[1]))
        elif atl[1] is not None:
            raise ValueError(
                'atlas version must be a string (like "v1_5_1") or a list of ints'
            )
        else:
            atl = tuple(atl)
        return atl + (atl0, )

    # Okay, let's find these versions of the atlases in the atlas_map...
    worklog('Preparing Atlases...')
    wl = worklog.indent()
    atl_props = AutoDict()
    avt = AutoDict()
    # keyfn is for sorting versions (newest version last)
    keyfn = lambda k: ((np.inf, ) if k is None else k + (np.inf, )
                       if len(k) == 0 or k[-1] != 0 else k)
    for (atl, version, atl0) in [_atlas_to_atlver(atl) for atl in atlases]:
        if atl not in atlas_map:
            raise ValueError('Could not find an atlas named %s' % atl)
        atldat = atlas_map[atl]
        # if the version is None, we pick the highest of the available versions
        if version is None: v = sorted(atldat.keys(), key=keyfn)[-1]
        elif version in atldat: v = version
        else:
            raise ValueError('Could not find specific version %s of atlas %s' %
                             (version, atl))
        # update the atlas-version-tag data
        wl('Atlas: %s, Version: %s' % (atl, v))
        avt[atl][v] = '' if v is None or ':' not in atl0 else (
            '.v' + '_'.join(map(str, v)))
        lmaps = atlas_map[atl][v]
        # convert these maps into interpolated properties...
        for (h, hmap) in six.iteritems(lmaps):
            hmap = pimms.lazy_map({
                m: curry(
                    lambda hmap, h, m: atlas_cortices[h].interpolate(
                        subject_cortices[h], hmap[m]), hmap, h, m)
                for m in six.iterkeys(hmap)
            })
            lmaps = lmaps.set(h, hmap)
        # add the lmaps (the persistent/lazy maps for this atlas version) in the atlprops
        atl_props[atl][v] = lmaps
    # That's all; we can return atl_props once we persist it
    return {
        'atlas_properties': pimms.persist(atl_props),
        'atlas_version_tags': pimms.persist(avt)
    }
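The atlas-spec tokenizer at the top of this function relies on re.split interleaving the captured separators into its result, so taking every other element keeps just the atlas specs; a quick check:

import re

print(re.split(r'([,;]|\s)+', 'benson14:2.5, wang15')[::2])
# -> ['benson14:2.5', 'wang15']; _atlas_to_atlver then splits 'benson14:2.5' into
#    the name 'benson14' and the version tuple (2, 5)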
Example #20
 def data_hierarchy(h):
     '''
     filemap.data_hierarchy is the initial data hierarchy provided to the filemap object.
     '''
     return pimms.persist(h)
Example #21
 def _parsed_instructions(instructions, data_hierarchy):
     return pimms.persist(
         FileMap.parse_instructions(instructions, data_hierarchy))
Example #22
    def test_persist(self):
        '''
        test_persist() tests pimms persist() function.
        '''
        from .lazy_complex import LazyComplex

        z = LazyComplex((1.0, 2.0))
        self.assertFalse(z.is_persistent())
        self.assertTrue(z.is_transient())
        z.persist()
        self.assertTrue(z.is_persistent())
        self.assertFalse(z.is_transient())

        z = LazyComplex((1.0, 2.0))
        self.assertFalse(z.is_persistent())
        self.assertTrue(z.is_transient())
        zp = pimms.persist(z)
        self.assertTrue(zp.is_persistent())
        self.assertFalse(zp.is_transient())
        self.assertFalse(z.is_persistent())
        self.assertTrue(z.is_transient())

        m0 = {
            'a': [1, 2, 3],
            'b': (2, 3, 4),
            'c': {
                'd': 'abc',
                'e': set(['def', 'ghi']),
                'f': frozenset([10, 11, 12])
            },
            'z': z,
            'zp': zp,
            'q': (1, 2, [3, 4]),
            't':
            pimms.itable({
                'c1': range(10),
                'c2': range(1, 11),
                'c3': range(2, 12)
            })
        }
        m = pimms.persist(m0)
        self.assertIs(m['b'], m0['b'])
        self.assertIsNot(m['a'], m0['a'])
        self.assertTrue(all(ai == bi for (ai, bi) in zip(m['a'], m0['a'])))
        self.assertTrue(pimms.is_pmap(m['c']))
        self.assertIs(m['c']['d'], m0['c']['d'])
        self.assertTrue(isinstance(m['c']['e'], frozenset))
        self.assertTrue(isinstance(m['c']['f'], frozenset))
        self.assertTrue(
            all(ai == bi for (ai, bi) in zip(m['c']['f'], m0['c']['f'])))
        self.assertTrue(m['z'].is_persistent())
        self.assertIs(m['zp'], m0['zp'])
        self.assertIs(m['q'], m0['q'])
        self.assertIs(m['q'][2], m0['q'][2])
        self.assertTrue(pimms.is_itable(m['t']))
        self.assertTrue(m['t'].is_persistent())
        m = pimms.persist(m0, depth=1)
        self.assertIs(m['b'], m0['b'])
        self.assertIsNot(m['a'], m0['a'])
        self.assertTrue(all(ai == bi for (ai, bi) in zip(m['a'], m0['a'])))
        self.assertTrue(pimms.is_pmap(m['c']))
        self.assertIs(m['c']['d'], m0['c']['d'])
        self.assertTrue(isinstance(m['c']['e'], set))
        self.assertTrue(isinstance(m['c']['f'], frozenset))
        self.assertTrue(
            all(ai == bi for (ai, bi) in zip(m['c']['f'], m0['c']['f'])))
        self.assertTrue(m['z'].is_persistent())
        self.assertIs(m['zp'], m0['zp'])
        self.assertIs(m['q'], m0['q'])
        self.assertIs(m['q'][2], m0['q'][2])
        self.assertTrue(pimms.is_itable(m['t']))
        self.assertTrue(m['t'].is_persistent())
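The two persist() passes above differ only in the depth argument; the rule the assertions encode, in brief:

# pimms.persist(m0)           # full depth: the set at m0['c']['e'] becomes a frozenset
# pimms.persist(m0, depth=1)  # one level: m0['c'] becomes a persistent map, but the
#                             # set stored under 'e' inside it is left as-is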
Example #23
def calc_filemap(subject,
                 worklog,
                 radtan_cmag,
                 spotlight_cmag,
                 clean_retinotopy,
                 no_clean=False,
                 output_path=None,
                 overwrite=False,
                 output_format='mgz',
                 create_directory=False):
    '''
    calc_filemap is a calculator that prepares the calculated cortical magnification data and the
    cleaned retinotopic maps for exporting.

    Afferent parameters
      @ output_path 
        The directory into which the atlas files should be written. If not provided or None then
        uses the subject's surf directory. If this directory doesn't exist, then it uses the
        subject's directory itself.
      @ overwrite 
        Whether to overwrite existing atlas files. If True, then atlas files that already exist will
        be overwritten. If False, then no files are overwritten.
      @ create_directory 
        Whether to create the output path if it doesn't exist. This is False by default.
      @ output_format 
        The desired output format of the files to be written. May be one of the following: 'mgz',
        'mgh', or either 'curv' or 'morph'.

    Efferent values:
      @ filemap 
        A pimms lazy map whose keys are filenames and whose values are interpolated atlas
        properties.
      @ export_all_fn 
        A function of no arguments that, when called, exports all of the files in the filemap to the
        output_path.
    '''
    if output_path is None:
        output_path = os.path.join(subject.path, 'surf')
        if not os.path.isdir(output_path): output_path = subject.path
    output_format = 'mgz' if output_format is None else output_format.lower()
    if output_format.startswith('.'): output_format = output_format[1:]
    (fmt, ending) = (('mgh', '.mgz') if output_format == 'mgz' else
                     ('mgh', '.mgh') if output_format == 'mgh' else
                     ('freesurfer_morph', ''))
    # make the filemap...
    worklog('Preparing Filemap...')
    fm = {}
    for (h, cl) in six.iteritems(clean_retinotopy if not no_clean else {}):
        for (k, val) in six.iteritems(cl):
            flnm = ('%s.clean_%s.' + output_format) % (h, k)
            fm[flnm] = val
    for (h, cm) in six.iteritems(radtan_cmag if radtan_cmag is not None else {}):
        flnm = ('%s.rad_cmag.' % h) + output_format
        fm[flnm] = cm[0]
        flnm = ('%s.tan_cmag.' % h) + output_format
        fm[flnm] = cm[1]
    for (h, cm) in six.iteritems(
            spotlight_cmag if spotlight_cmag is not None else {}):
        flnm = ('%s.spot_cmag.' % h) + output_format
        fm[flnm] = cm
    # okay, make that a persistent map:
    filemap = pimms.persist(fm)
    output_path = os.path.expanduser(os.path.expandvars(output_path))

    # the function for exporting all properties:
    def export_all():
        '''
        This function will export all files from its associated filemap and return a list of the
        filenames.
        '''
        if not os.path.isdir(output_path):
            if not create_directory:
                raise ValueError(
                    'No such path and create_directory is False: %s' %
                    output_path)
            os.makedirs(os.path.abspath(output_path), 0o755)
        filenames = []
        worklog('Writing Files...')
        wl = worklog.indent()
        for (flnm, val) in six.iteritems(filemap):
            flnm = os.path.join(output_path, flnm)
            wl(flnm)
            filenames.append(nyio.save(flnm, val, fmt))
        return filenames

    return {'filemap': filemap, 'export_all_fn': export_all}
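A hypothetical use of the two efferent values (the afferent arguments here are placeholders):

# res = calc_filemap(subject, worklog, radtan, spotlight, clean,
#                    output_path='/tmp/out', create_directory=True)
# written = res['export_all_fn']()   # writes every entry of res['filemap'] and
#                                    # returns the list of filenames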
Example #24
def calc_retinotopy(hemisphere_data,
                    hemisphere_tags,
                    label_data,
                    angles='*.prf_angle.mgz',
                    eccens='*.prf_eccen.mgz',
                    weights='*.prf_vexpl.mgz'):
    '''
    calc_retinotopy imports the raw retinotopy data for the given subject.

    Afferent parameters:
      @ angles 
        The filenames of the polar-angle files that are needed for each hemisphere. For more 
        information on how these files are specified, see the help text for the labels parameter.
        If angles is not supplied, then the default value is '*.prf_angle.mgz'. Polar angles
        MUST be encoded in clockwise degrees of rotation starting from the positive y-axis.
      @ eccens 
        The filenames of the eccentricity files that are needed for each hemisphere. For more 
        information on how these files are specified, see the help text for the labels parameter.
        If eccens is not supplied, then the default value is '*.prf_eccen.mgz'. Eccentricity
        MUST be encoded in degrees of visual angle.
      @ weights 
        The filenames of the weights (usually fraction of variance explained) files that are needed
        for each hemisphere. For more information on how these files are specified, see the help
        text for the labels parameter. If weights is not supplied, then the default value is
        '*.prf_vexpl.mgz'. Variance explained should be encoded as a fraction with 1 indicating
        100% variance explained.
    '''
    retino = {}
    for (k, val) in zip(['angle', 'eccen', 'weight'],
                        [angles, eccens, weights]):
        if not pimms.is_str(val):
            raise ValueError('could not understand non-string %ss' % k)
        # first, it might just be a template pattern
        fls = {}
        if '*' in val:
            sparts = val.split('*')
            for h in hemisphere_tags:
                flnm = h.join(sparts)
                fls[h] = os.path.expanduser(os.path.expandvars(flnm))
        else:
            # okay, separate commas...
            lsplit = val.split(',')
            for (kk, l) in enumerate(lsplit):
                if ':' in l: (tag, name) = l.split(':')
                elif kk < len(hemisphere_tags):
                    (tag, name) = (hemisphere_tags[kk], l)
                else:
                    raise ValueError('could not match %ss to hemispheres' %
                                     (k, ))
                if tag not in hemisphere_data:
                    raise ValueError('Tag %s (in %ss arg) does not exist' %
                                     (tag, k))
                fls[tag] = os.path.expanduser(os.path.expandvars(name))
        retino[k] = fls
    # now go through and load them
    res = {}
    for (k, fls) in six.iteritems(retino):
        rr = {}
        for (tag, name) in six.iteritems(fls):
            if not os.path.isfile(name):
                raise ValueError('%ss filename %s not found' % (k, name))
            hem = hemisphere_data[tag]
            tmp = nyio.load(name)
            if not pimms.is_vector(tmp) or len(tmp) != hem.vertex_count:
                raise ValueError('%ss file %s does not contain valid data' %
                                 (k, name))
            rr[tag] = np.asarray(tmp)
        res[k] = rr
    return {'raw_retinotopy': pimms.persist(res)}
Example #25
def mag_data(hemi, retinotopy='any', surface='midgray', mask=None,
             weights=Ellipsis, weight_min=0, weight_transform=Ellipsis,
             visual_area=Ellipsis, visual_area_mask=Ellipsis,
             eccentricity_range=None, polar_angle_range=None):
    '''
    mag_data(hemi) yields a map of visual/cortical magnification data for the given hemisphere.
    mag_data(mesh) uses the given mesh.
    mag_data([arg1, arg2...]) maps over the given hemisphere or mesh arguments.
    mag_data(subject) is equivalent to mag_data([subject.lh, subject.rh]).
    mag_data(mdata) for a valid magnification data map mdata (i.e., is_mag_data(mdata) is True or
      mdata is a lazy map with integer keys) always yields mdata without considering any additional
      arguments.

    The data structure returned by magdata is a lazy map containing the keys:
      * 'surface_coordinates': a (2 x N) or (3 x N) matrix of the mesh coordinates in the mask
        (usually in mm).
      * 'visual_coordinates': a (2 x N) matrix of the (x,y) visual field coordinates (in degrees).
      * 'surface_areas': a length N vector of the surface areas of the faces in the mesh.
      * 'visual_areas': a length N vector of the areas of the faces in the visual field.
      * 'mesh': the full mesh from which the surface coordinates are obtained.
      * 'submesh': the submesh of mesh of just the vertices in the mask (may be identical to mesh).
      * 'mask': the mask used.
      * 'retinotopy_data': the full set of retinotopy_data from the hemi/mesh; note that this will
        include the key 'weights' of the weights actually used and 'visual_area' of the found or
        specified visual area.
      * 'masked_data': the subsampled retinotopy data from the hemi/mesh.
    Note that if a visual_area property is found or provided (see options below), instead of
    yielding a map of the above, a lazy map whose keys are the visual areas and whose values are the
    maps described above is yielded instead.

    The following named options are accepted (in order):
      * retinotopy ('any') specifies the value passed to the retinotopy_data function to obtain the
        retinotopic mapping data; this may be a map of such data.
      * surface ('midgray') specifies the surface to use.
      * mask (None) specifies the mask to use.
      * weights, weight_min, weight_transform (Ellipsis, 0, Ellipsis) are used as in the
        to_property() function  in neuropythy.geometry except weights, which, if equal to Ellipsis,
        attempts to use the weights found by retinotopy_data() if any.
      * visual_area (Ellipsis) specifies the property to use for the visual area label; Ellipsis is
        equivalent to whatever visual area label is found by the retinotopy_data() function if any.
      * visual_area_mask (Ellipsis) specifies which visual areas to include in the returned maps,
        assuming a visual_area property is found; Ellipsis is equivalent to everything but 0; None
        is equivalent to everything.
      * eccentricity_range (None) specifies the eccentricity range to include.
      * polar_angle_range (None) specifies the polar_angle_range to include.
    '''
    if is_mag_data(hemi): return hemi
    elif pimms.is_lazy_map(hemi) and pimms.is_vector(hemi.keys(), 'int'): return hemi
    if mri.is_subject(hemi): hemi = (hemi.lh, hemi.rh)
    if pimms.is_vector(hemi):
        return tuple([mag_data(h, retinotopy=retinotopy, surface=surface, mask=mask,
                               weights=weights, weight_min=weight_min,
                               weight_transform=weight_transform, visual_area=visual_area,
                               visual_area_mask=visual_area_mask,
                               eccentricity_range=eccentricity_range,
                               polar_angle_range=polar_angle_range)
                      for h in hemi])
    # get the mesh
    mesh = geo.to_mesh((hemi, surface))
    # First, find the retino data
    retino = retinotopy_data(hemi, retinotopy)
    # we can process the rest the mask now, including weights and ranges
    if weights is Ellipsis: weights = retino.get('variance_explained', None)
    mask = hemi.mask(mask, indices=True)
    (arng,erng) = (polar_angle_range, eccentricity_range)
    (ang,ecc) = (retino['polar_angle'], retino['eccentricity'])
    if pimms.is_str(arng):
        tmp = to_hemi_str(arng)
        arng = (-180,0) if tmp == 'rh' else (0,180) if tmp == 'lh' else (-180,180)
    elif arng is None:
        tmp = ang[mask]
        tmp = tmp[np.isfinite(tmp)]
        arng = (np.min(tmp), np.max(tmp))
    if erng is None:
        tmp = ecc[mask]
        tmp = tmp[np.isfinite(tmp)]
        erng = (0, np.max(tmp))
    elif pimms.is_scalar(erng): erng = (0, erng)
    (ang,wgt) = hemi.property(retino['polar_angle'], weights=weights, weight_min=weight_min,
                              weight_transform=weight_transform, yield_weight=True)
    ecc = hemi.property(retino['eccentricity'], weights=weights, weight_min=weight_min,
                        weight_transform=weight_transform, data_range=erng)
    # apply angle range if given
    ((mn,mx),mid) = (arng, np.mean(arng))
    oks = mask[np.isfinite(ang[mask])]
    u = ang[oks]
    u = np.mod(u + 180 - mid, 360) - 180 + mid
    ang[oks[np.where(~((mn <= u) & (u < mx)))[0]]] = np.inf  # exclude out-of-range angles
    # mark/unify the out-of-range ones
    bad = np.where(np.isinf(ang) | np.isinf(ecc))[0]
    ang[bad] = np.inf
    ecc[bad] = np.inf
    wgt[bad] = 0
    wgt *= zinv(np.sum(wgt[mask]))
    # get visual and surface coords
    vcoords = np.asarray(as_retinotopy(retino, 'geographical'))
    scoords = mesh.coordinates
    # now figure out the visual area so we can call down if we need to
    if visual_area is Ellipsis: visual_area = retino.get('visual_area', None)
    if visual_area is not None: retino['visual_area'] = visual_area
    if wgt is not None: retino['weights'] = wgt
    rdata = pimms.persist(retino)
    # calculate the range area
    (tmn,tmx) = [np.pi/180.0 * u for u in arng]
    if tmx - tmn >= 2*np.pi: (tmn,tmx) = (-np.pi,np.pi)
    (emn,emx) = erng
    rarea = 0.5 * (emx*emx - emn*emn) * (tmx - tmn)
    # okay, we have the data organized; we can do the calculation based on this, but we may have a
    # visual area mask to apply as well; here's how we do it regardless of mask
    def finish_mag_data(mask):
        if len(mask) == 0: return None
        # now that we have the mask, we can subsample
        submesh = mesh.submesh(mask)
        mask = mesh.tess.index(submesh.labels)
        mdata = pyr.pmap({k:(v[mask]   if pimms.is_vector(v) else
                             v[:,mask] if pimms.is_matrix(v) else
                             None)
                          for (k,v) in six.iteritems(rdata)})
        fs = submesh.tess.indexed_faces
        (vx, sx)  = [x[:,mask]                        for x in (vcoords, scoords)]
        (vfx,sfx) = [np.asarray([x[:,f] for f in fs]) for x in (vx,      sx)]
        (va, sa)  = [geo.triangle_area(*x)            for x in (vfx, sfx)]
        return pyr.m(surface_coordinates=sx, visual_coordinates=vx,
                     surface_areas=sa,       visual_areas=va,
                     mesh=mesh,              submesh=submesh,
                     retinotopy_data=rdata,  masked_data=mdata,
                     mask=mask,              area_of_range=rarea)
    # if there's no visual area, we just use the mask as is
    if visual_area is None: return finish_mag_data(mask)
    # otherwise, we return a lazy map of the visual area mask values
    visual_area = hemi.property(visual_area, mask=mask, null=0, dtype=int)
    vam = (np.unique(visual_area)                    if visual_area_mask is None     else
           np.setdiff1d(np.unique(visual_area), [0]) if visual_area_mask is Ellipsis else
           np.unique(list(visual_area_mask)))
    return pimms.lazy_map({va: curry(finish_mag_data, mask[visual_area[mask] == va])
                           for va in vam})
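A hypothetical call pattern (the subject variable is made up; when a visual-area property is found, the result is keyed by area label as the docstring notes):

# md = mag_data(sub.lh, retinotopy='any', eccentricity_range=(0, 7))
# v1 = md[1]                                        # data for visual area 1
# cmag = v1['surface_areas'] / v1['visual_areas']   # areal magnification per face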
Example #26
class HCPRetinotopyDataset(Dataset):
    '''
    neuropythy.data['hcp_retinotopy'] is a Dataset containing the publicly provided data from the
    Benson et al. (2018; DOI:10.1167/18.13.23) paper on the HCP 7T retinotopy dataset. For more
    information see the paper's OSF site (https://osf.io/bw9ec/)

    You do not have to explicitly configure anything in order to use this dataset: the default
    behavior is to use the same cache directory as the 'hcp' dataset, meaning that your subject
    directories will gain 'retinotopy' subdirectories containing cached mgz files. These
    'retinotopy' subdirectories exist at the same level as 'MNINonLinear' and 'T1w'.

    It is recommended that you interact with the data via the neuropythy.data['hcp'] dataset--this
    dataset stores the structural data necessary to make sense of the retinotopic data and will
    automatically include the relevant retinotopic data as properties attached to any subject
    requested that has retinotopic data.

    If you wish to explicitly disable hcp_retinotopy downloading, you can do so by setting the
    'hcp_auto_download' config item to either False or 'structure' (indicating that structural
    downloads should continue but retinotopic ones should not). Note that this latter setting does
    not actually prevent you from inducing downloading via the 'hcp_retinotopy' dataset directly,
    but it will prevent the 'hcp' dataset from inducing such downloads.

    The default behavior of the hcp_retinotopy dataset is to put cache files in the same directories
    as the auto-downloaded HCP subject data; a separate cache directory can be provided via the
    neuropythy hcp_retinotopy_cache_path config item.
    '''
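    # A hypothetical configuration sketch based on the docstring above (whether
    # ny.config accepts item assignment this way is an assumption):
    #   import neuropythy as ny
    #   ny.config['hcp_auto_download'] = 'structure'  # structural downloads only
    #   sub = ny.data['hcp'].subjects[100610]         # gains prf_* properties only
    #                                                 # if retinotopy is enabled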
    default_url = 'osf://bw9ec/'
    retinotopy_files = pyr.pmap({
        32: 'prfresults.mat',
        59: 'prfresults59k.mat'
    })
    retinotopy_prefix = 'prf'
    lowres_retinotopy_prefix = 'lowres-prf'
    highres_retinotopy_prefix = 'highres-prf'
    subject_ids = tuple([
        100610, 102311, 102816, 104416, 105923, 108323, 109123, 111312, 111514,
        114823, 115017, 115825, 116726, 118225, 125525, 126426, 128935, 130114,
        130518, 131217, 131722, 132118, 134627, 134829, 135124, 137128, 140117,
        144226, 145834, 146129, 146432, 146735, 146937, 148133, 150423, 155938,
        156334, 157336, 158035, 158136, 159239, 162935, 164131, 164636, 165436,
        167036, 167440, 169040, 169343, 169444, 169747, 171633, 172130, 173334,
        175237, 176542, 177140, 177645, 177746, 178142, 178243, 178647, 180533,
        181232, 181636, 182436, 182739, 185442, 186949, 187345, 191033, 191336,
        191841, 192439, 192641, 193845, 195041, 196144, 197348, 198653, 199655,
        200210, 200311, 200614, 201515, 203418, 204521, 205220, 209228, 212419,
        214019, 214524, 221319, 233326, 239136, 246133, 249947, 251833, 257845,
        263436, 283543, 318637, 320826, 330324, 346137, 352738, 360030, 365343,
        380036, 381038, 385046, 389357, 393247, 395756, 397760, 401422, 406836,
        412528, 429040, 436845, 463040, 467351, 525541, 536647, 541943, 547046,
        550439, 552241, 562345, 572045, 573249, 581450, 585256, 601127, 617748,
        627549, 638049, 644246, 654552, 671855, 680957, 690152, 706040, 724446,
        725751, 732243, 751550, 757764, 765864, 770352, 771354, 782561, 783462,
        789373, 814649, 818859, 825048, 826353, 833249, 859671, 861456, 871762,
        872764, 878776, 878877, 898176, 899885, 901139, 901442, 905147, 910241,
        926862, 927359, 942658, 943862, 951457, 958976, 966975, 971160, 973770,
        995174, 999997, 999998, 999999
    ])

    # these filename patterns expect value % (hemi, split) or, for the native maps,
    # value % (hemi, split, alignment)
    _retinotopy_cache_tr = pimms.persist({
        # the native filenames here aren't actually used; they would also include a '.linear.' for
        # linear interpolation right before the 'native59k' or 'native32k' (these are the filenames
        # for nearest interpolation)
        'native': {
            (highres_retinotopy_prefix + '_polar_angle'):
            '%s.split%s_angle.%s.native59k.mgz',
            (highres_retinotopy_prefix + '_eccentricity'):
            '%s.split%s_eccen.%s.native59k.mgz',
            (highres_retinotopy_prefix + '_radius'):
            '%s.split%s_prfsz.%s.native59k.mgz',
            (highres_retinotopy_prefix + '_variance_explained'):
            '%s.split%s_vexpl.%s.native59k.mgz',
            (highres_retinotopy_prefix + '_mean_signal'):
            '%s.split%s_means.%s.native59k.mgz',
            (highres_retinotopy_prefix + '_gain'):
            '%s.split%s_const.%s.native59k.mgz',
            (lowres_retinotopy_prefix + '_polar_angle'):
            '%s.split%s_angle.%s.native32k.mgz',
            (lowres_retinotopy_prefix + '_eccentricity'):
            '%s.split%s_eccen.%s.native32k.mgz',
            (lowres_retinotopy_prefix + '_radius'):
            '%s.split%s_prfsz.%s.native32k.mgz',
            (lowres_retinotopy_prefix + '_variance_explained'):
            '%s.split%s_vexpl.%s.native32k.mgz',
            (lowres_retinotopy_prefix + '_mean_signal'):
            '%s.split%s_means.%s.native32k.mgz',
            (lowres_retinotopy_prefix + '_gain'):
            '%s.split%s_const.%s.native32k.mgz'
        },
        'LR32k': {
            (lowres_retinotopy_prefix + '_polar_angle'):
            '%s.split%s_angle.32k.mgz',
            (lowres_retinotopy_prefix + '_eccentricity'):
            '%s.split%s_eccen.32k.mgz',
            (lowres_retinotopy_prefix + '_radius'): '%s.split%s_prfsz.32k.mgz',
            (lowres_retinotopy_prefix + '_variance_explained'):
            '%s.split%s_vexpl.32k.mgz',
            (lowres_retinotopy_prefix + '_mean_signal'):
            '%s.split%s_means.32k.mgz',
            (lowres_retinotopy_prefix + '_gain'): '%s.split%s_const.32k.mgz'
        },
        'LR59k': {
            (highres_retinotopy_prefix + '_polar_angle'):
            '%s.split%s_angle.59k.mgz',
            (highres_retinotopy_prefix + '_eccentricity'):
            '%s.split%s_eccen.59k.mgz',
            (highres_retinotopy_prefix + '_radius'):
            '%s.split%s_prfsz.59k.mgz',
            (highres_retinotopy_prefix + '_variance_explained'):
            '%s.split%s_vexpl.59k.mgz',
            (highres_retinotopy_prefix + '_mean_signal'):
            '%s.split%s_means.59k.mgz',
            (highres_retinotopy_prefix + '_gain'): '%s.split%s_const.59k.mgz'
        }
    })

    def __init__(self,
                 url=Ellipsis,
                 cache_directory=Ellipsis,
                 interpolation_method=Ellipsis,
                 meta_data=None,
                 create_directories=True,
                 create_mode=0o755):
        cdir = cache_directory
        if cdir is Ellipsis: cdir = config['hcp_auto_path']
        if cdir is Ellipsis:
            cdir = config['hcp_subject_paths']
            if cdir is not None: cdir = next(iter(cdir), None)
        Dataset.__init__(self,
                         'hcp_retinotopy',
                         meta_data=meta_data,
                         custom_directory=cdir,
                         create_directories=create_directories,
                         create_mode=create_mode)
        if url is Ellipsis: url = HCPRetinotopyDataset.default_url
        self.url = url
        self.interpolation_method = interpolation_method

    @pimms.param
    def url(u):
        '''
        ny.data['hcp_retinotopy'].url is the url from which the retinotopy data is loaded.
        '''
        if not pimms.is_str(u): raise ValueError('url must be a string')
        return u

    @pimms.param
    def interpolation_method(im):
        '''
        ny.data['hcp_retinotopy'].interpolation_method is a string, either 'nearest' (default) or
        'linear', which specifies whether nearest or linear interpolation should be used when
        interpolating retinotopy data from the fs_LR meshes onto the native meshes.
        '''
        if im is Ellipsis or im is None:
            return config['hcp_retinotopy_interpolation_method']
        else:
            return to_interpolation_method(im)

    @pimms.value
    def pseudo_path(url, cache_directory):
        '''
        ny.data['hcp_retinotopy'].pseudo_path is the pseudo-path object responsible for loading the
        retinotopy data.
        '''
        return pseudo_path(url, cache_path=cache_directory).persist()

    @pimms.value
    def cifti_data(pseudo_path):
        '''
        ny.data['hcp_retinotopy'].cifti_data is a tuple of lazy maps of the 32k and 59k data arrays,
        reorganized into 'visual' retinotopic coordinates. The tuple elements represent the
        (full, split1, split2) solutions.
        '''

        # our loader function:
        def _load(res, split):
            import h5py
            flnm = HCPRetinotopyDataset.retinotopy_files[res]
            logging.info(
                'HCPRetinotopyDataset: Loading split %d from file %s...' %
                (split, flnm))
            flnm = pseudo_path.local_path(flnm)
            with h5py.File(flnm, 'r') as f:
                sids = np.array(f['subjectids'][0], dtype='int')
                data = np.array(f['allresults'][split])
            sids.setflags(write=False)
            # convert these into something more coherent
            tmp = hcp.cifti_split(data)
            for q in tmp:
                q.setflags(write=False)
            return pyr.pmap({
                h:
                pyr.m(prf_polar_angle=np.mod(90 - dat[:, 0] + 180, 360) - 180,
                      prf_eccentricity=dat[:, 1],
                      prf_radius=dat[:, 5],
                      prf_variance_explained=dat[:, 4] / 100.0,
                      prf_mean_signal=dat[:, 3],
                      prf_gain=dat[:, 2],
                      prf_x=dat[:, 1] * np.cos(np.pi / 180 * dat[:, 0]),
                      prf_y=dat[:, 1] * np.sin(np.pi / 180 * dat[:, 0]))
                for (h, dat) in zip(['lh', 'rh', 'subcortical'], tmp)
            })

        splits = [
            pimms.lazy_map({
                res: curry(_load, res, split)
                for res in six.iterkeys(HCPRetinotopyDataset.retinotopy_files)
            }) for split in [0, 1, 2]
        ]
        return tuple(splits)

    @pimms.value
    def subject_order(pseudo_path):
        '''
        subject_order is a mapping of subject ids to the offset at which they appear in the cifti
        data.
        '''
        import h5py

        def _load(res):
            flnm = HCPRetinotopyDataset.retinotopy_files[res]
            flnm = pseudo_path.local_path(flnm)
            logging.info(
                'HCPRetinotopyDataset: Loading subjects from file %s...' %
                flnm)
            with h5py.File(flnm, 'r') as f:
                sids = np.array(f['subjectids'][0], dtype='int')
            return pyr.pmap({sid: ii for (ii, sid) in enumerate(sids)})

        return pimms.lazy_map({
            res: curry(_load, res)
            for res in six.iterkeys(HCPRetinotopyDataset.retinotopy_files)
        })

    @pimms.value
    def retinotopy_data(cifti_data, subject_order, cache_directory,
                        create_directories, create_mode):
        '''
        ny.data['hcp_retinotopy'].retinotopy_data is a nested-map data structure representing the
        retinotopic mapping data for each subject. The first layer of keys in retinotopy_data is
        the subject id, including the 99999* average subjects. The second layer of keys is the name
        of the hemisphere, and the third layer of keys consists of property names (the final
        layer is always constructed of lazy maps).
        '''
        rpfx = HCPRetinotopyDataset.retinotopy_prefix
        sids = HCPRetinotopyDataset.subject_ids

        # how we load data:
        def _load_LR(split, sid, h, res, prop, flpatt):
            pth = os.path.join(cache_directory, str(sid), 'retinotopy')
            if not os.path.exists(pth) and create_directories:
                os.makedirs(pth, create_mode)
            flnm = os.path.join(pth, flpatt % (h, split))
            # deduce the generic property name from the given prop name
            pp = rpfx + prop.split(rpfx)[1]
            # as a general rule, we don't want to touch the cifti_data lazy maps, since doing
            # so forces the entire .mat file to load; BUT if that file has already been
            # loaded, we are better off sharing the in-memory data than re-reading the cache
            cdat = cifti_data[split]
            okwrite = True
            sii = subject_order[res][sid]
            if not cdat.is_lazy(res) or not os.path.isfile(flnm):
                dat = cdat[res][h][pp][sii]
            else:
                try:
                    return nyio.load(flnm)
                except Exception:
                    warnings.warn(
                        'failed to load HCP retinotopy cache file %s' % flnm)
                    okwrite = False
                dat = cdat[res][h][pp][sii]
            if okwrite: nyio.save(flnm, dat)
            return dat
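
        # in short, _load_LR takes one of three paths: (1) the cifti data is already loaded
        # or no cache file exists, so we slice the array and (re)write the cache; (2) a cache
        # file exists and loads cleanly, so we return it without forcing the .mat file to
        # load; (3) the cache file fails to load, so we warn, fall back to the .mat data, and
        # skip rewriting the bad file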

        # okay, the base properties for the 32k and 59k meshes:
        trs = HCPRetinotopyDataset._retinotopy_cache_tr
        base_props = {
            sid: {
                h: {
                    split: {
                        res: pimms.lazy_map({
                            p: curry(_load_LR, split, sid, h, res, p, flp)
                            for (p, flp) in six.iteritems(trs['LR%dk' % res])
                        })
                        for res in [32, 59]
                    }
                    for split in [0, 1, 2]
                }
                for h in ['lh', 'rh']
            }
            for sid in sids
        }

        # okay, now we build on top of these: we add in the x/y properties
        def _add_xy(dat):
            k = next(six.iterkeys(dat))
            prefix = k.split('_')[0] + '_'
            (ang,
             ecc) = [dat[prefix + k] for k in ('polar_angle', 'eccentricity')]
            tht = np.pi / 180 * (90 - ang)
            (x, y) = [ecc * np.cos(tht), ecc * np.sin(tht)]
            return pimms.assoc(dat, prefix + 'x', x, prefix + 'y', y)
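
        # hedged check: at polar angle 90 (the right horizontal meridian in this convention)
        # and eccentricity 2, tht == 0, so _add_xy adds prf_x == 2 and prf_y == 0:
        #   >>> d2 = _add_xy(pyr.m(prf_polar_angle=np.array([90.]),
        #   ...                    prf_eccentricity=np.array([2.])))
        #   >>> float(d2['prf_x'][0]), float(d2['prf_y'][0])   # (2.0, 0.0)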

        xy_props = {
            sid: {
                h: {
                    split: pimms.lazy_map({
                        res: curry(_add_xy, rdat)
                        for (res, rdat) in six.iteritems(spdat)
                    })
                    for (split, spdat) in six.iteritems(hdat)
                }
                for (h, hdat) in six.iteritems(sdat)
            }
            for (sid, sdat) in six.iteritems(base_props)
        }

        # okay, that's it; just organize it into the desired shape
        def _reorg(hdat, res, ks):
            pfx = ks[0].split('_')[0] + '_'
            ks = ks + [pfx + 'x', pfx + 'y']
            f = lambda s, k: s[res][k]
            return pimms.lazy_map({
                (pre + k): curry(f, s, k)
                for k in ks for (s, pre) in zip([hdat[0], hdat[1], hdat[2]],
                                                ['', 'split1-', 'split2-'])
            })

        r = {
            sid: {('%s_LR%dk' % (h, res)):
                  _reorg(hdat, res, list(base_props[sid][h][0][res].keys()))
                  for (h, hdat) in six.iteritems(sdat) for res in [32, 59]}
            for (sid, sdat) in six.iteritems(xy_props)
        }
        return pimms.persist(r)
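
        # the resulting shape, hedged (the subject id is illustrative and the property names
        # assume the 'prf_*' keys built above):
        #   >>> retinotopy_data[100610]['lh_LR32k']['prf_polar_angle']          # full solution
        #   >>> retinotopy_data[100610]['lh_LR32k']['split1-prf_polar_angle']   # half-split fit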

    @pimms.value
    def subjects(retinotopy_data, cache_directory, create_mode,
                 create_directories, interpolation_method):
        '''
        hcp_retinotopy.subjects is a lazy map whose keys are HCP subject IDs (of those subjects with
        valid retinotopic mapping data) and whose values are subject objects (obtained from the
        ny.data['hcp']._subjects map) with the addition of retinotopic mapping properties.
        '''
        alltrs = HCPRetinotopyDataset._retinotopy_cache_tr
        lrtrs = {32: alltrs['LR32k'], 59: alltrs['LR59k']}
        lpfx = HCPRetinotopyDataset.lowres_retinotopy_prefix
        hpfx = HCPRetinotopyDataset.highres_retinotopy_prefix
        nttrs = {
            32: {
                k: v
                for (k, v) in six.iteritems(alltrs['native'])
                if k.startswith(lpfx)
            },
            59: {
                k: v
                for (k, v) in six.iteritems(alltrs['native'])
                if k.startswith(hpfx)
            }
        }

        def _prep(sid):
            # see if we can get the subject from the 'hcp' dataset:
            try:
                from neuropythy import data
                hcp = data['hcp']
                sub = hcp._subjects[sid]
            except Exception:
                hcp = None
            if hcp is None:
                raise ValueError('could not load subject object for sid %s' %
                                 (sid, ))
            # okay, we need to prep this subject; the initial part is easy: copy the
            # retinotopy properties over to the subject's fs_LR meshes:
            hems = hems0 = sub.hemis

            def _add_LR(hnm, res):
                hem = hems0[hnm]
                return hem.with_prop(retinotopy_data[sid]['%s_LR%dk' %
                                                          (hnm[:2], res)])

            for h in ['lh', 'rh']:
                for res in [32, 59]:
                    for align in ['MSMAll', 'MSMSulc']:
                        hnm = '%s_LR%dk_%s' % (h, res, align)
                        hems = hems.set(hnm, curry(_add_LR, hnm, res))
            # okay, we have the data transferred over to the fs_LR hemispheres now; we just need
            # to do some interpolation for the native hemispheres
            hems1 = hems

            def _get(inp, k):
                return inp[k]

            def _interp_hem(hemi, h, res, align):
                pfls = nttrs[res]
                pfx = next(six.iterkeys(pfls)).split('_')[0] + '_'
                inpargs = dict(native_hemi=hemi,
                               fs_LR_hemi=hems1['%s_LR%dk_%s' %
                                                (h, res, align)],
                               subject_id=sid,
                               method=interpolation_method,
                               resolution=res,
                               alignment=align,
                               cache_directory=cache_directory)
                lm = pimms.lazy_map({
                    (p + k): curry(_get, inp, k)
                    for spl in ['', 'split1-', 'split2-'] for p in [spl + pfx]
                    for inp in
                    [interpolate_native_properties(prefix=p, **inpargs)]
                    for k in hcp_retinotopy_property_names
                })
                return hemi.with_prop(lm)
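
            # note: _interp_hem attaches properties like 'lowres-prf_polar_angle' and
            # 'split1-lowres-prf_polar_angle' to the native hemisphere, assuming the
            # dataset's lowres-/highres- prefix conventions carry through nttrs above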

            def _get_highest_res(hemi, k):
                # prefer the high-res (59k) interpolation; fall back to the 32k one; a
                # property that loads but is None also triggers the fallback
                try:
                    x = hemi.prop(hpfx + '_' + k)
                except Exception:
                    x = None
                if x is not None: return x
                try:
                    x = hemi.prop(lpfx + '_' + k)
                except Exception:
                    x = None
                if x is not None: return x
                raise ValueError('no retinotopy successfully loaded for hemi',
                                 hemi)

            def _interp_nat(h, align):
                # okay, let's get the hemisphere we are modifying...
                hemi = hems1['%s_native_%s' % (h, align)]
                # we're going to interp from both the 32k and 59k meshes (if possible)
                for res in [32, 59]:
                    hemi = _interp_hem(hemi, h, res, align)
                # add the 'best/standard' prf header:
                pfx = HCPRetinotopyDataset.retinotopy_prefix
                lm = pimms.lazy_map({(pfx + '_' + k):
                                     curry(_get_highest_res, hemi, k)
                                     for k in hcp_retinotopy_property_names})
                # return with these properties:
                return hemi.with_prop(lm)

            for h in ['lh', 'rh']:
                for align in ['MSMAll', 'MSMSulc']:
                    hems = hems.set(h + '_native_' + align,
                                    curry(_interp_nat, h, align))
            # fix the hemisphere aliases based on default alignment:
            default_alignment = sub.meta_data.get(
                'default_alignment', config['hcp_default_alignment'])
            hems2 = hems
            for h in [
                    'lh_native', 'rh_native', 'lh_LR32k', 'rh_LR32k',
                    'lh_LR59k', 'rh_LR59k', 'lh_LR164k', 'rh_LR164k'
            ]:
                hems = hems.set(
                    h, curry(lambda h: hems2[h + '_' + default_alignment], h))
            hems = hems.set('lh',
                            lambda: hems2['lh_native_' + default_alignment])
            hems = hems.set('rh',
                            lambda: hems2['rh_native_' + default_alignment])
            return sub.copy(hemis=hems)

        # we just need to call down to this prep function lazily:
        return pimms.lazy_map(
            {sid: curry(_prep, sid)
             for sid in six.iterkeys(retinotopy_data)})
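
        # hedged usage sketch (the subject id is illustrative; 'prf' is the dataset's
        # retinotopy_prefix per the docstrings above):
        #   >>> sub = ny.data['hcp_retinotopy'].subjects[100610]
        #   >>> sub.hemis['lh'].prop('prf_polar_angle')   # highest-res interpolation available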
Example #27
0
 def _load_distances(sid, h):
     # load the ventral, dorsal, and horizontal distance maps for subject sid, hemisphere h
     flnm = pseudo_path.local_path('distances', '%s_%s.mgz' % (sid, h))
     # unpack into hz rather than h so we don't shadow the hemisphere argument
     (v, d, hz) = load(flnm).T
     return pimms.persist({'ventral': v, 'dorsal': d, 'horizontal': hz})
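
     # hedged usage sketch (the subject id and hemisphere are illustrative, and this assumes
     # pseudo_path is in scope as in the surrounding dataset code):
     #   >>> dists = _load_distances(111312, 'lh')
     #   >>> dists['ventral']   # one of the three per-vertex distance arrays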