Example #1
def scaled_gabor_kernel(cpp, theta=0, zero_mean=True, **kwargs):
    '''
    scaled_gabor_kernel(...) is identical to gabor_kernel(...) except that the resulting kernel is
    scaled such that the response of the kernel to a grating of identical frequency and angle and
    min/max values of -/+ 1 is 1.
    scaled_gabor_kernel has one additional argument, zero_mean (default: True), which specifies
    whether the kernel should be given a zero mean value. The gabor_kernel function alone does not
    do this, but scaled_gabor_kernel does by default, unless zero_mean is set to False.
    '''
    if pimms.is_quantity(cpp): cpp = cpp.to(units.cycle / units.px).m
    if pimms.is_quantity(theta): theta = theta.to(units.rad).m
    kern = gabor_kernel(cpp, theta=theta, **kwargs)
    # First, zero-mean them
    if zero_mean:
        kern = (kern.real -
                np.mean(kern.real)) + 1j * (kern.imag - np.mean(kern.imag))
    # Next, make the max response grating
    (n, m) = kern.shape
    (cn, cm) = [0.5 * (q - 1) for q in [n, m]]
    (costh, sinth) = (np.cos(theta), np.sin(theta))
    mtx = (2 * np.pi * cpp) * np.asarray(
        [[costh * (col - cm) + sinth * (row - cn) for col in range(m)]
         for row in range(n)])
    re = kern.real / np.sum(np.abs(kern.real * np.cos(mtx)))
    im = kern.imag / np.sum(np.abs(kern.imag * np.sin(mtx)))
    return np.asarray(re + 1j * im)
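A quick check of the scaling property described in the docstring, assuming scaled_gabor_kernel and its dependencies are importable; the grid construction below mirrors the one inside the function:

import numpy as np

cpp, theta = 0.1, 0.0
kern = scaled_gabor_kernel(cpp, theta=theta)
(n, m) = kern.shape
(cn, cm) = (0.5 * (n - 1), 0.5 * (m - 1))
(rows, cols) = np.mgrid[0:n, 0:m]
phase = 2 * np.pi * cpp * (np.cos(theta) * (cols - cm) + np.sin(theta) * (rows - cn))
# The even (real) part should respond to a matched cosine grating with a value
# of approximately 1 after the normalization above:
print(np.sum(kern.real * np.cos(phase)))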
Example #2
def calc_pRF_centers(polar_angles, eccentricities):
    '''
    calc_pRF_centers is a calculation that transforms polar_angles and eccentricities into
    pRF_centers, which are (x,y) coordinates, in degrees, in the visual field. 

    Required afferent parameters:
      @ polar_angles The polar angle values (obtained from import_benson14_from_freesurfer).
      @ eccentricities The eccentricity values (also from import_benson14_from_freesurfer).

    Efferent output values:
      @ pRF_centers Will be an n x 2 numpy matrix of the (x,y) pRF centers for each pRF in the
        visual field

    Notes:
      * The polar_angles and eccentricities parameters are expected to use pint's unit system and
        be either in degrees or radians
    '''
    if not pimms.is_quantity(polar_angles):
        warnings.warn(
            'polar_angles is not a quantity; assuming that it is in degrees')
        polar_angles = polar_angles * units.degree
    if not pimms.is_quantity(eccentricities):
        warnings.warn(
            'eccentricities is not a quantity; assuming that it is in degrees')
        eccentricities = eccentricities * units.degree
    angle_unit = eccentricities.u
    # Get the pRF centers:
    ang = np.pi / 2 - polar_angles.to('rad').m
    xs = eccentricities * np.cos(ang)
    ys = eccentricities * np.sin(ang)
    pRF_centers = np.asarray([xs.to(angle_unit).m,
                              ys.to(angle_unit).m]).T * angle_unit
    # That's it:
    return pRF_centers.to('deg')
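A hypothetical direct call, assuming the same units registry used above and that the function can be invoked as a plain function (in a pimms calculation plan it would normally run as part of the plan):

import numpy as np

angles = np.asarray([0.0, 90.0]) * units.degree   # measured from the upper vertical meridian
eccens = np.asarray([1.0, 2.0]) * units.degree
print(calc_pRF_centers(angles, eccens))
# -> approximately [[0, 1], [2, 0]] deg: a 0-degree polar angle maps to the
#    positive y-axis and a 90-degree polar angle to the positive x-axis.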
Example #3
 def to_cycles_per_pixel(self, cpd):
     '''
     f.to_cycles_per_pixel(cpd) yields the given cpd value in cycles per pixel; if cpd has no
     units it is assumed to be in units of cycles per degree. This uses the conversion factor
     stored in f.pixels_per_degree to make any required conversion.
     '''
     cpp = None
     if pimms.is_quantity(cpd):
         try:
             cpp = cpd.to(
                 units.cycles / units.degree) / self.pixels_per_degree
         except:
             cpp = None
         if cpp is None:
             try:
                 cpp = cpd.to(units.cycles / units.pixel)
             except:
                 raise ValueError(
                     'frequency must be in cycles/degree or cycles/pixel')
     else:
         cpp = (cpd *
                (units.cycles / units.degree)) / self.pixels_per_degree
     # also, cast to float...
     cpp = float(cpp.m) * cpp.u
     return cpp
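A worked conversion matching the arithmetic above, with hypothetical numbers and the same units registry: a grating at 2 cycles/degree viewed at 10 pixels/degree corresponds to 0.2 cycles/pixel.

cpd = 2.0 * (units.cycles / units.degree)
d2p = 10.0 * (units.px / units.degree)
print((cpd / d2p).to(units.cycles / units.px))   # 0.2 cycle / pixel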
Example #4
 def pixels_per_degree(d2p):
     '''
     f.pixels_per_degree is the number of pixels per degree in the visual field of the image
     array tracked by the ImageArrayContrastFilter object f.
     '''
     d2p_unit = units.px / units.degree
     d2p = d2p.to(d2p_unit) if pimms.is_quantity(d2p) else d2p * d2p_unit
     return d2p
Example #5
def gabor_kernel(cpp, theta=0, mean=0, scaling='max_response', **kwargs):
    '''
    gabor_kernel(cpp) is identical to skimage.filters.gabor_kernel(cpp) except that the
      resulting kernel is scaled and standardized according to the optional arguments.

    Options:
      * theta (default: 0) is the angle of the Gabor to pass to skimage.filters.gabor_kernel.
      * mean (default: 0) specifies that the Gabor matrix should be made to have the given mean
        value. If None, then this is left unchanged from skimage.filters.gabor_kernel.
      * scaling (default: 'max_response') specifies how the Gabor matrix should be scaled. If 
        None, then the matrix is left unchanged from skimage.filters.gabor_kernel. Other valid
        values are 'max_response'--scale the Gabor such that its response to a cosine grating with
        a frequency of cpp (the first parameter) cycles per pixel is equal to 1; 'volume'--scale the
        matrix to have unit volume; or 'max'--scale the matrix so that the max of its absolute value
        is 1.
      * All other options are passed to skimage.filters.gabor_kernel.
    '''
    import numpy as np
    from skimage.filters import gabor_kernel as gk
    if _pimms.is_quantity(cpp): cpp = cpp.to(units.cycle / units.px).m
    if _pimms.is_quantity(theta): theta = theta.to(units.rad).m
    kern = gk(cpp, theta=theta, **kwargs)
    # First, fix the mean
    if mean is not None: kern = kern - np.mean(kern) + mean
    # Then normalize
    if scaling is None: return kern
    scaling = scaling.lower()
    if scaling == 'max_response':
        (n,m) = kern.shape
        (cn,cm) = [0.5*(q - 1) for q in [n,m]]
        (costh, sinth) = (np.cos(theta), np.sin(theta))
        mtx = (2*np.pi*cpp) * np.asarray(
            [[costh*(col - cm) - sinth*((n-row) - cn) for col in range(m)]
             for row in range(n)])
        mtx = np.cos(mtx)
        kern /= np.sum(np.abs(kern * mtx))
    elif scaling == 'max':
        kern /= np.max(np.abs(kern))
    elif scaling == 'volume':
        kern /= np.sqrt(np.sum(np.abs(kern.flatten())**2))
    else:
        raise ValueError('unrecognized scaling parameter: %s' % scaling)
    return kern
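A brief illustration of the scaling options, assuming the snippet's imports resolve; the first two checks follow directly from the normalizations above:

import numpy as np

k_max = gabor_kernel(0.1, theta=0, scaling='max')
k_vol = gabor_kernel(0.1, theta=0, scaling='volume')
k_rsp = gabor_kernel(0.1, theta=0, scaling='max_response')
print(np.max(np.abs(k_max)))        # 1.0: largest absolute value is 1
print(np.sum(np.abs(k_vol)**2))     # 1.0: unit sum of squared magnitudes
print(k_rsp.shape == k_max.shape)   # True: scaling never changes the kernel size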
Example #6
 def sigma(sig):
     '''
     prf.sigma is the pRF sigma parameter in degrees; see also radius.
     '''
     if pimms.is_quantity(sig):
         if sig.u == units.rad:   sig = sig.to(units.deg)
         elif not (sig.u == units.deg):
             raise ValueError('pRF sigma must be in degrees or radians')
     else:
         sig = sig * units.deg
     if sig <= 0: raise ValueError('sigma must be positive')
     return sig
Example #7
 def gabor_orientations(go):
     '''
     f.gabor_orientations is a read-only numpy array of the gabor orientations at which to
     examine contrast; all elements are in radians.
     '''
     if not hasattr(go, '__iter__'):
         go = np.asarray(range(go), dtype=np.dtype(float).type)
         go *= np.pi / float(len(go))
     urad = units.rad
     go = urad * np.asarray(
         [g.to(urad).m if pimms.is_quantity(g) else g for g in go],
         dtype=np.dtype(float).type)
     go.setflags(write=False)
     return go
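Treated as a standalone function, an integer argument yields that many evenly spaced orientations over the half circle (a hypothetical direct call; in context this is a parameter of the filter object):

print(gabor_orientations(4))
# -> [0, pi/4, pi/2, 3*pi/4] in radians, i.e. roughly [0.0, 0.785, 1.571, 2.356] rad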
Example #8
 def test_units(self):
     '''
     test_units ensures that the various pimms functions related to pint integration work
     correctly; these functions include pimms.unit, .mag, .quant, .is_quantity, etc.
     '''
     # make a few pieces of data with types
     x = np.asarray([1.0, 2.0, 3.0, 4.0]) * pimms.units.mm
     y = pimms.quant([2, 4, 6, 8], 'sec')
     for u in [x, y]:
         self.assertTrue(pimms.is_quantity(u))
     for u in ('abc', 123, 9.0, []):
         self.assertFalse(pimms.is_quantity(u))
     for u in [x, y]:
         self.assertFalse(pimms.is_quantity(pimms.mag(u)))
     self.assertTrue(pimms.like_units(pimms.unit(x), pimms.unit('yards')))
     self.assertTrue(pimms.like_units(pimms.unit(y), pimms.unit('minutes')))
     self.assertFalse(pimms.like_units(pimms.unit(y), pimms.unit('mm')))
     z = x / y
     self.assertTrue(pimms.is_vector(x, 'real'))
     self.assertTrue(pimms.is_vector(y, 'real'))
     self.assertFalse(pimms.is_vector(x, 'int'))
     self.assertTrue(pimms.is_vector(y, 'int'))
     self.assertTrue(pimms.is_vector(y, 'float'))
     self.assertTrue(pimms.is_vector(z, 'real'))
Example #9
 def center(pt):
     '''
     prf.center is the (x,y) coordinate vector of the center of the pRF in degrees.
     '''
     if pimms.is_quantity(pt):
         if pt.u == units.rad: pt = pt.to(units.deg)
         elif not (pt.u == units.deg):
             raise ValueError('pRF centers must be in degrees or radians')
     else:
         # assume degrees
         pt = pt * units.deg
     pt = np.asarray(pt.m) * pt.u
     pt.setflags(write=False)
     if len(pt.shape) != 1 or pt.shape[0] != 2:
         raise ValueError('pRF centers must be 2D')
     return pt
Example #10
 def _params(self, imshape, d2p):
     if len(imshape) > 2: imshape = imshape[-2:]
     if not pimms.is_quantity(d2p): d2p = d2p * (units.px/units.deg)
     imshape = imshape * units.px
     x0 = np.asarray([(imshape[0]*0.5 - self.center[1]*d2p).to(units.px).m,
                      (imshape[1]*0.5 + self.center[0]*d2p).to(units.px).m])
     rad = self.radius * d2p
     dst = (self.n_radii * rad).m
     rrng0 = (int(np.floor(x0[0] - dst)), int(np.ceil(x0[0] + dst)))
     crng0 = (int(np.floor(x0[1] - dst)), int(np.ceil(x0[1] + dst)))
     rrng = (max([rrng0[0], 0]), min([rrng0[1], imshape[0].m]))
     crng = (max([crng0[0], 0]), min([crng0[1], imshape[1].m]))
     if any(s[1] - s[0] <= 0 for s in [rrng, crng]):
         print (self.center, self.sigma, self.exponent, self.n_radii)
         print (imshape, d2p, dst, rrng0, rrng, crng0, crng)
         raise ValueError('Bad image or std given to PRFSpec._params()')
     return (x0, rad, dst, rrng, crng, rrng0, crng0)
Example #11
def calc_contrast_energies(contrast_filter, divisive_normalization_function,
                           divisive_normalization_parameters,
                           cpd_sensitivities):
    '''
    calc_contrast_energies is a calculator that performs divisive normalization on the filtered
    contrast images and yields a nested map of contrast energy arrays; the keys of the
    contrast_energies map are persistent maps of the divisive normalization parameters, its values
    are maps whose keys are spatial frequencies (in cycles per degree), and those inner maps'
    values are the 3D contrast energy arrays.

    Required afferent parameters:
      * contrast_filter
      * divisive_normalization_function, divisive_normalization_parameters
      * cpd_sensitivities

    Output efferent values:
      @ contrast_energies Will be a nested map whose first level of keys are persistent-maps of the
        divisive normalization parameters and whose second level of keys are a set of frequencies;
        the values at the second level are the stacks of contrast energy images for the particular
        divisive normalization parameters and frequencies specified in the keys.
    '''
    # first, calculate the contrast energies at each frequency for all images then we combine them;
    # since this reuses images internally when the parameters are the same, it shouldn't be too
    # inefficient:
    divnfn = divisive_normalization_function
    params = divisive_normalization_parameters
    all_cpds = np.unique([
        k.to(units.cycle / units.deg).m if pimms.is_quantity(k) else k
        for s in cpd_sensitivities for k in s.iterkeys()
    ])
    all_cpds = all_cpds * (units.cycles / units.degree)
    rsps = {
        cpd: vw.contrast_energy
        for cpd in all_cpds for vw in
        [ImageArrayContrastView(contrast_filter, cpd, divnfn, params)]
    }
    # flip this around...
    flip = {}
    for (k0, v0) in rsps.iteritems():
        for (k1, v1) in v0.iteritems():
            if k1 not in flip: flip[k1] = {}
            flip[k1][k0] = v1
    rsps = pyr.pmap({k: pyr.pmap(v) for (k, v) in flip.iteritems()})
    return {'contrast_energies': rsps}
Example #12
def calc_contrast_filter(image_array,
                         pixels_per_degree,
                         normalized_pixels_per_degree,
                         gabor_orientations,
                         background,
                         cpd_sensitivities,
                         use_spatial_gabors=False):
    '''
    calc_contrast_filter is a calculator that takes as input a normalized image stack and various
    parameter data and produces an ImageArrayContrastFilter object, contrast_filter, which can be
    called as contrast_filter(frequency) to yield a map whose keys are gabor orientations (in
    radians) and whose values are image stacks with the same shape as image_array but that have
    been filtered at the key's orientation and the given frequency.

    Required afferent values:
      * image_array
      * normalized_pixels_per_degree
      * gabor_orientations
      * background
      @ use_spatial_gabors Must be either True (use spatial gabor filters instead of the steerable
        pyramid) or False (use the steerable pyramid); by default this is False.

    Efferent output values:
      @ contrast_filter Will be an object of type ImageArrayContrastFilter that can filter the image
        array at arbitrary frequencies and divisive normalization parameters.
    '''
    if normalized_pixels_per_degree is None:
        normalized_pixels_per_degree = pixels_per_degree
    all_cpds = np.unique([
        k.to(units.cycle / units.deg).m if pimms.is_quantity(k) else k
        for s in cpd_sensitivities for k in s.iterkeys()
    ])
    # find this difference...
    bw = np.mean(np.abs(all_cpds[1:] - all_cpds[:-1]) / all_cpds[:-1])
    # all the parameter checking and transformation is handled in this class
    return ImageArrayContrastFilter(image_array,
                                    normalized_pixels_per_degree,
                                    gabor_orientations,
                                    background,
                                    spatial_gabors=use_spatial_gabors,
                                    bandwidth=bw)
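A small worked example of the bandwidth estimate above, with hypothetical frequencies: when each frequency doubles the previous one, the mean relative spacing is 1.

import numpy as np

all_cpds = np.asarray([0.75, 1.5, 3.0])   # cycles/degree
bw = np.mean(np.abs(all_cpds[1:] - all_cpds[:-1]) / all_cpds[:-1])
print(bw)                                 # 1.0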
Example #13
 def frequency(cpd):
     '''
     rsp.frequency is the frequency at which the image array response results are calculated.
     This may be in cycles per degree or cycles per pixel; alternately use rsp.cpd or rsp.cpp.
     '''
     if pimms.is_quantity(cpd):
         org = cpd
         try:
             cpd = org.to(units.cycles / units.degree)
         except:
             cpd = None
         if cpd is None:
             try:
                 cpd = org.to(units.cycles / units.pixel)
             except:
                 raise ValueError(
                     'frequency must be in cycles/degree or cycles/pixel')
     else:
         cpd = cpd * (units.cycles / units.degree)
     # also, cast to float...
     return float(cpd.m) * cpd.u
Example #14
 def postprocess_image(self, img, d):
     from nibabel.nifti1 import slice_order_codes
     hdr = img.header
     # dimension information:
     for k in ['dimension_information', 'dim_info', 'diminfo']:
         try:
             hdr.set_dim_info(*d[k])
             break
         except Exception:
             pass
     try:
         hdr.set_intent(d['intent'])
     except Exception:
         pass
     # xyzt_units:
     try:
         sunit = self.unit_to_name(pimms.unit(d['voxel_size']))
     except Exception:
         try:
             sunit = self.unit_to_name(pimms.unit(d['voxel_unit']))
         except Exception:
             sunit = 'unknown'
     try:
         tunit = self.unit_to_name(pimms.unit(d['slice_duration']))
     except Exception:
         try:
             tunit = self.unit_to_name(pimms.unit(d['time_unit']))
         except Exception:
             tunit = 'unknown'
     try:
         hdr.set_xyzt_units(sunit, tunit)
     except Exception:
         pass
     # qform and sform
     try:
         try:
             q = to_affine(d['qform'])
         except Exception:
             q = to_affine(d['affine'])
         qc = d.get('qform_code', None)
         hdr.set_qform(q, qc)
     except Exception:
         pass
     try:
         try:
             s = to_affine(d['sform'])
         except Exception:
             s = to_affine(d['affine'])
         sc = d.get('sform_code', None)
         hdr.set_sform(s, sc)
     except Exception:
         pass
     # slice code
     try:
         hdr['slice_code'] = slice_order_codes[d['slice_order']]
     except Exception:
         pass
     # slice duration
     try:
         dur = d['slice_duration']
         if pimms.is_quantity(dur):
             if tunit == 'unknown': dur = pimms.mag(dur)
             else: dur = pimms.mag(dur, tunit)
         hdr.set_slice_duration(dur)
     except Exception:
         pass
     # slice timing
     try:
         ts = d['slice_times']
         if pimms.is_quantity(ts):
             if tunit == 'unknown': ts = pimms.mag(ts)
             else: ts = pimms.mag(ts, tunit)
         hdr.set_slice_times([None if np.isnan(t) else t for t in ts])
     except Exception:
         pass
     # slope / intercept
     try:
         hdr.set_slope_inter(d.get('data_slope', None),
                             d.get('data_offset', None))
     except Exception:
         pass
     # calibration
     try:
         (cmn, cmx) = d['calibration']
         hdr['cal_min'] = cmn
         hdr['cal_max'] = cmx
     except Exception:
         pass
     # time offset
     try:
         t0 = d['time_offset']
         if pimms.is_quantity(t0):
             if tunit != 'unknown': t0 = pimms.mag(t0, tunit)
             else: t0 = pimms.mag(t0)
         hdr['toffset'] = t0
     except Exception:
         pass
     # description
     try:
         hdr['descrip'] = d['description']
     except Exception:
         pass
     # auxiliary filename
     try:
         hdr['aux_file'] = d['auxiliary_filename']
     except Exception:
         pass
     return img
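An illustrative metadata dict covering a few of the keys the method looks up (the key names come from the lookups above; the values are made up):

import numpy as np

d = {'dim_info':       (0, 1, 2),                    # frequency-, phase-, slice-encoding dims
     'voxel_size':     [1.0, 1.0, 1.0] * pimms.units.mm,
     'slice_duration': 0.05 * pimms.units.second,
     'affine':         np.eye(4),                    # used for both qform and sform here
     'qform_code':     1,
     'sform_code':     1,
     'calibration':    (0.0, 100.0),
     'description':    'example acquisition'}
# img = self.postprocess_image(img, d)  # img being a nibabel NIfTI-1 image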
Example #15
def import_benson14_from_freesurfer(freesurfer_subject,
                                    max_eccentricity,
                                    modality='surface',
                                    import_filter=None):
    '''
    import_benson14_from_freesurfer is a calculation that imports (or creates then imports) the
    Benson et al. (2014) template of retinotopy for the subject, whose neuropythy.freesurfer
    Subject object must be provided in the parameter freesurfer_subject. The optional parameter
    modality (default: 'surface') may be either 'volume' or 'surface', and determines if the loaded
    modality is volumetric or surface-based.

    Required afferent parameters:
      @ freesurfer_subject Must be a valid neuropythy.freesurfer.Subject object.
 
    Optional afferent parameters:
      @ modality May be 'volume' or 'surface' to specify the anatomical modality.
      @ max_eccentricity May specify the maximum eccentricity value to use.
      @ import_filter If specified, may give a function that accepts four parameters:
        f(polar_angle, eccentricity, label, hemi); if this function fails to return True for the 
        appropriate values of a particular vertex/voxel, then that vertex/voxel is not included in
        the prediction.

    Provided efferent values:
      @ polar_angles    Polar angle values for each vertex/voxel.
      @ eccentricities  Eccentricity values for each vertex/voxel.
      @ labels          An integer label 1, 2, or 3 for V1, V2, or V3, one per vertex/voxel.
      @ hemispheres     1 if left, -1 if right, for each vertex/voxel.
      @ cortex_indices  For vertices, the vertex index (in the appropriate hemisphere) for each;
                        for voxels, the (i,j,k) voxel index for each.
      @ cortex_coordinates For voxels, this is the (i,j,k) voxel index (same as cortex_indices);
                        for surfaces, this is the (x,y,z) position of each vertex in surface-space.

    Notes:
      * polar_angles are always given such that a negative polar angle indicates a RH value and a
        positive polar angle indicates a LH value
      * cortex_indices is different for surface and volume modalities
      * labels will always be 1, 2, or 3 indicating V1, V2, or V3

    '''
    max_eccentricity = max_eccentricity.to(units.deg) if pimms.is_quantity(max_eccentricity) else \
                       max_eccentricity*units.deg
    subject = freesurfer_subject
    if modality.lower() == 'volume':
        # make sure there are template volume files that match this subject
        ang = os.path.join(subject.path, 'mri', 'benson14_angle.mgz')
        ecc = os.path.join(subject.path, 'mri', 'benson14_eccen.mgz')
        lab = os.path.join(subject.path, 'mri', 'benson14_varea.mgz')
        if not os.path.exists(ang) or not os.path.exists(
                ecc) or not os.path.exists(lab):
            # Apply the template first...
            neurocmd.benson14_retinotopy.main(subject.path)
        if not os.path.exists(ang) or not os.path.exists(
                ecc) or not os.path.exists(lab):
            raise ValueError('No areas template found/created for subject: ' +
                             lab)
        angle_mgz = fs.mghformat.load(ang)
        eccen_mgz = fs.mghformat.load(ecc)
        label_mgz = fs.mghformat.load(lab)
        ribbon_mgzs = (subject.mgh_images['lh.ribbon'],
                       subject.mgh_images['rh.ribbon'])
        # The variables are all mgz volumes, so we need to extract the values:
        labels = np.round(np.abs(label_mgz.dataobj.get_unscaled()))
        angles = angle_mgz.dataobj.get_unscaled()
        eccens = eccen_mgz.dataobj.get_unscaled()
        (lrib, rrib) = [r.dataobj.get_unscaled() for r in ribbon_mgzs]
        # Find the voxel indices first:
        # for now we only look at v1-v3
        labels[labels > 3] = 0
        coords = np.asarray(np.where(labels.astype(bool))).T
        # Grab the hemispheres; filter down if something isn't in the ribbon
        tmp = [(1 if lrib[i, j, k] == 1 else -1, (i, j, k))
               for (i, j, k) in coords
               if lrib[i, j, k] != 0 or rrib[i, j, k] != 0
               if eccens[i, j, k] < max_eccentricity.m]
        hemis = np.asarray([r[0] for r in tmp], dtype=np.int)
        idcs = np.asarray([r[1] for r in tmp], dtype=np.int)
        coords = np.asarray(idcs, dtype=np.float)
        # Pull out the angle/eccen data
        angs0 = np.asarray([angles[i, j, k] for (i, j, k) in idcs])
        angles = angs0 * hemis
        eccens = np.asarray([eccens[i, j, k] for (i, j, k) in idcs],
                            dtype=np.float)
        labels = np.asarray([labels[i, j, k] for (i, j, k) in idcs],
                            dtype=np.int)
    elif modality.lower() == 'surface':
        rx = freesurfer_subject.RH.midgray_surface.coordinates.T
        lx = freesurfer_subject.LH.midgray_surface.coordinates.T
        # make sure there are template volume files that match this subject
        lang = os.path.join(subject.path, 'surf', 'lh.benson14_angle.mgz')
        lecc = os.path.join(subject.path, 'surf', 'lh.benson14_eccen.mgz')
        llab = os.path.join(subject.path, 'surf', 'lh.benson14_varea.mgz')
        rang = os.path.join(subject.path, 'surf', 'rh.benson14_angle.mgz')
        recc = os.path.join(subject.path, 'surf', 'rh.benson14_eccen.mgz')
        rlab = os.path.join(subject.path, 'surf', 'rh.benson14_varea.mgz')
        (lang, lecc, llab, rang, recc, rlab) = [
            flnm if os.path.isfile(flnm) else flnm[:-4]
            for flnm in (lang, lecc, llab, rang, recc, rlab)
        ]
        if not os.path.exists(lang) or not os.path.exists(rang) or \
           not os.path.exists(lecc) or not os.path.exists(recc) or \
           not os.path.exists(llab) or not os.path.exists(rlab):
            # Apply the template first...
            neurocmd.benson14_retinotopy.main(subject.path)
        if not os.path.exists(lang) or not os.path.exists(rang) or \
           not os.path.exists(lecc) or not os.path.exists(recc) or \
           not os.path.exists(llab) or not os.path.exists(rlab):
            raise ValueError(
                'No anatomical template found/created for subject')
        (lang, lecc, llab, rang, recc, rlab) = [
            neuro.load(fl) for fl in (lang, lecc, llab, rang, recc, rlab)
        ]
        llab = np.round(np.abs(llab))
        rlab = np.round(np.abs(rlab))
        (angs0, eccs, labs) = [
            np.concatenate([ldat, rdat], axis=0)
            for (ldat, rdat) in zip([lang, lecc, llab], [rang, recc, rlab])
        ]
        idcs = np.concatenate([range(len(lang)), range(len(rang))], axis=0)
        valid = np.intersect1d(
            np.intersect1d(np.where(labs > 0)[0],
                           np.where(labs < 4)[0]),
            np.where(eccs < max_eccentricity.m)[0])
        idcs = idcs[valid]
        coords = np.concatenate([lx, rx], axis=0)[valid]
        hemis = np.concatenate([[1 for a in lang], [-1 for a in rang]],
                               axis=0)[valid]
        # old versions of the template had positive numbers in both hemispheres...
        if np.mean(angs0[valid[hemis == -1]]) > 0:
            angles = angs0[valid] * hemis
        else:
            angles = angs0[valid]
        eccens = eccs[valid]
        labels = np.asarray(labs[valid], dtype=np.int)
    else:
        raise ValueError('Option modality must be \'surface\' or \'volume\'')
    # do the filtering and convert to pvectors
    if import_filter is None:
        res = {
            'polar_angles': units.degree * angles,
            'eccentricities': units.degree * eccens,
            'labels': labels,
            'cortex_indices': idcs,
            'cortex_coordinates': coords,
            'hemispheres': hemis
        }
    else:
        sels = [
            i for (i, (p, e, l,
                       h)) in enumerate(zip(angles, eccens, labels, hemis))
            if import_filter(p, e, l, h)
        ]
        res = {
            'polar_angles': units.degree * angles[sels],
            'eccentricities': units.degree * eccens[sels],
            'labels': labels[sels],
            'cortex_indices': idcs[sels],
            'cortex_coordinates': coords[sels],
            'hemispheres': hemis[sels]
        }
    # make sure they're all read-only
    for v in res.itervalues():
        v.setflags(write=False)
    return res
Example #16
 def chop(x):
     x = float(
         x.to(units.cycle / units.degree).m if pimms.is_quantity(x) else x)
     return np.round(x, 5)
Example #17
def calc_pRF_SOC(pRFs, contrast_energies, cpd_sensitivities,
                 divisive_normalization_parameters, contrast_constants,
                 pixels_per_degree, normalized_pixels_per_degree):
    '''
    calc_pRF_SOC is a calculator that is responsible for calculating the individual SOC responses
    of the pRFs by extracting their pRFs from the contrast_energies and weighting them according
    to the cpd_sensitivities.

    Required afferent parameters:
      * pRFs
      * contrast_energies
      * cpd_sensitivities
      * divisive_normalization_parameters

    Provided efferent parameters:
      @ pRF_SOC Will be an array of the second-order-contrast energies, one per pRF per image;
        these will be stored in an (n x m) matrix where n is the number of pRFs and m is the
        number of images.
    '''
    if normalized_pixels_per_degree is None:
        normalized_pixels_per_degree = pixels_per_degree
    d2p = normalized_pixels_per_degree
    d2p = d2p.to(
        units.px /
        units.deg) if pimms.is_quantity(d2p) else d2p * (units.px / units.deg)
    params = divisive_normalization_parameters
    n = len(next(next(contrast_energies.itervalues()).itervalues()))
    m = len(pRFs)
    imshape = next(next(
        contrast_energies.itervalues()).itervalues()).shape[1:3]
    imlen = imshape[0] * imshape[1]
    socs = np.zeros((m, n))

    # we want to avoid numerical mismatch, so we round the keys to the nearest 10^-5
    def chop(x):
        x = float(
            x.to(units.cycle / units.degree).m if pimms.is_quantity(x) else x)
        return np.round(x, 5)

    contrast_energies = {
        k0: {chop(k): v
             for (k, v) in v0.iteritems()}
        for (k0, v0) in contrast_energies.iteritems()
    }
    for (i, prf, p, ss, c) in zip(range(m), pRFs, params.rows,
                                  cpd_sensitivities, contrast_constants):
        wts = None
        uu = None
        for (cpd, w) in ss.iteritems():
            cpd = chop(cpd)
            if wts is None:
                (u, wts) = prf(contrast_energies[p][cpd], d2p, c=None)
                uu = np.zeros(u.shape)
            else:
                u = prf(contrast_energies[p][cpd], d2p, c=None,
                        weights=False)[0]
            uu += w * u
        # Here is the SOC formula: (x - c<x>)^2
        wts = npml.repmat(wts, len(uu), 1)
        mu = np.sum(wts * uu, axis=1)
        socs[i, :] = np.sum(wts *
                            (uu - c * npml.repmat(mu, uu.shape[1], 1).T)**2,
                            axis=1)
    socs.setflags(write=False)
    return socs
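A small numeric sketch of the SOC computation in the loop above, (x - c<x>)^2 averaged with the pRF weights, using made-up values:

import numpy as np

w = np.asarray([0.2, 0.3, 0.5])     # pRF weights over pixels (sum to 1)
u = np.asarray([1.0, 2.0, 4.0])     # summed (sensitivity-weighted) contrast energy per pixel
c = 0.9                             # contrast constant
mu = np.sum(w * u)                  # weighted mean <x>
soc = np.sum(w * (u - c * mu)**2)   # second-order contrast
print(mu, soc)                      # 2.8 and approximately 1.638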