Example #1
def test_splinelg_mle_small_rf():
    w_true, X, y, dims, dt = generate_2d_rf_data(noise='white')

    df = [3, 4]
    model = splineLG(X, y, dims=dims, dt=dt, df=df, compute_mle=True)

    assert mse(uvec(model.w_mle), uvec(w_true.flatten())) < 1e-1
Example #2
    def fit_nonparametric_nonlinearity(self, nbins=50, w=None):
        """Estimate the output nonlinearity nonparametrically, as the ratio
        of the spike-triggered to the raw filter-output histograms."""
        if w is None:
            # fall back on the best available fitted filter
            if self.w_spl is not None:
                w = self.w_spl.flatten()
            elif self.w_mle is not None:
                w = self.w_mle.flatten()
            elif self.w_sta is not None:
                w = self.w_sta.flatten()
            else:
                raise ValueError('No fitted filter found. Fit a filter first or pass `w` explicitly.')
        else:
            w = jnp.array(w)

        X = self.X
        X = X.reshape(X.shape[0], -1)
        y = self.y

        output_raw = X @ uvec(w)          # filter output for all samples
        output_spk = X[y != 0] @ uvec(w)  # filter output for spiking samples

        hist_raw, bins = jnp.histogram(output_raw, bins=nbins, density=True)
        hist_spk, _ = jnp.histogram(output_spk, bins=bins, density=True)

        mask = hist_raw != 0  # skip empty bins to avoid division by zero

        yy0 = hist_spk[mask] / hist_raw[mask]

        self.nl_bins = bins[1:]
        self.fnl_nonparametric = interp1d(bins[1:][mask], yy0)
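A hypothetical usage sketch (not from the source; assumes `model` is a fitted splineLG-like instance from this codebase with `w_spl`, `X` and `y` populated):

model.fit_nonparametric_nonlinearity(nbins=50)
fnl = model.fnl_nonparametric  # callable mapping filter output to firing rate
# caveat: as a plain scipy interp1d, it raises for inputs outside the fitted bin range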
Example #3
def test_splinelg_small_rf():
    w_true, X, y, dims, dt = generate_2d_rf_data(noise='white')

    df = [3, 4]
    model = splineLG(X, y, dims=dims, dt=dt, df=df)
    model.fit(metric='corrcoef', num_iters=100, verbose=0, tolerance=10, beta=0.01)

    assert mse(uvec(model.w_opt), uvec(w_true.flatten())) < 1e-1
Example #4
def test_glm_3d_outputnl():
    w_true, X, y, dims, dt = generate_3d_rf_data(noise='white')
    df = tuple([int(np.maximum(dim / 3, 3)) for dim in dims])

    model = GLM(distr='gaussian', output_nonlinearity='exponential')
    model.add_design_matrix(X, dims=dims, df=df, smooth='cr', kind='train', filter_nonlinearity='none',
                            name='stimulus')

    model.initialize(num_subunits=1, dt=dt, method='mle', random_seed=42, compute_ci=False, y=y)
    model.fit(y={'train': y}, num_iters=300, verbose=0, step_size=0.03, beta=0.001, metric='corrcoef')

    assert model.score(X, y, metric='corrcoef') > 0.4
    assert mse(uvec(model.w['opt']['stimulus']), uvec(w_true.flatten())) < 0.01
Example #5
def test_asd_small_rf():
    w_true, X, y, dims, dt = generate_2d_rf_data(noise='white')

    model = ASD(X, y, dims=dims)
    model.fit(p0=[
        1.,
        1.,
        6.,
        6.,
    ], num_iters=10, verbose=10)

    w_fit = model.optimized_C_post @ X.T @ y / model.optimized_params[0]**2

    assert mse(uvec(w_fit), uvec(w_true.flatten())) < 1e-1
Example #6
def V1complex_2d(dims=(30, 40), scale=(.025, .03)):
    dt = 1 / 60  # time bin size
    nt = dims[0]  # number of time bins
    nx = dims[1]  # number of spatial pixels
    tt = np.arange(-nt * dt, 0, dt)

    # two gamma-shaped temporal filters with opposite signs
    kt1 = scipy.stats.gamma.pdf(-tt, dims[0] / 7.5, scale=scale[0])
    kt2 = scipy.stats.gamma.pdf(-tt, dims[1] / 6, scale=scale[1])
    kt1 /= np.linalg.norm(kt1)
    kt2 /= -np.linalg.norm(kt2)

    kt = np.vstack([kt1, kt2]).T

    xx = np.linspace(-2, 2, nx)

    # quadrature pair of Gabor-like spatial profiles
    kx1 = np.cos(2 * np.pi * xx / 2 + np.pi / 5) * np.exp(
        -1 / (2 * 0.35**2) * xx**2)
    kx2 = np.sin(2 * np.pi * xx / 2 + np.pi / 5) * np.exp(
        -1 / (2 * 0.35**2) * xx**2)

    kx1 /= np.linalg.norm(kx1)
    kx2 /= np.linalg.norm(kx2)

    kx = np.vstack([kx1, kx2])

    # space-time filter: sum of the two separable (rank-1) components
    k = kt @ kx

    return uvec(k)
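A minimal sanity check with the default arguments (an assumed usage, and assuming `uvec` rescales its input to unit norm, as its name suggests):

import numpy as np

k = V1complex_2d(dims=(30, 40))
print(k.shape)            # (30, 40)
print(np.linalg.norm(k))  # ~1.0 after uvec normalization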
Example #7
def bs(x, df, degree=3):
    """
    
    B-spline basis. Knots placed equally by percentile.
    
    Simplified from `patsy.bs`:
    https://github.com/pydata/patsy/blob/master/patsy/splines.py
    
    """

    from scipy.interpolate import BSpline

    def _get_all_sorted_knots(_x, _df, _degree):
        order = _degree + 1
        n_inner_knots = _df - order
        knot_quantiles = np.linspace(0, 1, n_inner_knots + 2)[1:-1] * 100
        inner_knots = np.percentile(_x, knot_quantiles)
        all_knots = np.hstack(([np.min(_x), np.max(_x)] * order, inner_knots))
        all_knots = np.sort(all_knots)

        return all_knots

    x = np.asarray(x)
    df = np.asarray(df)

    knots = _get_all_sorted_knots(x, df, degree)
    n_bases = len(knots) - (degree + 1)
    coeff = np.eye(n_bases)
    basis = np.vstack(
        [BSpline(knots, coeff[i], degree)(x) for i in range(n_bases)]).T

    return uvec(basis)
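A minimal usage sketch with hypothetical values: each of the `df` columns is one B-spline basis function evaluated at the sample points.

import numpy as np

x = np.linspace(0., 10., 100)
B = bs(x, df=8)
print(B.shape)  # (100, 8): one column per basis function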
Example #8
def test_ald_small_rf():
    w_true, X, y, dims, dt = generate_2d_rf_data(noise='white')

    sigma0 = [1.3]
    rho0 = [0.8]
    params_t0 = [3., 20., 3., 20.9]  # taus, nus, tauf, nuf
    params_y0 = [3., 20., 3., 20.9]
    p0 = sigma0 + rho0 + params_t0 + params_y0
    model = ALD(X, y, dims=dims)
    model.fit(p0=p0, num_iters=30, verbose=10)

    w_fit = model.optimized_C_post @ X.T @ y / model.optimized_params[0]**2

    assert mse(uvec(w_fit), uvec(w_true.flatten())) < 1e-1
Example #9
def test_glm_3d_history():
    w_true, X, y, dims, dt = generate_3d_rf_data(noise='white')
    (X_train, y_train), (X_dev, y_dev), (_, _) = split_data(X, y, dt, frac_train=0.8, frac_dev=0.2)

    df = tuple([int(np.maximum(dim / 3, 3)) for dim in dims])

    model = GLM(distr='gaussian', output_nonlinearity='none')
    model.add_design_matrix(X_train, dims=dims, df=df, smooth='cr', kind='train', filter_nonlinearity='none',
                            name='stimulus')
    model.add_design_matrix(y_train, dims=[5], df=[3], smooth='cr', kind='train', filter_nonlinearity='none',
                            name='history')

    model.add_design_matrix(X_dev, dims=dims, df=df, name='stimulus', kind='dev')
    model.add_design_matrix(y_dev, dims=[5], df=[3], kind='dev', name='history')

    model.initialize(num_subunits=1, dt=dt, method='mle', random_seed=42, compute_ci=False, y=y_train)
    model.fit(
        y={'train': y_train, 'dev': y_dev}, num_iters=200, verbose=100, step_size=0.03, beta=0.001, metric='corrcoef')

    assert model.score({"stimulus": X_train, 'history': y_train}, y_train, metric='corrcoef') > 0.6
    assert model.score({"stimulus": X_dev, 'history': y_dev}, y_dev, metric='corrcoef') > 0.4
    assert mse(uvec(model.w['opt']['stimulus']), uvec(w_true.flatten())) < 0.01
Example #10
def gabor2d(dims=(25, 25), omega=0.5, theta=np.pi / 6, func=np.cos, K=1.):
    radius = (int(dims[1] / 2.0), int(dims[0] / 2.0))
    [x, y] = np.meshgrid(range(-radius[0], radius[0] + 1),
                         range(-radius[1], radius[1] + 1))

    x1 = x * np.cos(theta) + y * np.sin(theta)
    y1 = -x * np.sin(theta) + y * np.cos(theta)

    gauss = omega**2 / (4 * np.pi * K**2) * np.exp(-omega**2 / (8 * K**2) *
                                                   (4 * x1**2 + y1**2))
    sinusoid = func(omega * x1) * np.exp(K**2 / 2)
    gabor = gauss * sinusoid

    return uvec(gabor)[:dims[0], :dims[1]]
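A quick sketch with hypothetical parameters, e.g. for seeding a simulated receptive field:

import numpy as np

k = gabor2d(dims=(25, 25), omega=0.5, theta=np.pi / 6)
print(k.shape)  # (25, 25)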
Example #11
def tp(x, df):
    """
    
    Simplified implementation of the truncated Thin Plate (TP) regression spline.
    See Wood, S. (2017) p.216-217

    """
    def eta(r):
        return r**2 * np.log(r + 1e-10)

    E = eta(np.abs(x.ravel() - x.ravel().reshape(-1, 1)))
    U, _, _ = np.linalg.svd(E)
    basis = U[:, :int(df)]

    return uvec(basis)
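A minimal usage sketch with hypothetical values: the basis consists of the first `df` left singular vectors of the radial distance matrix.

import numpy as np

x = np.linspace(0., 1., 50)
T = tp(x, df=5)
print(T.shape)  # (50, 5)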
Example #12
def cr(x, df):
    """
    
    Natural cubic regression splines. Knots placed equally by percentile. 
    
    Simplified from `patsy.cr`:
    https://github.com/pydata/patsy/blob/master/patsy/mgcv_cubic_splines.py
    
    """
    def _get_all_sorted_knots(_x, _df):
        n_inner_knots = _df - 2
        knot_quantiles = np.linspace(0, 1, n_inner_knots + 2)[1:-1] * 100
        inner_knots = np.percentile(np.unique(_x), knot_quantiles)
        all_knots = np.concatenate(([np.min(_x), np.max(_x)], inner_knots))
        all_knots = np.unique(all_knots)

        return all_knots

    knots = _get_all_sorted_knots(x, df)

    j = np.maximum(np.searchsorted(knots, x) - 1, 0)
    h = np.mean(knots[1:] - knots[:-1])  # constant

    ajm = (knots[j + 1] - x) / h
    ajp = (x - knots[j]) / h
    cjm = ((knots[j + 1] - x)**3 / h - h * (knots[j + 1] - x)) / 6
    cjp = ((x - knots[j])**3 / h - h * (x - knots[j])) / 6

    # tridiagonal matrices B and D from Wood (2017); f = B^{-1} D maps the
    # spline values at the knots to their second derivatives
    B = np.sum([
        np.diag([1, 2, 1][i] * h * np.ones(df - [3, 2, 3][i]), [-1, 0, 1][i]) /
        [6, 3, 6][i] for i in range(3)
    ], 0)
    D = np.sum([[1, -2, 1][i] * np.pad(
        np.eye(df - 2) / h, pad_width=((0, 0), (i, 2 - i)), mode='constant')
                for i in range(3)], 0)

    f = np.vstack([np.zeros(df), np.linalg.solve(B, D), np.zeros(df)])
    i = np.eye(df)

    basis = ajm * i[j, :].T + ajp * i[j + 1, :].T + cjm * f[j, :].T + cjp * f[
        j + 1, :].T

    return uvec(basis.T)
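A minimal usage sketch with hypothetical values: with `df=5`, three inner knots plus the two boundary knots are placed at percentiles of `x`.

import numpy as np

x = np.arange(20.)
C = cr(x, df=5)
print(C.shape)  # (20, 5)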
Example #13
def subunits2d(num_subunits=5,
               dims=(25, 25),
               std=(3, 3),
               offset=(3, 3),
               kind='gaussian',
               random_seed=2046):
    if kind == 'gaussian':
        filter2d = gaussian2d
    elif kind == 'mexicanhat':
        filter2d = mexicanhat2d
    else:
        raise NotImplementedError(kind)

    w = np.zeros(list(dims) + [num_subunits])
    f = filter2d(dims, std)  # prototype filter shared by all subunits
    for i in range(num_subunits):
        if random_seed is not None:
            np.random.seed(random_seed + i + 5)
        # shift each subunit to a random position around the center
        h, v = np.random.randint(low=-offset[0], high=offset[1], size=2)
        w[:, :, i] = uvec(np.roll(np.roll(f, h, axis=0), v, axis=1))

    return w
Example #14
    def fit_STC(self,
                prewhiten=False,
                n_repeats=10,
                percentile=100.,
                random_seed=2046,
                verbose=5):
        """

        Spike-triggered Covariance Analysis.

        Parameters
        ==========

        prewhiten: bool

        n_repeats: int
            Number of repeats for STC significance test.

        percentile: float
            Valid range of STC significance test.

        verbose: int
        random_seed: int
        """
        def get_stc(_X, _y, _w):

            n = len(_X)
            ste = _X[_y != 0]  # spike-triggered ensemble
            # project the (unit-norm) filter component out of each stimulus
            proj = ste - (ste @ _w)[:, None] * _w[None, :]
            stc = proj.T @ proj / (n - 1)

            _eigvec, _eigval, _ = jnp.linalg.svd(stc)

            return _eigvec, _eigval

        key = random.PRNGKey(random_seed)

        y = self.y

        if prewhiten:

            if self.compute_mle is False:
                self.XtX = self.X.T @ self.X
                self.w_mle = jnp.linalg.solve(self.XtX, self.XtY)

            X = jnp.linalg.solve(self.XtX, self.X.T).T
            w = uvec(self.w_mle)

        else:
            X = self.X
            w = uvec(self.w_sta)

        eigvec, eigval = get_stc(X, y, w)

        self.w_stc = dict()
        if n_repeats:
            print('STC significance test: ')
            eigval_null = []
            for counter in range(n_repeats):
                if verbose and counter % int(verbose) == 0:
                    print(f'  {counter + 1}/{n_repeats}')

                # derive a fresh subkey per repeat so every shuffle differs
                subkey = random.fold_in(key, counter)
                y_randomize = random.permutation(subkey, y)
                _, eigval_randomize = get_stc(X, y_randomize, w)
                eigval_null.append(eigval_randomize)
            if verbose:
                print('Done.')
            eigval_null = jnp.vstack(eigval_null)
            max_null = jnp.percentile(eigval_null, percentile)
            min_null = jnp.percentile(eigval_null, 100 - percentile)
            mask_sig_pos = eigval > max_null
            mask_sig_neg = eigval < min_null
            mask_sig = jnp.logical_or(mask_sig_pos, mask_sig_neg)

            self.w_stc['eigvec'] = eigvec
            self.w_stc['pos'] = eigvec[:, mask_sig_pos]
            self.w_stc['neg'] = eigvec[:, mask_sig_neg]

            self.w_stc['eigval'] = eigval
            self.w_stc['eigval_mask'] = mask_sig
            self.w_stc['eigval_pos_mask'] = mask_sig_pos
            self.w_stc['eigval_neg_mask'] = mask_sig_neg

            self.w_stc['max_null'] = max_null
            self.w_stc['min_null'] = min_null

        else:
            self.w_stc['eigvec'] = eigvec
            self.w_stc['eigval'] = eigval
            self.w_stc['eigval_mask'] = jnp.ones_like(eigval).astype(bool)
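A hypothetical usage sketch (not from the source; assumes `model` is an instance from this codebase with `X`, `y` and `w_sta` already computed):

model.fit_STC(prewhiten=False, n_repeats=10, percentile=97.5)
excitatory = model.w_stc['pos']   # eigenvectors above the shuffled null
suppressive = model.w_stc['neg']  # eigenvectors below the shuffled null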
Example #15
def mexicanhat1d(dims=200, std=15., a=0.8):
    g0 = gaussian1d(dims, std)
    g1 = gaussian1d(dims, std * a)
    m = g1 - 0.65 * g0
    return uvec(m)
Example #16
def mexicanhat2d(dims=(25, 25), std=(3., 3.), a=0.55):
    g0 = gaussian2d(dims, std)
    g1 = gaussian2d(dims, np.array(std) * a)
    m = g1 - 0.65 * g0
    return uvec(m)
Example #17
def gaussian1d(dim=200, std=15.):
    # scipy.signal.gaussian was removed in SciPy 1.13; use the windows module
    return uvec(scipy.signal.windows.gaussian(dim, std=std))
Example #18
def cc(x, df):
    """

    Cyclic cubic regression splines. Knots placed equally by percentile.

    Simplified from `patsy.cc`:
    https://github.com/pydata/patsy/blob/master/patsy/mgcv_cubic_splines.py

    """
    def _map_cyclic(_x, lbound, ubound):
        _x = np.copy(_x)
        _x[_x > ubound] = lbound + (_x[_x > ubound] - ubound) % (ubound -
                                                                 lbound)
        _x[_x < lbound] = ubound - (lbound - _x[_x < lbound]) % (ubound -
                                                                 lbound)

        return _x

    def _get_all_sorted_knots(_x, _df):
        n_inner_knots = _df - 2

        knot_quantiles = np.linspace(0, 1, n_inner_knots + 2)[1:-1] * 100
        inner_knots = np.percentile(np.unique(_x), knot_quantiles)

        all_knots = np.concatenate(([np.min(_x), np.max(_x)], inner_knots))
        all_knots = np.unique(all_knots)

        return all_knots

    def _get_cyclic_f(_knots):
        # banded matrices b and d from Wood (2017); f = b^{-1} d maps the
        # spline values at the knots to their second derivatives
        kd = _knots[1:] - _knots[:-1]
        n = _knots.size - 1
        b = np.zeros((n, n))
        d = np.zeros((n, n))

        b[0, 0] = (kd[n - 1] + kd[0]) / 3.
        b[0, n - 1] = kd[n - 1] / 6.
        b[n - 1, 0] = kd[n - 1] / 6.

        d[0, 0] = -1. / kd[0] - 1. / kd[n - 1]
        d[0, n - 1] = 1. / kd[n - 1]
        d[n - 1, 0] = 1. / kd[n - 1]

        for i in range(1, n):
            b[i, i] = (kd[i - 1] + kd[i]) / 3.
            b[i, i - 1] = kd[i - 1] / 6.
            b[i - 1, i] = kd[i - 1] / 6.

            d[i, i] = -1. / kd[i - 1] - 1. / kd[i]
            d[i, i - 1] = 1. / kd[i - 1]
            d[i - 1, i] = 1. / kd[i - 1]

        return np.linalg.solve(b, d)

    knots = _get_all_sorted_knots(x, df)  # length = df

    x = _map_cyclic(x, min(knots), max(knots))
    df -= 1  # cyclic constraint: the first and last bases coincide

    j = np.maximum(np.searchsorted(knots, x) - 1, 0)
    h = np.mean(knots[1:] - knots[:-1])  # constant

    ajm = (knots[j + 1] - x) / h
    ajp = (x - knots[j]) / h
    cjm = ((knots[j + 1] - x)**3 / h - h * (knots[j + 1] - x)) / 6
    cjp = ((x - knots[j])**3 / h - h * (x - knots[j])) / 6

    f = _get_cyclic_f(knots)

    i = np.eye(df)
    j1 = j + 1
    j1[j1 == df] = 0

    basis = ajm * i[j, :].T + ajp * i[j1, :].T + cjm * f[j, :].T + cjp * f[
        j1, :].T

    return uvec(basis.T)
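A minimal usage sketch with hypothetical values; note the returned basis has `df - 1` columns, because the cyclic constraint ties the first and last bases together.

import numpy as np

x = np.linspace(0., 2 * np.pi, 100)
C = cc(x, df=6)
print(C.shape)  # (100, 5)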
Example #19
def test_lnp_mle_small_rf():
    w_true, X, y, dims, dt = generate_2d_rf_data(noise='white')

    model = LNP(X, y, dims=dims, dt=dt, compute_mle=True)

    assert mse(uvec(model.w_mle), uvec(w_true.flatten())) < 1e-1
Example #20
def gabor3d(dims, std, omega, theta, func=np.cos, K=np.pi):
    g_t = np.gradient(gaussian1d(dims[0], std))
    g_s = gabor2d(dims[1:], omega, theta, func, K).flatten()
    g = np.kron(g_t, g_s)
    return uvec(g).reshape(dims)
Example #21
def gaussian3d(dims, std):
    gaussian_t = np.gradient(gaussian1d(dims[0], std[0]))
    gaussian_s = gaussian2d(dims[1:], std[1:]).flatten()
    return uvec(np.kron(gaussian_t, gaussian_s)).reshape(dims)
Example #22
def mexicanhat3d(dims, std, a=0.3):
    g_t = np.gradient(gaussian1d(dims[0], std[0]))
    m_s = mexicanhat2d(dims[1:], std[1:], a).flatten()
    m = np.kron(g_t, m_s)
    return uvec(m).reshape(dims)
Example #23
def build_spline_matrix(dims, df, smooth, dtype=np.float64):
    """
    
    Building spline matrix for n-dimensional RF (n=[1,2,3]) with tensor product smooth.
    
    Parameters
    ==========
    
    dims : list or array_like, shape (d, )
        Dimensions or shape of the RF to estimate. Assumed order [t, sx, sy]
        
    df : list or array_like, shape (d,) 
        Degree of freedom for spline / smooth basis. Same length as dims.
        
    smooth : str
        Spline or smooth to be used. Current supported methods include:
        * `bs`: B-spline
        * `cr`: Cubic Regression spline
        * `tp`: (Simplified) Thin Plate regression spline 

    dtype : dtype
        Data type S will be cast to before returning

    Return
    ======
    
    S : array_like, shape (n_features, n_spline_coef)
        Spline matrix. Each column is one basis. 

    Note
    ====

    ---outdated
    A mesh-free (actually simpler) way to do this is to get the spline bases for each dimension, 
    then calculate the kronecker product of them, for example:

    >>> dims, df = (10, 10, 10), (3, 3, 3)
    >>> St, Sy, Sx = [basis(np.arange(d), f), for d, f in zip(dims, df)]
    >>> S = np.kron(St, np.kron(Sy, Sx)) 
    
    Here we use a mesh-based `te` approach to keep consistent with the Patsy inplementation / Wood, S. (2017).
    ----

    Now we switched to the mesh-free inmplemtation.
        
    """

    ndim = len(dims)  # store RF dimemsion

    # initialize list of degree of freedom for each dimension
    if len(df) != ndim:
        raise ValueError("`df` must have the same length as `dims`")

    if smooth == 'cr':
        basis = cr  # Natural cubic regression spline
    elif smooth == 'cc':
        basis = cc  # cyclic cubic regression spline
    elif smooth == 'bs':
        basis = bs  # b-spline
    elif smooth == 'tp':
        basis = tp  # thin-plate spline
    else:
        raise ValueError("Input method `{}` is not supported.".format(smooth))

    # build spline matrix
    if ndim == 1:

        g0 = np.arange(dims[0])
        S = basis(g0.ravel(), df[0])

    elif ndim == 2:

        g0 = np.arange(dims[0])
        g1 = np.arange(dims[1])

        St = basis(g0.ravel(), df[0])
        Sx = basis(g1.ravel(), df[1])

        S = np.kron(St, Sx)

    elif ndim == 3:

        g0 = np.arange(dims[0])
        g1 = np.arange(dims[1])
        g2 = np.arange(dims[2])

        St = basis(g0.ravel(), df[0])
        Sx = basis(g1.ravel(), df[1])
        Sy = basis(g2.ravel(), df[2])

        S = np.kron(St, np.kron(Sx, Sy))

    else:
        raise NotImplementedError(ndim)

    return uvec(S).astype(dtype)
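A quick shape check with hypothetical values: for a 2D RF the spline matrix is the Kronecker product of the per-dimension bases, so the column count is the product of the degrees of freedom.

S = build_spline_matrix(dims=(20, 15), df=(7, 5), smooth='cr')
print(S.shape)  # (20 * 15, 7 * 5) = (300, 35)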
Example #24
def gaussian2d(dims=(25, 25), std=(2., 2.)):
    gaussian_x = gaussian1d(dims[0], std=std[0])
    gaussian_y = gaussian1d(dims[1], std=std[1])
    return uvec(np.kron(gaussian_x, gaussian_y)).reshape(dims)