Ejemplo n.º 1
0
def test_splinelg_mle_small_rf():
    """splineLG with compute_mle=True should recover a small 2D RF from white-noise data."""
    true_kernel, stim, resp, rf_dims, bin_dt = generate_2d_rf_data(noise='white')

    fitted = splineLG(stim, resp, dims=rf_dims, dt=bin_dt, df=[3, 4], compute_mle=True)

    # Compare unit-normalized MLE filter against the unit-normalized ground truth.
    recovery_error = mse(uvec(fitted.w_mle), uvec(true_kernel.flatten()))
    assert recovery_error < 1e-1
Ejemplo n.º 2
0
def test_splinelg_small_rf():
    """Optimized splineLG weights should recover a small 2D RF from white-noise data."""
    true_kernel, stim, resp, rf_dims, bin_dt = generate_2d_rf_data(noise='white')

    fitted = splineLG(stim, resp, dims=rf_dims, dt=bin_dt, df=[3, 4])
    fitted.fit(num_iters=100, beta=0.01, tolerance=10, metric='corrcoef', verbose=0)

    # Compare unit-normalized optimized filter against the unit-normalized ground truth.
    recovery_error = mse(uvec(fitted.w_opt), uvec(true_kernel.flatten()))
    assert recovery_error < 1e-1
Ejemplo n.º 3
0
 def compute_score(y, y_pred, metric):
     """Score a model prediction against the observed response.

     Parameters
     ----------
     y : array_like
         Observed response.
     y_pred : array_like
         Predicted response.
     metric : str
         One of 'r2', 'mse' or 'corrcoef'.

     Returns
     -------
     float
         The score under the requested metric.

     Raises
     ------
     ValueError
         If `metric` is not one of the supported names. (Previously an
         unsupported metric only printed a message and silently returned
         None, which hid the error from callers.)
     """
     if metric == 'r2':
         return r2(y, y_pred)
     if metric == 'mse':
         return mse(y, y_pred)
     if metric == 'corrcoef':
         return corrcoef(y, y_pred)
     raise ValueError(f'Metric `{metric}` is not supported.')
Ejemplo n.º 4
0
def test_glm_3d_outputnl():
    """GLM with an exponential output nonlinearity should recover a 3D RF."""
    true_kernel, stim, resp, rf_dims, bin_dt = generate_3d_rf_data(noise='white')

    # At least 3 spline degrees of freedom per dimension, roughly dims/3.
    df = tuple(int(np.maximum(d / 3, 3)) for d in rf_dims)

    glm = GLM(distr='gaussian', output_nonlinearity='exponential')
    glm.add_design_matrix(
        stim, dims=rf_dims, df=df, smooth='cr', kind='train',
        filter_nonlinearity='none', name='stimulus')

    glm.initialize(num_subunits=1, dt=bin_dt, method='mle', random_seed=42,
                   compute_ci=False, y=resp)
    glm.fit(y={'train': resp}, num_iters=300, verbose=0, step_size=0.03,
            beta=0.001, metric='corrcoef')

    assert glm.score(stim, resp, metric='corrcoef') > 0.4
    assert mse(uvec(glm.w['opt']['stimulus']), uvec(true_kernel.flatten())) < 0.01
Ejemplo n.º 5
0
def test_asd_small_rf():
    """ASD posterior mean should recover a small 2D RF from white-noise data."""
    true_kernel, stim, resp, rf_dims, _ = generate_2d_rf_data(noise='white')

    asd = ASD(stim, resp, dims=rf_dims)
    asd.fit(p0=[1., 1., 6., 6.], num_iters=10, verbose=10)

    # Posterior mean filter: C_post @ X^T y / sigma^2 (first optimized param is sigma).
    noise_var = asd.optimized_params[0] ** 2
    recovered = asd.optimized_C_post @ stim.T @ resp / noise_var

    assert mse(uvec(recovered), uvec(true_kernel.flatten())) < 1e-1
Ejemplo n.º 6
0
def test_ald_small_rf():
    """ALD posterior mean should recover a small 2D RF from white-noise data."""
    true_kernel, stim, resp, rf_dims, _ = generate_2d_rf_data(noise='white')

    # Initial hyperparameters: [sigma, rho, taus, nus, tauf, nuf (time), same (space)].
    time_params = [3., 20., 3., 20.9]
    space_params = [3., 20., 3., 20.9]
    init = [1.3, 0.8] + time_params + space_params

    ald = ALD(stim, resp, dims=rf_dims)
    ald.fit(p0=init, num_iters=30, verbose=10)

    # Posterior mean filter: C_post @ X^T y / sigma^2 (first optimized param is sigma).
    noise_var = ald.optimized_params[0] ** 2
    recovered = ald.optimized_C_post @ stim.T @ resp / noise_var

    assert mse(uvec(recovered), uvec(true_kernel.flatten())) < 1e-1

    
Ejemplo n.º 7
0
    def compute_score(y, y_pred, metric):
        """Score a model prediction against the observed response.

        Parameters
        ----------
        y : array_like
            Observed response.
        y_pred : array_like
            Predicted response.
        metric : str
            One of 'r2', 'mse' or 'corrcoef'.

        Returns
        -------
        float
            The score under the requested metric.

        Raises
        ------
        ValueError
            If `metric` is not one of the supported names. (Previously an
            unsupported metric only printed a message and silently returned
            None, which hid the error from callers.)
        """
        if metric == 'r2':
            return r2(y, y_pred)

        if metric == 'mse':
            return mse(y, y_pred)

        if metric == 'corrcoef':
            return corrcoef(y, y_pred)

        raise ValueError(f'Metric `{metric}` is not supported.')
Ejemplo n.º 8
0
def test_glm_3d_history():
    """GLM with a stimulus filter plus a spike-history filter should fit train and dev data."""
    true_kernel, stim, resp, rf_dims, bin_dt = generate_3d_rf_data(noise='white')
    train, dev, _ = split_data(stim, resp, bin_dt, frac_train=0.8, frac_dev=0.2)
    stim_train, resp_train = train
    stim_dev, resp_dev = dev

    # At least 3 spline degrees of freedom per dimension, roughly dims/3.
    df = tuple(int(np.maximum(d / 3, 3)) for d in rf_dims)

    glm = GLM(distr='gaussian', output_nonlinearity='none')

    # Training design matrices: stimulus filter and response-history filter.
    glm.add_design_matrix(stim_train, dims=rf_dims, df=df, smooth='cr',
                          kind='train', filter_nonlinearity='none', name='stimulus')
    glm.add_design_matrix(resp_train, dims=[5], df=[3], smooth='cr',
                          kind='train', filter_nonlinearity='none', name='history')

    # Matching held-out (dev) design matrices.
    glm.add_design_matrix(stim_dev, dims=rf_dims, df=df, kind='dev', name='stimulus')
    glm.add_design_matrix(resp_dev, dims=[5], df=[3], kind='dev', name='history')

    glm.initialize(num_subunits=1, dt=bin_dt, method='mle', random_seed=42,
                   compute_ci=False, y=resp_train)
    glm.fit(y={'train': resp_train, 'dev': resp_dev}, num_iters=200, verbose=100,
            step_size=0.03, beta=0.001, metric='corrcoef')

    assert glm.score({"stimulus": stim_train, 'history': resp_train},
                     resp_train, metric='corrcoef') > 0.6
    assert glm.score({"stimulus": stim_dev, 'history': resp_dev},
                     resp_dev, metric='corrcoef') > 0.4
    assert mse(uvec(glm.w['opt']['stimulus']), uvec(true_kernel.flatten())) < 0.01
Ejemplo n.º 9
0
def test_lnp_mle_small_rf():
    """LNP with compute_mle=True should recover a small 2D RF from white-noise data."""
    true_kernel, stim, resp, rf_dims, bin_dt = generate_2d_rf_data(noise='white')

    lnp = LNP(stim, resp, dims=rf_dims, dt=bin_dt, compute_mle=True)

    # Compare unit-normalized MLE filter against the unit-normalized ground truth.
    recovery_error = mse(uvec(lnp.w_mle), uvec(true_kernel.flatten()))
    assert recovery_error < 1e-1