Example #1
def get_network(train_input, train_output,
                larg, opt_params, hsm_params, noise_dist='poisson',
                test_input=None, test_output=None,
                train_data_filters=None, test_data_filters=None,
                custom_name=None,
                seed=0):
    '''
    Prepares inputs for an NDN model instance and then creates it. Returns both the inputs and the new model instance.

    Essentially just a call to `get_network_inputs(...)` followed by the creation of a new NDN instance.

    Note:
        The API of this function is convoluted for backward-compatibility reasons.
    '''
    # The seeds within NDN are applied only in ._build_graph, which happens after the weights get initialized
    # ..so we need to set them now -> when the NDN gets created -> weights get initialized as part of ._define_network
    # Update: Since PR #18 on NDN this is not necessary; the seeds are properly applied on model creation.
    # .. To keep old scripts 100% reproducible it is left here, as they might expect the seeds
    # .. to be set here and not just when a new model instance is created.
    np.random.seed(seed)
    tf.set_random_seed(seed)
        
    input_params = get_network_inputs(train_input, train_output, 
                                        larg, opt_params, 
                                        test_input, test_output, 
                                        train_data_filters, test_data_filters, 
                                        custom_name)

    hsm = NDN.NDN(hsm_params, noise_dist=noise_dist, tf_seed=seed)
    return hsm, input_params
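
These snippets assume numpy (np), tensorflow (tf), and the NDN library (NDN, NDNutils) are already imported by the surrounding project code. For reference, the core of what this wrapper adds over the bare constructor is explicit seeding plus input preparation; a minimal sketch with an illustrative single-network parameter dict (the dimensions and layer size below are placeholders, not values from the original project):

hsm_params = NDNutils.ffnetwork_params(input_dims=[1, 31, 31],
                                       layer_sizes=[10],
                                       layer_types=['normal'],
                                       act_funcs=['softplus'])
np.random.seed(0)  # mirror the wrapper's explicit seeding
model = NDN.NDN([hsm_params], noise_dist='poisson', tf_seed=0)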
Example #2
def get_net(self, seed):
    params = self.get_params()
    print(self.opt_params)
    opt_params = self.get_opt_params()
    bs = opt_params['batch_size']
    # an explicit seed in opt_params takes precedence over the argument
    seed = opt_params['seed'] if 'seed' in opt_params else seed
    net = NDN.NDN(params,
                  input_dim_list=[[
                      1, self.data_loader.width, self.data_loader.height
                  ]],
                  batch_size=bs,
                  noise_dist='poisson',
                  tf_seed=seed)
    return net
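
A side note on the seed lookup above: a 'seed' entry in opt_params takes precedence over the argument. The equivalent, slightly tidier dict idiom (with a hypothetical opt dict) is:

opt = {'batch_size': 1000}   # hypothetical opt_params without a 'seed' entry
seed = opt.get('seed', 42)   # falls back to the passed-in seed (here 42)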
Example #3
# GLM

# NDN parameters for processing the stimulus
par = NDNutils.ffnetwork_params(input_dims=[1, NX, NY, num_lags],
                                layer_sizes=[NC],
                                layer_types=['normal'],
                                normalization=[0],
                                act_funcs=['softplus'],
                                verbose=True,
                                reg_list={
                                    'd2x': [0.01],
                                    'glocal': [0.1]
                                })

# initialize GLM
glm0 = NDN.NDN([par], noise_dist='poisson')

# initialize weights with STA
# sta = (sta - np.min(sta)) / (np.max(sta) - np.min(sta))
# glm0.networks[0].layers[0].weights[:,0]=deepcopy((sta - np.min(sta)) / (np.max(sta) - np.min(sta)))

v2f0 = glm0.fit_variables(fit_biases=True)

# train initial model
_ = glm0.train(input_data=[Xstim],
               output_data=Robs,
               train_indxs=Ui,
               test_indxs=Xi,
               learning_alg='lbfgs',
               opt_params=lbfgs_params,
               fit_variables=v2f0)
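
With the GLM trained, held-out performance can be checked the same way the later examples do; a sketch assuming the same variables (null-adjusted values are in nats/spike):

LLx = glm0.eval_models(input_data=[Xstim], output_data=Robs,
                       data_indxs=Xi, nulladjusted=True)
print(np.mean(LLx))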
Example #4
        act_funcs=['quad'], verbose=True,
        reg_list={'d2t': [d2t], 'd2x':[d2x], 'l1':[loc]})

    quad = NDNutils.ffnetwork_params( 
        input_dims=[1,NX2,NX2,num_lags],
        layer_sizes=[NC],
        layer_types=['readout'], normalization=[0],
        act_funcs=['quad'], verbose=True,
        reg_list={'d2t': [d2t], 'd2x':[d2x], 'l1':[loc]})

    add_par = NDNutils.ffnetwork_params(
        xstim_n=None, ffnet_n=[0,1,2], layer_sizes=[NC],
        layer_types=['add'], act_funcs=['softplus'])

    # initialize GQM: linear + two quadratic networks combined by an add layer
    gqm0 = NDN.NDN([lin, quad, quad, add_par],  noise_dist='poisson')

    v2f0 = gqm0.fit_variables(fit_biases=False)
    v2f0[-1][-1]['biases'] = True

    stas = (Xstim.T @ (Rvalid-np.mean(Rvalid, axis=0))) / np.sum(Rvalid, axis=0)
    stas /= np.sum(stas,axis=0)
    gqm0.networks[0].layers[0].weights[:] = deepcopy(stas[:])

    # train initial model
    _ = gqm0.train(input_data=[Xstim], output_data=Rvalid,
                   train_indxs=Ui, test_indxs=Xi,
                   learning_alg='adam', opt_params=adam_params,
                   fit_variables=v2f0)

    LLx = gqm0.eval_models(input_data=Xstim, output_data=Rvalid, data_indxs=Xi, nulladjusted=True)     
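
With nulladjusted=True, eval_models reports log-likelihood relative to a null model (in nats/spike, per the histogram label in Example #8), so positive values mean a unit beats the null. A quick summary sketch, assuming LLx from the call above:

print('mean LL improvement: %.3f nats/spike' % np.mean(LLx))
print('units above null: %d of %d' % (np.sum(LLx > 0), len(LLx)))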
Example #5
stim_glm_par = NDNutils.ffnetwork_params(xstim_n=None,
                                         ffnet_n=[0, 1],
                                         layer_sizes=[NC],
                                         layer_types=['add'],
                                         act_funcs=['softplus'])

sac_glm_par = NDNutils.ffnetwork_params(xstim_n=None,
                                        ffnet_n=[0, 1, 2],
                                        layer_sizes=[NC],
                                        layer_types=['add'],
                                        act_funcs=['softplus'])

#%% Base Model

baseglm = NDN.NDN([dc_shift, con_onset_par, base_glm_par],
                  tf_seed=seed,
                  noise_dist=noise_dist)

v2f0 = baseglm.fit_variables(layers_to_skip=[[], [], [0]], fit_biases=False)
v2f0[-1][-1]['biases'] = True

_ = baseglm.train(input_data=[Time, Xcon],
                  output_data=Robs,
                  train_indxs=Ui,
                  test_indxs=Xi,
                  learning_alg=optimizer,
                  opt_params=opt_params,
                  use_dropout=False,
                  fit_variables=v2f0)

#%% Find best Regularization
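The snippet ends at the regularization-search cell. A typical search, sketched here with a hypothetical grid of d2t values, re-trains a copy of the base model at each level and keeps the best held-out likelihood:

best_LL, best_reg = -np.inf, None
for reg in [1e-3, 1e-2, 1e-1, 1.0]:  # hypothetical grid
    m = baseglm.copy_model()
    m.set_regularization('d2t', reg, layer_target=0)
    _ = m.train(input_data=[Time, Xcon], output_data=Robs,
                train_indxs=Ui, test_indxs=Xi,
                learning_alg=optimizer, opt_params=opt_params,
                fit_variables=v2f0)
    LL = np.mean(m.eval_models(input_data=[Time, Xcon], output_data=Robs,
                               data_indxs=Xi, nulladjusted=True))
    if LL > best_LL:
        best_LL, best_reg = LL, reg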
Example #6
def make_model_convolutional(base_mod, dims):
    """Convert a fitted fixed-position model into a convolutional one that tiles the full stimulus."""

    NX = dims[0]
    NY = dims[1]
    NC = base_mod.output_sizes[0]

    # find networks that process the stimulus
    stim_nets = [
        nn for nn in range(len(base_mod.network_list))
        if base_mod.network_list[nn]['xstim_n'] is not None
    ]

    par = []

    for ss in stim_nets:

        netlist_old = deepcopy(base_mod.network_list)[ss]

        # convert stimulus network params into a convolutional network
        conv_par = deepcopy(base_mod.network_list[ss])
        conv_par['input_dims'] = [1, NX, NY] + [conv_par['input_dims'][-1]]
        conv_par['layer_types'] = ['conv']
        conv_par['conv_filter_widths'] = [netlist_old['input_dims'][1]]

        par.append(deepcopy(conv_par))

    out_net = deepcopy(base_mod.network_list[-1])
    if out_net['layer_types'][0] == 'add':
        add_par = NDNutils.ffnetwork_params(
            xstim_n=None,
            ffnet_n=stim_nets,
            layer_sizes=[NX * NY * NC],
            layer_types=['add'],
            act_funcs=out_net['activation_funcs'])
        par.append(add_par)

    elif out_net['layer_types'][0] == 'side':
        out_net['layer_types'] = ['conv']
        out_net['conv_filter_widths'] = [1]
        par.append(out_net)

    cell_shift_mod = NDN.NDN(par)

    num_space = np.prod(cell_shift_mod.input_sizes[0][:-1])

    # copy stimulus-network weights verbatim (the only difference is that the output is a convolution)
    for ff in stim_nets:
        for nl in range(len(cell_shift_mod.networks[ff].layers)):
            cell_shift_mod.networks[ff].layers[nl].weights = deepcopy(
                base_mod.networks[ff].layers[nl].weights)
            cell_shift_mod.networks[ff].layers[nl].biases = deepcopy(
                base_mod.networks[ff].layers[nl].biases)

    if base_mod.networks[-1].layers[0].weights.shape[0] == len(stim_nets):
        cell_shift_mod.networks[-1].layers[0].weights = conv_expand(
            deepcopy(base_mod.networks[-1].layers[0].weights), num_space)
        cell_shift_mod.networks[-1].layers[0].biases = conv_expand(
            deepcopy(base_mod.networks[-1].layers[0].biases), num_space)
    else:  # convolutional output instead of add layer
        # copy output weights
        cell_shift_mod.networks[-1].layers[0].weights = deepcopy(
            base_mod.networks[1].layers[0].weights)
        cell_shift_mod.networks[-1].layers[0].biases = deepcopy(
            base_mod.networks[1].layers[0].biases)

    return cell_shift_mod
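
A minimal usage sketch, assuming a fitted base_mod (for example one of the GLMs above) and an Xstim built at the full NX-by-NY stimulus resolution; the converted model tiles the original receptive field over every spatial position:

conv_mod = make_model_convolutional(base_mod, [NX, NY])
pred = conv_mod.generate_prediction(input_data=[Xstim])  # NX*NY*NC outputs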
Example #7
def disparity_predictions(Einfo,
                          resp,
                          indxs=None,
                          num_dlags=8,
                          fr1or3=None,
                          spiking=True,
                          rectified=True,
                          opt_params=None):
    """Calculates a prediction of the disparity (and timing) signals that can be inferred from the response
    by the disparity input alone. This puts a lower bound on how much disparity is driving the response, although
    practically speaking will generate the same disparity tuning curves.
    
    Usage: Dpred, Tpred = disparity_predictions( Einfo, resp, indxs, num_dlags=8, spiking=True, rectified=True, opt_params=None )

    Inputs: Indices gives data range to fit to.
    Outputs: Dpred and Tpred will be length of entire experiment -- not just indxs
    """

    # Process disparity into disparity and timing design matrices
    dmat = disparity_matrix(Einfo['dispt'], Einfo['corrt'])
    ND2 = dmat.shape[1]
    if indxs is None:
        indxs = range(dmat.shape[0])

    # everything but blank
    Xd = NDNutils.create_time_embedding(dmat[:, :-1], [num_dlags, ND2 - 1, 1])
    # blank
    Xb = NDNutils.create_time_embedding(dmat[:, -1], [num_dlags, 1, 1])
    # timing
    switches = np.expand_dims(np.concatenate(
        (np.sum(abs(np.diff(dmat, axis=0)), axis=1), [0]), axis=0),
                              axis=1)
    Xs = NDNutils.create_time_embedding(switches, [num_dlags, 1, 1])

    tpar = NDNutils.ffnetwork_params(xstim_n=[0],
                                     input_dims=[1, 1, 1, num_dlags],
                                     layer_sizes=[1],
                                     verbose=False,
                                     layer_types=['normal'],
                                     act_funcs=['lin'],
                                     reg_list={
                                         'd2t': [None],
                                         'l1': [None]
                                     })
    bpar = deepcopy(tpar)
    bpar['xstim_n'] = [1]
    dpar = NDNutils.ffnetwork_params(xstim_n=[2],
                                     input_dims=[1, ND2 - 1, 1, num_dlags],
                                     layer_sizes=[1],
                                     verbose=False,
                                     layer_types=['normal'],
                                     act_funcs=['lin'],
                                     reg_list={
                                         'd2xt': [None],
                                         'l1': [None]
                                     })
    if rectified:
        comb_parT = NDNutils.ffnetwork_params(xstim_n=None,
                                              ffnet_n=[0, 1],
                                              layer_sizes=[1],
                                              verbose=False,
                                              layer_types=['normal'],
                                              act_funcs=['softplus'])
    else:
        comb_parT = NDNutils.ffnetwork_params(xstim_n=None,
                                              ffnet_n=[0, 1],
                                              layer_sizes=[1],
                                              verbose=False,
                                              layer_types=['normal'],
                                              act_funcs=['lin'])

    comb_par = deepcopy(comb_parT)
    comb_par['ffnet_n'] = [0, 1, 2]

    if spiking:
        nd = 'poisson'
    else:
        nd = 'gaussian'

    Tglm = NDN.NDN([tpar, bpar, comb_parT], noise_dist=nd, tf_seed=5)
    DTglm = NDN.NDN([tpar, bpar, dpar, comb_par], noise_dist=nd, tf_seed=5)
    v2fT = Tglm.fit_variables(layers_to_skip=[2], fit_biases=False)
    v2fT[2][0]['fit_biases'] = True
    v2f = DTglm.fit_variables(layers_to_skip=[3], fit_biases=False)
    v2f[3][0]['fit_biases'] = True

    if (fr1or3 == 3) or (fr1or3 == 1):
        mod_indxs = np.intersect1d(indxs, np.where(Einfo['frs'] == fr1or3)[0])
        #frs_valid = Einfo['frs'] == fr1or3
    else:
        mod_indxs = indxs
        #frs_valid = Einfo['frs'] > 0
    #to_use = frs_valid[indxs]

    #r = deepcopy(resp[mod_indxs])
    #if len(resp) > len(indxs):
    #    r = deepcopy(resp[indxs])
    #else:
    #    r = deepcopy(resp)

    _ = Tglm.train(
        input_data=[Xs[mod_indxs, :], Xb[mod_indxs, :]],
        output_data=resp[mod_indxs],  # fit_variables=v2fT,
        learning_alg='lbfgs',
        opt_params=opt_params)
    _ = DTglm.train(
        input_data=[Xs[mod_indxs, :], Xb[mod_indxs, :],
                    Xd[mod_indxs, :]],  # fit_variables=v2f, 
        output_data=resp[mod_indxs],
        learning_alg='lbfgs',
        opt_params=opt_params)
    #p1 = Tglm.eval_models(input_data=Xs[indxs,:], output_data=r)[0]
    #p2 = DTglm.eval_models(input_data=[Xs[indxs,:], Xd[indxs,:]], output_data=r)[0]
    #print( "Model performances: %0.4f  -> %0.4f"%(p1, p2) )

    # make predictions of each
    predT = Tglm.generate_prediction(input_data=[Xs, Xb])
    predD = DTglm.generate_prediction(input_data=[Xs, Xb, Xd])

    return predD, predT
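
A usage sketch, assuming Einfo, resp, a training index range, and lbfgs-style opt_params as above; comparing the two predictions gives a rough measure of how much signal the disparity terms add beyond timing alone:

Dpred, Tpred = disparity_predictions(Einfo, resp, indxs=Ui,
                                     num_dlags=8, opt_params=opt_params)
extra_var = np.var(Dpred - Tpred)  # variability attributable to disparity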
Example #8
def get_stim_model(Stim,
                   Robs,
                   dims,
                   valid=None,
                   num_lags=10,
                   plot=True,
                   XTreg=0.05,
                   L1reg=5e-3,
                   MIreg=0.1,
                   MSCreg=10.0,
                   Greg=0.1,
                   Mreg=1e-4,
                   num_subs=36,
                   num_hid=24,
                   num_tkern=None,
                   Cindx=None,
                   base_mod=None,
                   cids=None,
                   autoencoder=False):

    NX = dims[0]
    NY = dims[1]

    NT, NC = Robs.shape

    if valid is None:
        valid = np.arange(0, NT, 1)

    # create time-embedded stimulus
    Xstim, rinds = create_time_embedding_valid(Stim, [num_lags, NX, NY], valid)
    Rvalid = deepcopy(Robs[rinds, :])

    NTv = Rvalid.shape[0]
    print('%d valid samples of %d possible' % (NTv, NT))

    stas = Xstim.T @ (Rvalid - np.average(Rvalid, axis=0))
    stas = np.reshape(stas, [NX * NY, num_lags, NC]) / NTv

    if plot:
        plt.figure(figsize=(10, 15))
        sx, sy = U.get_subplot_dims(NC)

    mu = np.zeros(NC)
    for cc in range(NC):
        if plot:
            plt.subplot(sx, sy, cc + 1)
            plt.plot(np.abs(stas[:, :, cc]).T, color=[.5, .5, .5])
        tlevel = np.median(
            np.abs(stas[:, :, cc] - np.average(stas[:, :, cc]))) * 4
        mu[cc] = np.average(np.abs(stas[:, :, cc]) > tlevel)

        if plot:
            plt.axhline(tlevel, color='k')
            plt.title(cc)

    # threshold good STAs
    thresh = 0.01
    if plot:
        plt.figure()
        plt.plot(mu, '-o')
        plt.axhline(thresh, color='k')
        plt.show()

    if cids is None:
        cids = np.where(mu > thresh)[0]  # units to analyze
        print("found %d good STAs" % len(cids))

    if plot:
        plt.figure(figsize=(10, 15))
        for cc in cids:
            plt.subplot(sx, sy, cc + 1)
            bestlag = np.argmax(np.max(abs(stas[:, :, cc]), axis=0))
            plt.imshow(np.reshape(stas[:, bestlag, cc], (NY, NX)))
            plt.title(cc)

    # index into "good" units
    Rvalid = Rvalid[:, cids]
    NC = Rvalid.shape[1]
    stas = stas[:, :, cids]

    if Cindx is None:
        print("Getting Crop Index")
        # Crop stimulus to center around RFs
        sumdensity = np.zeros([NX * NY])
        for cc in range(NC):
            bestlag = np.argmax(np.max(abs(stas[:, :, cc]), axis=0))
            sumdensity += stas[:, bestlag, cc]**2

        if plot:
            plt.figure()
            plt.imshow(np.reshape(sumdensity, [NY, NX]))
            plt.title("Sum Density STA")

        # get Crop indices (TODO: debug)
        sumdensity = (sumdensity - np.min(sumdensity)) / (np.max(sumdensity) -
                                                          np.min(sumdensity))
        I = np.reshape(sumdensity, [NY, NX]) > .3
        xinds = np.where(np.sum(I, axis=0) > 0)[0]
        yinds = np.where(np.sum(I, axis=1) > 0)[0]

        NX2 = np.maximum(len(xinds), len(yinds))
        x0 = np.min(xinds)
        y0 = np.min(yinds)

        xinds = range(x0, x0 + NX2)
        yinds = range(y0, y0 + NX2)

        Cindx = crop_indx(NX, xinds, yinds)

        if plot:
            plt.figure()
            plt.imshow(np.reshape(sumdensity[Cindx], [NX2, NX2]))
            plt.title('Cropped')
            plt.show()

    NX2 = np.sqrt(len(Cindx)).astype(int)

    # make new cropped stimulus
    Xstim, rinds = create_time_embedding_valid(Stim[:, Cindx],
                                               [num_lags, NX2, NX2], valid)

    # index into Robs
    Rvalid = deepcopy(Robs[rinds, :])
    Rvalid = Rvalid[:, cids]
    Rvalid = NDNutils.shift_mat_zpad(Rvalid, -1, dim=0)  # get rid of first lag

    NC = Rvalid.shape[1]  # new number of units
    NT = Rvalid.shape[0]
    print('%d valid samples of %d possible' % (NT, Stim.shape[0]))
    print('%d good units' % NC)

    # double-check STAS work with cropped stimulus
    stas = Xstim.T @ Rvalid
    stas = np.reshape(stas, [NX2 * NX2, num_lags, NC]) / NT

    if plot:
        plt.figure(figsize=(10, 15))
        for cc in range(NC):
            plt.subplot(sx, sy, cc + 1)
            bestlag = np.argmax(np.max(abs(stas[:, :, cc]), axis=0))
            plt.imshow(np.reshape(stas[:, bestlag, cc], (NX2, NX2)))
            plt.title(cc)
        plt.show()

    Ui, Xi = NDNutils.generate_xv_folds(NT)

    # fit SCAFFOLD MODEL
    try:
        if len(XTreg) == 2:
            d2t = XTreg[0]
            d2x = XTreg[1]
        else:
            d2t = XTreg[0]
            d2x = deepcopy(d2t)
    except TypeError:
        d2t = deepcopy(XTreg)
        d2x = deepcopy(XTreg)

    # optimizer parameters
    adam_params = U.def_adam_params()

    if base_mod is not None:
        side2b = base_mod.copy_model()
        side2b.set_regularization('d2t', d2t, layer_target=0)
        side2b.set_regularization('d2x', d2x, layer_target=0)
        side2b.set_regularization('glocal', Greg, layer_target=0)
        side2b.set_regularization('l1', L1reg, layer_target=0)
        side2b.set_regularization('max', MIreg, ffnet_target=0, layer_target=1)
        side2b.set_regularization('max',
                                  MSCreg,
                                  ffnet_target=1,
                                  layer_target=0)

        if len(side2b.networks) == 4:  # includes autoencoder network
            input_data = [Xstim, Rvalid]
        else:
            input_data = Xstim

    else:
        # Best regularization arrived at
        Greg0 = 1e-1
        Mreg0 = 1e-6
        L1reg0 = 1e-5

        if num_tkern is not None:
            ndn_par = NDNutils.ffnetwork_params(
                input_dims=[1, NX2, NX2, num_lags],
                layer_sizes=[num_tkern, num_subs, num_hid],
                layer_types=['conv', 'normal', 'normal'],
                ei_layers=[None, num_subs // 2, num_hid // 2],
                conv_filter_widths=[1],
                normalization=[1, 1, 1],
                act_funcs=['lin', 'relu', 'relu'],
                verbose=True,
                reg_list={
                    'd2t': [1e-3],
                    'd2x': [None, XTreg],
                    'l1': [L1reg0, L1reg0],
                    'glocal': [Greg0, Greg0]
                })
        else:
            ndn_par = NDNutils.ffnetwork_params(
                input_dims=[1, NX2, NX2, num_lags],
                layer_sizes=[num_subs, num_hid],
                layer_types=['normal', 'normal'],
                ei_layers=[num_subs // 2, num_hid // 2],
                normalization=[1, 1],
                act_funcs=['relu', 'relu'],
                verbose=True,
                reg_list={
                    'd2t': [d2t],
                    'd2x': [d2x],
                    'l1': [L1reg0, L1reg0],
                    'glocal': [Greg0]
                })

        side_par = NDNutils.ffnetwork_params(network_type='side',
                                             xstim_n=None,
                                             ffnet_n=0,
                                             layer_sizes=[NC],
                                             layer_types=['normal'],
                                             normalization=[-1],
                                             act_funcs=['softplus'],
                                             verbose=True,
                                             reg_list={'max': [Mreg0]})

        side_par['pos_constraints'] = True  # ensures Exc and Inh mean something

        if autoencoder:  # capture additional variability using an autoencoder
            auto_par = NDNutils.ffnetwork_params(
                input_dims=[1, NC, 1],
                xstim_n=[1],
                layer_sizes=[2, 1, NC],
                time_expand=[0, 15, 0],
                layer_types=['normal', 'temporal', 'normal'],
                conv_filter_widths=[None, 1, None],
                act_funcs=['relu', 'lin', 'lin'],
                normalization=[1, 1, 0],
                reg_list={'d2t': [None, 1e-1, None]})

            add_par = NDNutils.ffnetwork_params(xstim_n=None,
                                                ffnet_n=[1, 2],
                                                layer_sizes=[NC],
                                                layer_types=['add'],
                                                act_funcs=['softplus'])

            side2 = NDN.NDN([ndn_par, side_par, auto_par, add_par],
                            ffnet_out=1,
                            noise_dist='poisson')

            # set output regularization on the latent
            side2.batch_size = adam_params['batch_size']
            side2.initialize_output_reg(network_target=2,
                                        layer_target=1,
                                        reg_vals={'d2t': 1e-1})

            input_data = [Xstim, Rvalid]

        else:
            side2 = NDN.NDN([ndn_par, side_par],
                            ffnet_out=1,
                            noise_dist='poisson')

            input_data = Xstim

        _ = side2.train(input_data=input_data,
                        output_data=Rvalid,
                        train_indxs=Ui,
                        test_indxs=Xi,
                        silent=False,
                        learning_alg='adam',
                        opt_params=adam_params)

        side2.set_regularization('glocal', Greg, layer_target=0)
        side2.set_regularization('l1', L1reg, layer_target=0)
        side2.set_regularization('max', MIreg, ffnet_target=0, layer_target=1)
        side2.set_regularization('max', MSCreg, ffnet_target=1, layer_target=0)

        side2b = side2.copy_model()

    _ = side2b.train(input_data=input_data,
                     output_data=Rvalid,
                     train_indxs=Ui,
                     test_indxs=Xi,
                     silent=False,
                     learning_alg='adam',
                     opt_params=adam_params)

    LLs2n = side2b.eval_models(input_data=input_data,
                               output_data=Rvalid,
                               data_indxs=Xi,
                               nulladjusted=True)
    print(np.mean(LLs2n))
    if plot:
        plt.hist(LLs2n)
        plt.xlabel('Nats/Spike')
        plt.show()

    return side2b, Xstim, Rvalid, rinds, cids, Cindx
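
A usage sketch for the scaffold fit above, with plotting disabled (data arguments as elsewhere in these examples):

side2b, Xstim, Rvalid, rinds, cids, Cindx = get_stim_model(
    Stim, Robs, [NX, NY], valid=valid, num_lags=10, plot=False)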
Example #9
# restrict training indices to samples where both eye-position coordinates are negative
Ui = np.intersect1d(Ui, np.where(np.logical_and(eyeX < 0, eyeY < 0))[0])
num_subs = NC // 2

nim_par = NDNutils.ffnetwork_params(input_dims=[1, NX, NY, num_lags],
                                    layer_sizes=[num_subs, NC],
                                    layer_types=['normal', 'normal'],
                                    act_funcs=['relu', 'softplus'],
                                    normalization=[0],
                                    reg_list={
                                        'l2': 1e-2,
                                        'd2xt': 1e-5,
                                        'glocal': 1e-1
                                    })

nim0 = NDN.NDN([nim_par], tf_seed=seed, noise_dist=noise_dist)

v2f = nim0.fit_variables(fit_biases=True)

# train
_ = nim0.train(input_data=[Xstim],
               output_data=Robs,
               train_indxs=Ui,
               test_indxs=Xi,
               learning_alg=optimizer,
               opt_params=opt_params,
               use_dropout=False,
               fit_variables=v2f)

print("Done")
Example #10
    layer_sizes=[2, 1, NC],
    time_expand=[0, 15, 0],
    layer_types=['normal', 'temporal', 'normal'],
    conv_filter_widths=[None, 1, None],
    act_funcs=['relu', 'lin', 'lin'],
    normalization=[1, 1, 0],
    reg_list={'d2t': [None, 1e-5, None]})

add_par = NDNutils.ffnetwork_params(xstim_n=None,
                                    ffnet_n=[0, 1],
                                    layer_sizes=[NC],
                                    layer_types=['add'],
                                    act_funcs=['softplus'])

glm = NDN.NDN([glm_par, autoencoder, add_par],
              tf_seed=seed,
              noise_dist=noise_dist)

# time_expand=[0, 0, num_ca_lags], normalization=[0,0,0],
#     layer_types=['conv','normal', 'conv'], conv_filter_widths=[L, None, 1],

glm.batch_size = adam_params['batch_size']
glm.initialize_output_reg(network_target=1,
                          layer_target=1,
                          reg_vals={'d2t': 1e-1})
glm.time_spread = 100

v2f = glm.fit_variables()
for nn in range(len(v2f[1]) - 1):
    v2f[1][nn]['biases'] = False
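
Training the combined stimulus-plus-autoencoder model follows the scaffold example: the responses feed in both as the second input (for the autoencoder network) and as the output. A sketch assuming Xstim, Robs, Ui, Xi, and adam_params as above:

_ = glm.train(input_data=[Xstim, Robs], output_data=Robs,
              train_indxs=Ui, test_indxs=Xi,
              learning_alg='adam', opt_params=adam_params,
              fit_variables=v2f)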
#%%
Example #11
    layer_sizes=[num_subs, num_hid], 
    layer_types=['normal','normal'],
    normalization=[1, 1],
    act_funcs=['relu', 'relu'],
    verbose=True,
    reg_list={'d2x':[XTreg], 'l1':[L1reg0, L1reg0], 'glocal':[Greg0]})


side_par = NDNutils.ffnetwork_params( 
    network_type='side', xstim_n=None, ffnet_n=1, layer_sizes=[NC], 
    layer_types=['normal'], normalization=[-1], act_funcs=['softplus'], verbose=True,
    reg_list={'max':[Mreg0]})

side_par['pos_constraints'] = True

side2 = NDN.NDN( [t_layer, ndn_par, side_par], ffnet_out=2, noise_dist='poisson')

#%%
gab_array = DU.gabor_array(NX2//2, num_angles=num_subs//2, both_phases=True)
side2.networks[1].layers[0].weights = deepcopy(gab_array)
            
input_data = stmp

NumBlocks = blocks.shape[0]
# drop blocks shorter than 10 samples (note: arange stops at NumBlocks-1, so the final block is dropped too)
bad_blocks = np.where((blocks[:, 1] - blocks[:, 0]) < 10)[0]
good_blocks = np.setdiff1d(np.arange(0, NumBlocks - 1), bad_blocks)
blocks = blocks[good_blocks, :]
NumBlocks = blocks.shape[0]

Ui,Xi = NDNutils.generate_xv_folds(NumBlocks)
_ = side2.train(input_data=input_data, output_data=Rvalid, train_indxs=Ui, test_indxs=Xi, silent=False, 
Example #12
                                    conv_filter_widths=[1, 12, None],
                                    ei_layers=[None, num_subs // 2],
                                    normalization=[2, 1, -1],
                                    act_funcs=['relu', 'relu', 'softplus'],
                                    verbose=True,
                                    reg_list={
                                        'd2t': [1e-4],
                                        'd2x': [None, Xreg],
                                        'l1': [None, L1reg0],
                                        'center': [None, Creg0],
                                        'glocal': [None, Greg0],
                                        'max': [None, None, Mreg0]
                                    })

# stim only
retV1 = NDN.NDN([ndn_par], ffnet_out=0, noise_dist='poisson')
retV1.networks[0].layers[0].weights[:, :] = 0
retV1.networks[0].layers[0].weights[2:4, 0] = 1
retV1.networks[0].layers[0].weights[2:4, 1] = -1

v2f = retV1.fit_variables(fit_biases=False)
v2f[0][0]['biases'] = True
v2f[-1][-1]['biases'] = True

#%% train
_ = retV1.train(input_data=[Xstim],
                output_data=Robs,
                train_indxs=Ui,
                test_indxs=Xi,
                silent=False,
                learning_alg='adam',
Example #13
    layer_sizes=[2, 1, NC],
    time_expand=[0, 10, 0],
    layer_types=['normal', 'temporal', 'normal'],
    conv_filter_widths=[None, 1, None],
    act_funcs=['lin', 'lin', 'lin'],
    normalization=[1, 1, -1],
    reg_list={'d2t': [None, 1e-3, None]})

add_par = NDNutils.ffnetwork_params(xstim_n=None,
                                    ffnet_n=[0, 1],
                                    layer_sizes=[NC],
                                    layer_types=['add'],
                                    act_funcs=['softplus'])

retV1b = NDN.NDN([ndn_par, auto_par, add_par],
                 ffnet_out=2,
                 noise_dist='poisson')

# set output regularization on the latent
retV1b.batch_size = adam_params['batch_size']
retV1b.initialize_output_reg(network_target=1,
                             layer_target=1,
                             reg_vals={'d2t': .1})

retV1b.networks[0].layers[0].weights[:, :] = 0
retV1b.networks[0].layers[0].weights[2:4, 0] = 1
retV1b.networks[0].layers[0].weights[2:4, 1] = -1

v2fb = retV1b.fit_variables(fit_biases=True)
for nn in range(len(retV1b.networks)):
    for nl in range(len(retV1b.networks[nn].layers)):