コード例 #1
0
    def get_hsm_params_custom(input, output, i):
        """Build HSM ffnetwork params: DoG layer -> hidden -> dense readout.

        `input`/`output` are data matrices whose second dimension gives the
        stimulus and response sizes; `i` is unused here.
        """
        _, output_shape = output.shape
        _, input_shape = input.shape
        pprint(f"in: {input_shape} out: {output_shape}")

        # Stimulus is assumed square: side length = sqrt(#pixels).
        side = int(math.sqrt(input_shape))
        hsm_params = NDNutils.ffnetwork_params(
            verbose=False,
            input_dims=[1, side, side],
            # paper: 9, 0.2*output_shape
            layer_sizes=[9, int(hidden * output_shape), output_shape],
            ei_layers=[None] * 3,
            normalization=[0] * 3,
            layer_types=['diff_of_gaussians', 'normal', 'normal'],
            act_funcs=['lin', 'softplus', 'softplus'],
            reg_list={'l2': [None, reg_h, reg_l]})
        hsm_params['weights_initializers'] = ['random', 'normal', 'normal']
        hsm_params['biases_initializers'] = ['trunc_normal'] * 3

        return hsm_params
コード例 #2
0
    def get_hsm_params_custom(input, output, i):
        """Build params for a three-conv + separable-readout network."""
        _, output_shape = output.shape
        _, input_shape = input.shape
        pprint(f"in: {input_shape} out: {output_shape}")

        # Stimulus is assumed square: side length = sqrt(#pixels).
        side = int(math.sqrt(input_shape))
        hsm_params = NDNutils.ffnetwork_params(
            verbose=False,
            input_dims=[1, side, side],
            # paper: 9, 0.2*output_shape
            layer_sizes=[c_filters, c_filters, c_filters, output_shape],
            ei_layers=[None] * 4,
            normalization=[0] * 4,
            layer_types=['conv', 'conv', 'conv', 'sep'],
            act_funcs=['softplus'] * 4,
            shift_spacing=[2, 2, 2, 0],
            conv_filter_widths=[13, 3, 3, 0],
            reg_list={
                'd2x': [cd2x, cd2x, cd2x, None],
                'max_filt': [max_filt, max_filt, max_filt, None],
                'l1': [None, None, None, l1],
            })
        hsm_params['weights_initializers'] = ['normal'] * 4
        hsm_params['biases_initializers'] = ['trunc_normal'] * 4

        return hsm_params
コード例 #3
0
ファイル: bs4_exp10.py プロジェクト: lucabaroni/msc-neuro
    def get_hsm_params_custom(input, output, i):
        """Build params: conv difference-of-Gaussians -> hidden -> readout."""
        _, output_shape = output.shape
        _, input_shape = input.shape
        pprint(f"in: {input_shape} out: {output_shape}")

        # Stimulus is assumed square: side length = sqrt(#pixels).
        side = int(math.sqrt(input_shape))
        hsm_params = NDNutils.ffnetwork_params(
            verbose=False,
            input_dims=[1, side, side],
            # paper: 9, 0.2*output_shape
            layer_sizes=[c_filters, int(0.2 * output_shape), output_shape],
            ei_layers=[None] * 3,
            normalization=[0] * 3,
            layer_types=['conv_diff_of_gaussians', hidden_lt, 'normal'],
            act_funcs=['lin', 'softplus', 'softplus'],
            # NOTE(review): only two shift_spacing entries for three layers
            # -- confirm this is what NDNutils expects here.
            shift_spacing=[(c_size + 1) // 2, 0],
            conv_filter_widths=[c_size, 0, 0],
            reg_list={
                hidden_t: [None, hidden_s, None],
                'l2': [None, None, 0.1],
            })
        hsm_params['weights_initializers'] = ['random', 'normal', 'normal']
        hsm_params['biases_initializers'] = ['trunc_normal'] * 3

        return hsm_params
コード例 #4
0
    def get_params(self):
        """Build params: var -> conv DoG -> configurable hidden -> readout."""
        hidden = int(self.args['hidden'])
        c_size = self.args['c_size']
        hsm_params = NDNutils.ffnetwork_params(
            input_dims=[1, self.width, self.height],
            # paper: 9, 0.2*output_shape
            layer_sizes=[hidden, hidden,
                         int(0.2 * self.out_num), self.out_num],
            ei_layers=[None] * 4,
            normalization=[0] * 4,
            layer_types=['var', 'conv_diff_of_gaussians',
                         self.args['layer'], 'normal'],
            act_funcs=['lin', 'lin', 'softplus', 'softplus'],
            shift_spacing=[1, (c_size + 1) // 2, 1, 1],
            conv_filter_widths=[c_size, c_size, 0, 0],
            reg_list={
                'l2': [0.1, None, None, 0.1],
                'l1': [None, None, self.args['reg_h'], None],
            })
        hsm_params['weights_initializers'] = ['normal', 'random',
                                              'normal', 'normal']
        hsm_params['biases_initializers'] = ['trunc_normal'] * 4

        return hsm_params
コード例 #5
0
 def get_params(self):
     """Build params: var -> conv -> configurable hidden -> dense readout."""
     args = self.args
     params = NDNutils.ffnetwork_params(
         input_dims=[1, self.width, self.height],
         # paper: 9, 0.2*output_shape
         layer_sizes=[args['channels'], args['channels'],
                      int(0.2 * self.out_num), self.out_num],
         ei_layers=[None] * 4,
         normalization=[0] * 4,
         layer_types=['var', 'conv', args['hidden_lt'], 'normal'],
         act_funcs=['lin', 'softplus', 'softplus', 'softplus'],
         shift_spacing=[1, (args['c_size'] + 1) // 2, 1, 1],
         conv_filter_widths=[0, args['c_size'], 0, 0],
         reg_list={
             'd2x': [None, args['cd2x'], None, None],
             # Regularizer name for the hidden layer is itself configurable.
             args['hidden_t']: [None, None, args['hidden_s'], None],
             'l2': [0.1, None, None, 0.1],
         })
     params['weights_initializers'] = ['normal'] * 4
     params['biases_initializers'] = ['normal'] + ['trunc_normal'] * 3
     return params
コード例 #6
0
ファイル: generator_net.py プロジェクト: kovacspe/diplomka
    def get_encoder(self,
                    noise_size,
                    input_shape,
                    ffnet_in,
                    generator_type='conv'):
        """Return encoder ffnetwork params of the requested flavour.

        The encoder maps the output of ffnetwork `ffnet_in` down to a
        `noise_size`-dimensional code; `generator_type` selects a fully
        convolutional, fully linear, or hybrid stack.
        """
        sizes = [8, 8, 16, noise_size]
        if generator_type == 'conv':
            params = NDNutils.ffnetwork_params(
                input_dims=input_shape,
                layer_sizes=sizes,
                layer_types=['conv', 'conv', 'conv', 'normal'],
                act_funcs=['relu', 'relu', 'relu', 'lin'],
                conv_filter_widths=[5, 5, 7, None],
                shift_spacing=[1, 2, 2, None],
                reg_list={'d2x': [0.1, 0.1, None, None]},
                verbose=False)
        elif generator_type == 'lin':
            # NOTE(review): three layer_types/act_funcs for four
            # layer_sizes here -- looks inconsistent; confirm intent.
            params = NDNutils.ffnetwork_params(
                input_dims=input_shape,
                layer_sizes=sizes,
                layer_types=['normal', 'normal', 'normal'],
                act_funcs=['relu', 'relu', 'relu'],
                reg_list={'d2x': [0.1, 0.1, None, None]},
                verbose=False)
        elif generator_type == 'hybrid':
            params = NDNutils.ffnetwork_params(
                input_dims=input_shape,
                layer_sizes=sizes,
                layer_types=['conv', 'conv', 'normal', 'normal'],
                act_funcs=['relu', 'relu', 'relu', 'lin'],
                conv_filter_widths=[5, 5, 7, None],
                shift_spacing=[2, 2, None, None],
                reg_list={'d2x': [0.1, 0.1, None, None]},
                verbose=False)
        else:
            raise ValueError(
                f'Generator type \'{generator_type}\' not implemented.')

        # Feed from another ffnetwork rather than from the stimulus.
        params['xstim_n'] = None
        params['ffnet_n'] = [ffnet_in]
        return params
コード例 #7
0
    def get_params(self):
        """Build params: var layer followed by two dense softplus layers."""
        # Both hidden layers share the same width.
        n_hidden = int(self.args['hidden'] * self.out_num)
        hsm_params = NDNutils.ffnetwork_params(
            input_dims=[1, self.width, self.height],
            # paper: 9, 0.2*output_shape
            layer_sizes=[n_hidden, n_hidden, self.out_num],
            ei_layers=[None] * 3,
            normalization=[0] * 3,
            layer_types=['var', 'normal', 'normal'],
            act_funcs=['lin', 'softplus', 'softplus'],
            reg_list={
                'l2': [0.1, None, self.args['reg_l']],
                'd2x': [None, self.args['reg_h'], None],
            })
        hsm_params['weights_initializers'] = ['normal'] * 3
        hsm_params['biases_initializers'] = ['trunc_normal'] * 3

        return hsm_params
コード例 #8
0
    def get_hsm_params_custom(input, output, i):
        """Build params for a single dense softplus readout layer (LN model)."""
        _, output_shape = output.shape
        _, input_shape = input.shape
        pprint(f"in: {input_shape} out: {output_shape}")

        # Stimulus is assumed square: side length = sqrt(#pixels).
        side = int(math.sqrt(input_shape))
        hsm_params = NDNutils.ffnetwork_params(
            verbose=False,
            input_dims=[1, side, side],
            layer_sizes=[output_shape],  # paper: 9, 0.2*output_shape
            ei_layers=[None],
            normalization=[0],
            layer_types=['normal'],
            act_funcs=['softplus'],
            reg_list={'d2x': [reg_l]})
        hsm_params['weights_initializers'] = ['normal']
        hsm_params['biases_initializers'] = ['trunc_normal']

        return hsm_params
コード例 #9
0
    def get_params(self):
        """Build params: var -> difference-of-Gaussians -> hidden -> readout."""
        filt = self.args['filt_size']
        hsm_params = NDNutils.ffnetwork_params(
            input_dims=[1, self.width, self.height],
            # paper: 9, 0.2*output_shape
            layer_sizes=[filt, filt,
                         int(self.args['perc_output'] * self.out_num),
                         self.out_num],
            ei_layers=[None] * 4,
            normalization=[0] * 4,
            layer_types=['var', 'diff_of_gaussians', 'normal', 'normal'],
            act_funcs=['lin', 'lin', 'softplus', 'softplus'],
            reg_list={'l2': [0.1, None, None, 0.1]})
        hsm_params['weights_initializers'] = ['normal', 'random',
                                              'normal', 'normal']
        hsm_params['biases_initializers'] = ['trunc_normal'] * 4

        return hsm_params
コード例 #10
0
 def get_params(self):
     """Build params for a three-conv + separable-readout network.

     Returns the NDNutils ffnetwork parameter dict with filter widths,
     initializers and positivity constraints filled in.
     """
     params = NDNutils.ffnetwork_params(
         input_dims=[1, self.width, self.height],
         layer_sizes=[
             self.args['channels'], self.args['channels'],
             self.args['channels'], self.out_num
         ],
         layer_types=['conv', 'conv', 'conv', 'sep'],
         act_funcs=['softplus', 'softplus', 'lin', 'softplus'],
         shift_spacing=[1, 1, 1, None],
         reg_list={
             #'d2x': [0.03, 0.015, 0.015, None],
             'l1': [None, None, None, 0.02]
         })
     params['conv_filter_widths'] = [13, 5, 5, None]
     params['weights_initializers'] = [
         'trunc_normal', 'trunc_normal', 'trunc_normal', 'trunc_normal'
     ]
     # Fix: every other builder in this file (and NDNutils) uses the key
     # 'biases_initializers'; the old 'bias_initializers' entry was a typo
     # and would have been silently ignored.
     params['biases_initializers'] = [
         'zeros', 'zeros', 'zeros', 'trunc_normal'
     ]
     # Only the final separable readout is constrained to be positive.
     params['pos_constraint'] = [False, False, False, True]
     return params
コード例 #11
0
 def get_params(self):
     """Build params: dense -> conv -> conv -> dense softplus readout."""
     params = NDNutils.ffnetwork_params(
         input_dims=[10],
         # paper: 9, 0.2*output_shape
         layer_sizes=[[31, 31], 30,
                      int(0.2 * self.out_num), self.out_num],
         ei_layers=[None] * 4,
         normalization=[0] * 4,
         layer_types=['normal', 'conv', 'conv', 'normal'],
         act_funcs=['lin', 'softplus', 'softplus', 'softplus'],
         shift_spacing=[1, (7 + 1) // 2, 1, 1],
         conv_filter_widths=[0, 7, 0, 0],
         reg_list={
             'd2x': [None, 0.2, None, None],
             'l2': [0.1, None, None, 0.1],
         })
     params['weights_initializers'] = ['normal'] * 4
     params['biases_initializers'] = ['normal'] + ['trunc_normal'] * 3
     return params
コード例 #12
0
},
                                          learning_alg='lbfgs')
lbfgs_params['maxiter'] = 1000

# setup training indices
valdata = np.arange(0, NT, 1)

# Split the NT time points into train (Ui) / cross-validation (Xi) folds.
Ui, Xi = NDNutils.generate_xv_folds(NT, num_blocks=2)

# NDN parameters for processing the stimulus
par = NDNutils.ffnetwork_params(input_dims=[2, NX, NY, num_lags],
                                layer_sizes=[NC],
                                layer_types=['normal'],
                                normalization=[0],
                                act_funcs=['softplus'],
                                verbose=True,
                                reg_list={
                                    'd2t': [.01],
                                    'd2x': [0.01],
                                    'glocal': [0.01]
                                })

# initialize GLM
glm0 = NDN.NDN([par], noise_dist='poisson')

# Freeze all biases, then re-enable fitting only for the last layer's biases.
v2f0 = glm0.fit_variables(fit_biases=False)
v2f0[-1][-1]['biases'] = True

# Responses restricted to the selected cells (cids), as float32 for TF.
R = Robs[:, cids].astype('float32')
# train initial model
_ = glm0.train(input_data=[Xstim],
コード例 #13
0
plt.figure()
# f = plt.hist2d( eyeX[Ui], eyeY[Ui], bins=100)
sns.kdeplot(eyeX[Ui], eyeY[Ui], bw=.05, shade=True, shade_lowest=False)
plt.title('Eye Position Density')

# Radial eye eccentricity (unused below in this chunk).
rs = np.hypot(eyeX, eyeY)

# Keep only training indices where gaze is in the lower-left quadrant.
Ui = np.intersect1d(Ui, np.where(np.logical_and(eyeX < 0, eyeY < 0))[0])
num_subs = NC // 2

# NIM: one hidden layer of relu subunits feeding a softplus readout.
nim_par = NDNutils.ffnetwork_params(input_dims=[1, NX, NY, num_lags],
                                    layer_sizes=[num_subs, NC],
                                    layer_types=['normal', 'normal'],
                                    act_funcs=['relu', 'softplus'],
                                    normalization=[0],
                                    reg_list={
                                        'l2': 1e-2,
                                        'd2xt': 1e-5,
                                        'glocal': 1e-1
                                    })

nim0 = NDN.NDN([nim_par], tf_seed=seed, noise_dist=noise_dist)

# Fit all variables, biases included.
v2f = nim0.fit_variables(fit_biases=True)

# train
_ = nim0.train(input_data=[Xstim],
               output_data=Robs,
               train_indxs=Ui,
               test_indxs=Xi,
               learning_alg=optimizer,
コード例 #14
0
# Log-spaced grid of d2x regularization strengths: 1e-2 ... 1e2.
d2xs = 1e-2*10**np.arange(0, 5)
gqms = []
LLxs = []
for step in range(len(d2xs)):

    d2t = .05
    d2x = d2xs[step]
    loc = 1e-5

    num_lags = dims[0]
    NX2 = dims[1]
    NC = Rvalid.shape[1]
    # NDN parameters for processing the stimulus
    lin = NDNutils.ffnetwork_params( 
        input_dims=[1,NX2,NX2,num_lags],
        layer_sizes=[NC],
        layer_types=['readout'], normalization=[0],
        act_funcs=['lin'], verbose=True,
        reg_list={'d2t': [d2t], 'd2x':[d2x], 'l1':[loc]})

    quad = NDNutils.ffnetwork_params( 
        input_dims=[1,NX2,NX2,num_lags],
        layer_sizes=[NC],
        layer_types=['readout'], normalization=[0],
        act_funcs=['quad'], verbose=True,
        reg_list={'d2t': [d2t], 'd2x':[d2x], 'l1':[loc]})

    quad = NDNutils.ffnetwork_params( 
        input_dims=[1,NX2,NX2,num_lags],
        layer_sizes=[NC],
        layer_types=['readout'], normalization=[0],
        act_funcs=['quad'], verbose=True,
コード例 #15
0
# Pick optimizer-specific options prepared earlier in the script.
if optimizer == 'adam':
    opt_params = adam_params
else:
    opt_params = lbfgs_params

# %% fit GLM
# Train / cross-validation / test index sets.
Ui = opts['Ui']
Xi = opts['Xi']
Ti = opts['Ti']

# Linear readout of the time-embedded stimulus, one filter per cell.
glm_par = NDNutils.ffnetwork_params(input_dims=[1, NX, NY, num_lags],
                                    layer_sizes=[NC],
                                    layer_types=['readout'],
                                    act_funcs=['lin'],
                                    normalization=[0],
                                    reg_list={
                                        'l2': 1e-2,
                                        'd2xt': 1e-5,
                                        'l1': 1e-5
                                    })

# Autoencoder on the population response (input 1: Robs), bottlenecked
# through a 1-D temporally-smoothed latent before re-expanding to NC cells.
autoencoder = NDNutils.ffnetwork_params(
    input_dims=[1, NC, 1],
    xstim_n=[1],
    layer_sizes=[2, 1, NC],
    time_expand=[0, 15, 0],
    layer_types=['normal', 'temporal', 'normal'],
    conv_filter_widths=[None, 1, None],
    act_funcs=['relu', 'lin', 'lin'],
    normalization=[1, 1, 0],
    reg_list={'d2t': [None, 1e-5, None]})
コード例 #16
0
# setup training indices
NT = Xstim.shape[0]
valdata = np.arange(0, NT, 1)

# Single-cell model.
NC = 1
# Split the NT time points into train (Ui) / cross-validation (Xi) folds.
Ui, Xi = NDNutils.generate_xv_folds(NT, num_blocks=2)

# GLM

# NDN parameters for processing the stimulus
par = NDNutils.ffnetwork_params(input_dims=[1, NX, NY, num_lags],
                                layer_sizes=[NC],
                                layer_types=['normal'],
                                normalization=[0],
                                act_funcs=['softplus'],
                                verbose=True,
                                reg_list={
                                    'd2x': [0.01],
                                    'glocal': [0.1]
                                })

# initialize GLM
glm0 = NDN.NDN([par], noise_dist='poisson')

# initialize weights with STA
# sta = (sta - np.min(sta)) / (np.max(sta) - np.min(sta))
# glm0.networks[0].layers[0].weights[:,0]=deepcopy((sta - np.min(sta)) / (np.max(sta) - np.min(sta)))

v2f0 = glm0.fit_variables(fit_biases=True)

# train initial model
コード例 #17
0
# Shift saccade-onset regressor backwards in time (zero-padded).
saconshift = NDNutils.shift_mat_zpad(sacon, -back_shiftson, dim=0)
num_onlags = max(tspacing) + 1

# Time-embedded design matrices for grating direction and contrast.
Xdir = NDNutils.create_time_embedding(gratingDir, [num_lags, ND])
Xcon = NDNutils.create_time_embedding(gratingCon, [num_lags, 1])

Ui, Xi = NDNutils.generate_xv_folds(NT)
NC = Robs.shape[1]

# Slow drift basis: 20 tent functions spanning the whole recording.
Time = NDNutils.tent_basis_generate(xs=np.linspace(0, NT - 1, 20))

# Per-cell DC/drift term driven by the tent basis.
dc_shift = NDNutils.ffnetwork_params(
    input_dims=[1, Time.shape[1]],
    layer_sizes=[NC],
    layer_types=['normal'],  # readout for cell-specific regularization
    act_funcs=['lin'],
    normalization=[0],
    reg_list={
        'd2x': [1e-3],
        'l2': [1e-4]
    })

dir_tuning_par = NDNutils.ffnetwork_params(
    input_dims=[num_lags, ND],
    xstim_n=[1],
    layer_sizes=[10, NC],
    layer_types=['normal',
                 'normal'],  # readout for cell-specific regularization
    act_funcs=['lin', 'lin'],
    normalization=[1, 0],
    reg_list={
        'd2x': [1e-5],
コード例 #18
0
ファイル: neureye.py プロジェクト: jcbyts/V1FreeViewingCode
def make_model_convolutional(base_mod, dims):
    """Build a convolutional clone of `base_mod` over an NX x NY grid.

    Stimulus-processing networks become 'conv' layers whose filter width
    equals the original input width; the output network is converted to an
    'add' or 1x1 'conv' layer, and all fitted weights/biases are copied
    (expanded across space where needed).

    Args:
        base_mod: fitted NDN model to convert.
        dims: (NX, NY) spatial dimensions of the new convolutional input.

    Returns:
        A new NDN model with the converted architecture and copied weights.
    """

    NX = dims[0]
    NY = dims[1]
    NC = base_mod.output_sizes[0]

    # find networks that process the stimulus
    stim_nets = [
        nn for nn in range(len(base_mod.network_list))
        if base_mod.network_list[nn]['xstim_n'] is not None
    ]

    par = []

    for ss in stim_nets:

        netlist_old = deepcopy(base_mod.network_list)[ss]

        # convert stimulus network params into a convolutional network
        conv_par = deepcopy(base_mod.network_list[ss])
        conv_par['input_dims'] = [1, NX, NY] + [conv_par['input_dims'][-1]]
        conv_par['layer_types'] = ['conv']
        # Filter width = original input width, so one filter spans the
        # original (non-convolutional) receptive field.
        conv_par['conv_filter_widths'] = [netlist_old['input_dims'][1]]

        par.append(deepcopy(conv_par))

    out_net = deepcopy(base_mod.network_list[-1])
    if out_net['layer_types'][0] == 'add':
        add_par = NDNutils.ffnetwork_params(
            xstim_n=None,
            ffnet_n=stim_nets,
            layer_sizes=[NX * NY * NC],
            layer_types=['add'],
            act_funcs=out_net['activation_funcs'])
        par.append(add_par)

    elif out_net['layer_types'][0] == 'side':
        # Side networks become a 1x1 convolution over the feature maps.
        out_net['layer_types'] = ['conv']
        out_net['conv_filter_widths'] = [1]
        par.append(out_net)

    cell_shift_mod = NDN.NDN(par)

    # Number of spatial positions in the new convolutional input.
    num_space = np.prod(cell_shift_mod.input_sizes[0][:-1])

    # copy stim networks verbatim (only thing diff is output is a convolution)
    for ff in stim_nets:
        for nl in range(len(cell_shift_mod.networks[ff].layers)):
            cell_shift_mod.networks[ff].layers[nl].weights = deepcopy(
                base_mod.networks[ff].layers[nl].weights)
            cell_shift_mod.networks[ff].layers[nl].biases = deepcopy(
                base_mod.networks[ff].layers[nl].biases)

    if base_mod.networks[-1].layers[0].weights.shape[0] == len(stim_nets):
        # Add-layer output: tile the add weights across every spatial position.
        cell_shift_mod.networks[-1].layers[0].weights = conv_expand(
            deepcopy(base_mod.networks[-1].layers[0].weights), num_space)
        cell_shift_mod.networks[-1].layers[0].biases = conv_expand(
            deepcopy(base_mod.networks[-1].layers[0].biases), num_space)
    else:  # convolutional output instead of add layer
        # copy output weights
        cell_shift_mod.networks[-1].layers[0].weights = deepcopy(
            base_mod.networks[1].layers[0].weights)
        cell_shift_mod.networks[-1].layers[0].biases = deepcopy(
            base_mod.networks[1].layers[0].biases)

    return cell_shift_mod
コード例 #19
0
ファイル: neureye.py プロジェクト: jcbyts/V1FreeViewingCode
def get_stim_model(Stim,
                   Robs,
                   dims,
                   valid=None,
                   num_lags=10,
                   plot=True,
                   XTreg=0.05,
                   L1reg=5e-3,
                   MIreg=0.1,
                   MSCreg=10.0,
                   Greg=0.1,
                   Mreg=1e-4,
                   num_subs=36,
                   num_hid=24,
                   num_tkern=None,
                   Cindx=None,
                   base_mod=None,
                   cids=None,
                   autoencoder=False):
    """Fit a scaffold stimulus model to (Stim, Robs).

    Pipeline: compute STAs, select good units, crop the stimulus around the
    population RFs, then fit (or refit from `base_mod`) a shared-subunit
    NDN with a side network, optionally with an autoencoder branch that
    captures stimulus-independent variability.

    Args:
        Stim: stimulus matrix (time x pixels).
        Robs: observed responses (time x cells).
        dims: (NX, NY) stimulus dimensions.
        valid: valid time indices (defaults to all).
        num_lags: temporal embedding length.
        plot: if True, show diagnostic figures along the way.
        XTreg, L1reg, MIreg, MSCreg, Greg, Mreg: regularization strengths.
        num_subs, num_hid: subunit / hidden layer sizes.
        num_tkern: optional number of temporal kernels for a first conv layer.
        Cindx: precomputed crop indices (computed here if None).
        base_mod: optional previously-fit model to refine instead of refitting.
        cids: unit indices to use (auto-selected from STAs if None).
        autoencoder: add the autoencoder branch when fitting from scratch.

    Returns:
        (side2b, Xstim, Rvalid, rinds, cids, Cindx).
    """

    NX = dims[0]
    NY = dims[1]

    NT, NC = Robs.shape

    if valid is None:
        valid = np.arange(0, NT, 1)

    # create time-embedded stimulus
    Xstim, rinds = create_time_embedding_valid(Stim, [num_lags, NX, NY], valid)
    Rvalid = deepcopy(Robs[rinds, :])

    NTv = Rvalid.shape[0]
    print('%d valid samples of %d possible' % (NTv, NT))

    # Spike-triggered averages on mean-subtracted responses.
    stas = Xstim.T @ (Rvalid - np.average(Rvalid, axis=0))
    stas = np.reshape(stas, [NX * NY, num_lags, NC]) / NTv

    if plot:
        plt.figure(figsize=(10, 15))
        sx, sy = U.get_subplot_dims(NC)

    # mu[cc]: fraction of STA pixels above a robust noise threshold --
    # used below as a crude per-unit signal-quality score.
    mu = np.zeros(NC)
    for cc in range(NC):
        if plot:
            plt.subplot(sx, sy, cc + 1)
            plt.plot(np.abs(stas[:, :, cc]).T, color=[.5, .5, .5])
        tlevel = np.median(
            np.abs(stas[:, :, cc] - np.average(stas[:, :, cc]))) * 4
        mu[cc] = np.average(np.abs(stas[:, :, cc]) > tlevel)

        if plot:
            plt.axhline(tlevel, color='k')
            plt.title(cc)

    # threshold good STAS
    thresh = 0.01
    if plot:
        plt.figure()
        plt.plot(mu, '-o')
        plt.axhline(thresh, color='k')
        plt.show()

    if cids is None:
        cids = np.where(mu > thresh)[0]  # units to analyze
        print("found %d good STAs" % len(cids))

    if plot:
        plt.figure(figsize=(10, 15))
        for cc in cids:
            plt.subplot(sx, sy, cc + 1)
            bestlag = np.argmax(np.max(abs(stas[:, :, cc]), axis=0))
            plt.imshow(np.reshape(stas[:, bestlag, cc], (NY, NX)))
            plt.title(cc)

    # index into "good" units
    Rvalid = Rvalid[:, cids]
    NC = Rvalid.shape[1]
    stas = stas[:, :, cids]

    if Cindx is None:
        print("Getting Crop Index")
        # Crop stimulus to center around RFs
        sumdensity = np.zeros([NX * NY])
        for cc in range(NC):
            bestlag = np.argmax(np.max(abs(stas[:, :, cc]), axis=0))
            sumdensity += stas[:, bestlag, cc]**2

        if plot:
            plt.figure()
            plt.imshow(np.reshape(sumdensity, [NY, NX]))
            plt.title("Sum Density STA")

        # get Crop indices (TODO: debug)
        # Normalize to [0, 1] and keep the region above 30% peak density.
        sumdensity = (sumdensity - np.min(sumdensity)) / (np.max(sumdensity) -
                                                          np.min(sumdensity))
        I = np.reshape(sumdensity, [NY, NX]) > .3
        xinds = np.where(np.sum(I, axis=0) > 0)[0]
        yinds = np.where(np.sum(I, axis=1) > 0)[0]

        # Make the crop square (NX2 x NX2).
        NX2 = np.maximum(len(xinds), len(yinds))
        x0 = np.min(xinds)
        y0 = np.min(yinds)

        xinds = range(x0, x0 + NX2)
        yinds = range(y0, y0 + NX2)

        Cindx = crop_indx(NX, xinds, yinds)

        if plot:
            plt.figure()
            plt.imshow(np.reshape(sumdensity[Cindx], [NX2, NX2]))
            plt.title('Cropped')
            plt.show()

    NX2 = np.sqrt(len(Cindx)).astype(int)

    # make new cropped stimulus
    Xstim, rinds = create_time_embedding_valid(Stim[:, Cindx],
                                               [num_lags, NX2, NX2], valid)

    # index into Robs
    Rvalid = deepcopy(Robs[rinds, :])
    Rvalid = Rvalid[:, cids]
    Rvalid = NDNutils.shift_mat_zpad(Rvalid, -1, dim=0)  # get rid of first lag

    NC = Rvalid.shape[1]  # new number of units
    NT = Rvalid.shape[0]
    print('%d valid samples of %d possible' % (NT, Stim.shape[0]))
    print('%d good units' % NC)

    # double-check STAS work with cropped stimulus
    stas = Xstim.T @ Rvalid
    stas = np.reshape(stas, [NX2 * NX2, num_lags, NC]) / NT

    if plot:
        plt.figure(figsize=(10, 15))
        for cc in range(NC):
            plt.subplot(sx, sy, cc + 1)
            bestlag = np.argmax(np.max(abs(stas[:, :, cc]), axis=0))
            plt.imshow(np.reshape(stas[:, bestlag, cc], (NX2, NX2)))
            plt.title(cc)
        plt.show()

    Ui, Xi = NDNutils.generate_xv_folds(NT)

    # fit SCAFFOLD MODEL
    # XTreg may be a scalar or a (d2t, d2x) pair.
    try:
        if len(XTreg) == 2:
            d2t = XTreg[0]
            d2x = XTreg[1]
        else:
            d2t = XTreg[0]
            d2x = deepcopy(d2t)
    except TypeError:
        d2t = deepcopy(XTreg)
        d2x = deepcopy(XTreg)

    # optimizer parameters
    adam_params = U.def_adam_params()

    if not base_mod is None:
        # Refine a copy of the provided model with the requested regularizers.
        side2b = base_mod.copy_model()
        side2b.set_regularization('d2t', d2t, layer_target=0)
        side2b.set_regularization('d2x', d2x, layer_target=0)
        side2b.set_regularization('glocal', Greg, layer_target=0)
        side2b.set_regularization('l1', L1reg, layer_target=0)
        side2b.set_regularization('max', MIreg, ffnet_target=0, layer_target=1)
        side2b.set_regularization('max',
                                  MSCreg,
                                  ffnet_target=1,
                                  layer_target=0)

        if len(side2b.networks) == 4:  # includes autoencoder network
            input_data = [Xstim, Rvalid]
        else:
            input_data = Xstim

    else:
        # Best regularization arrived at
        Greg0 = 1e-1
        Mreg0 = 1e-6
        L1reg0 = 1e-5

        if not num_tkern is None:
            # Extra 1-wide conv layer of temporal kernels up front.
            ndn_par = NDNutils.ffnetwork_params(
                input_dims=[1, NX2, NX2, num_lags],
                layer_sizes=[num_tkern, num_subs, num_hid],
                layer_types=['conv', 'normal', 'normal'],
                ei_layers=[None, num_subs // 2, num_hid // 2],
                conv_filter_widths=[1],
                normalization=[1, 1, 1],
                act_funcs=['lin', 'relu', 'relu'],
                verbose=True,
                reg_list={
                    'd2t': [1e-3],
                    'd2x': [None, XTreg],
                    'l1': [L1reg0, L1reg0],
                    'glocal': [Greg0, Greg0]
                })
        else:
            ndn_par = NDNutils.ffnetwork_params(
                input_dims=[1, NX2, NX2, num_lags],
                layer_sizes=[num_subs, num_hid],
                layer_types=['normal', 'normal'],
                ei_layers=[num_subs // 2, num_hid // 2],
                normalization=[1, 1],
                act_funcs=['relu', 'relu'],
                verbose=True,
                reg_list={
                    'd2t': [d2t],
                    'd2x': [d2x],
                    'l1': [L1reg0, L1reg0],
                    'glocal': [Greg0]
                })

        # Side network reads the scaffold activations out to the NC cells.
        side_par = NDNutils.ffnetwork_params(network_type='side',
                                             xstim_n=None,
                                             ffnet_n=0,
                                             layer_sizes=[NC],
                                             layer_types=['normal'],
                                             normalization=[-1],
                                             act_funcs=['softplus'],
                                             verbose=True,
                                             reg_list={'max': [Mreg0]})

        side_par[
            'pos_constraints'] = True  # ensures Exc and Inh mean something

        if autoencoder:  # capture additional variability using autoencoder
            auto_par = NDNutils.ffnetwork_params(
                input_dims=[1, NC, 1],
                xstim_n=[1],
                layer_sizes=[2, 1, NC],
                time_expand=[0, 15, 0],
                layer_types=['normal', 'temporal', 'normal'],
                conv_filter_widths=[None, 1, None],
                act_funcs=['relu', 'lin', 'lin'],
                normalization=[1, 1, 0],
                reg_list={'d2t': [None, 1e-1, None]})

            # Sum the side-network and autoencoder predictions.
            add_par = NDNutils.ffnetwork_params(xstim_n=None,
                                                ffnet_n=[1, 2],
                                                layer_sizes=[NC],
                                                layer_types=['add'],
                                                act_funcs=['softplus'])

            side2 = NDN.NDN([ndn_par, side_par, auto_par, add_par],
                            ffnet_out=1,
                            noise_dist='poisson')

            # set output regularization on the latent
            side2.batch_size = adam_params['batch_size']
            side2.initialize_output_reg(network_target=2,
                                        layer_target=1,
                                        reg_vals={'d2t': 1e-1})

            input_data = [Xstim, Rvalid]

        else:
            side2 = NDN.NDN([ndn_par, side_par],
                            ffnet_out=1,
                            noise_dist='poisson')

            input_data = Xstim

        # Initial fit, then tighten regularization and refit below.
        _ = side2.train(input_data=input_data,
                        output_data=Rvalid,
                        train_indxs=Ui,
                        test_indxs=Xi,
                        silent=False,
                        learning_alg='adam',
                        opt_params=adam_params)

        side2.set_regularization('glocal', Greg, layer_target=0)
        side2.set_regularization('l1', L1reg, layer_target=0)
        side2.set_regularization('max', MIreg, ffnet_target=0, layer_target=1)
        side2.set_regularization('max', MSCreg, ffnet_target=1, layer_target=0)

        side2b = side2.copy_model()

    _ = side2b.train(input_data=input_data,
                     output_data=Rvalid,
                     train_indxs=Ui,
                     test_indxs=Xi,
                     silent=False,
                     learning_alg='adam',
                     opt_params=adam_params)

    # Null-adjusted test log-likelihood per cell (nats/spike).
    LLs2n = side2b.eval_models(input_data=input_data,
                               output_data=Rvalid,
                               data_indxs=Xi,
                               nulladjusted=True)
    print(np.mean(LLs2n))
    if plot:
        plt.hist(LLs2n)
        plt.xlabel('Nats/Spike')
        plt.show()

    return side2b, Xstim, Rvalid, rinds, cids, Cindx
コード例 #20
0
ファイル: BinocUtils.py プロジェクト: lucabaroni/NDN3
def disparity_predictions(Einfo,
                          resp,
                          indxs=None,
                          num_dlags=8,
                          fr1or3=None,
                          spiking=True,
                          rectified=True,
                          opt_params=None):
    """Calculates a prediction of the disparity (and timing) signals that can be inferred from the response
    by the disparity input alone. This puts a lower bound on how much disparity is driving the response, although
    practically speaking will generate the same disparity tuning curves.

    Usage: Dpred, Tpred = disparity_predictions( Einfo, resp, indxs, num_dlags=8, spiking=True, rectified=True, opt_params=None )

    Args:
        Einfo: experiment-info dict. 'dispt' and 'corrt' are passed to
            disparity_matrix(); 'frs' is compared against fr1or3 when given.
        resp: response array the GLMs are fit to (indexed by time point).
        indxs: indices giving the data range to fit to (default: all rows).
        num_dlags: number of time lags used in each design-matrix embedding.
        fr1or3: if 1 or 3, fit only on time points where Einfo['frs'] equals
            that value; any other value leaves indxs unchanged.
        spiking: if True use a 'poisson' noise distribution, else 'gaussian'.
        rectified: if True the combining layer uses a 'softplus' activation,
            else 'lin'.
        opt_params: optimization parameters forwarded to NDN.train.

    Outputs: Dpred and Tpred will be length of entire experiment -- not just indxs
    """

    # Process disparity into disparty and timing design matrix
    dmat = disparity_matrix(Einfo['dispt'], Einfo['corrt'])
    ND2 = dmat.shape[1]
    if indxs is None:
        indxs = range(dmat.shape[0])

    # everything but blank: time-embed all disparity columns except the last
    Xd = NDNutils.create_time_embedding(dmat[:, :-1], [num_dlags, ND2 - 1, 1])
    # blank: the last dmat column gets its own single-column embedding
    Xb = NDNutils.create_time_embedding(dmat[:, -1], [num_dlags, 1, 1])
    # timing: nonzero wherever any dmat column changes between consecutive
    # time points (a stimulus switch); padded with a trailing 0 to preserve
    # the original length before embedding
    switches = np.expand_dims(np.concatenate(
        (np.sum(abs(np.diff(dmat, axis=0)), axis=1), [0]), axis=0),
                              axis=1)
    Xs = NDNutils.create_time_embedding(switches, [num_dlags, 1, 1])

    # Single linear unit driven by the timing (switch) embedding (input 0).
    tpar = NDNutils.ffnetwork_params(xstim_n=[0],
                                     input_dims=[1, 1, 1, num_dlags],
                                     layer_sizes=[1],
                                     verbose=False,
                                     layer_types=['normal'],
                                     act_funcs=['lin'],
                                     reg_list={
                                         'd2t': [None],
                                         'l1': [None]
                                     })
    # Identical architecture for the blank signal, but reading input 1.
    bpar = deepcopy(tpar)
    bpar['xstim_n'] = [1]
    # Single linear unit over the full disparity embedding (input 2).
    dpar = NDNutils.ffnetwork_params(xstim_n=[2],
                                     input_dims=[1, ND2 - 1, 1, num_dlags],
                                     layer_sizes=[1],
                                     verbose=False,
                                     layer_types=['normal'],
                                     act_funcs=['lin'],
                                     reg_list={
                                         'd2xt': [None],
                                         'l1': [None]
                                     })
    # Combining network over networks 0 and 1 (timing + blank), with either a
    # rectifying (softplus) or linear output unit.
    if rectified:
        comb_parT = NDNutils.ffnetwork_params(xstim_n=None,
                                              ffnet_n=[0, 1],
                                              layer_sizes=[1],
                                              verbose=False,
                                              layer_types=['normal'],
                                              act_funcs=['softplus'])
    else:
        comb_parT = NDNutils.ffnetwork_params(xstim_n=None,
                                              ffnet_n=[0, 1],
                                              layer_sizes=[1],
                                              verbose=False,
                                              layer_types=['normal'],
                                              act_funcs=['lin'])

    # The full model's combiner additionally takes the disparity network (2).
    comb_par = deepcopy(comb_parT)
    comb_par['ffnet_n'] = [0, 1, 2]

    if spiking:
        nd = 'poisson'
    else:
        nd = 'gaussian'

    # Tglm: timing + blank only. DTglm: timing + blank + disparity.
    Tglm = NDN.NDN([tpar, bpar, comb_parT], noise_dist=nd, tf_seed=5)
    DTglm = NDN.NDN([tpar, bpar, dpar, comb_par], noise_dist=nd, tf_seed=5)
    # NOTE(review): these fit-variable lists (fit only the combining layer's
    # biases) are built but currently unused -- the fit_variables arguments in
    # the train() calls below are commented out.
    v2fT = Tglm.fit_variables(layers_to_skip=[2], fit_biases=False)
    v2fT[2][0]['fit_biases'] = True
    v2f = DTglm.fit_variables(layers_to_skip=[3], fit_biases=False)
    v2f[3][0]['fit_biases'] = True

    # Optionally restrict fitting to time points at the requested frame rate.
    if (fr1or3 == 3) or (fr1or3 == 1):
        mod_indxs = np.intersect1d(indxs, np.where(Einfo['frs'] == fr1or3)[0])
        #frs_valid = Einfo['frs'] == fr1or3
    else:
        mod_indxs = indxs
        #frs_valid = Einfo['frs'] > 0
    #to_use = frs_valid[indxs]

    #r = deepcopy(resp[mod_indxs])
    #if len(resp) > len(indxs):
    #    r = deepcopy(resp[indxs])
    #else:
    #    r = deepcopy(resp)

    # Fit both models on the (possibly frame-rate-restricted) index set.
    _ = Tglm.train(
        input_data=[Xs[mod_indxs, :], Xb[mod_indxs, :]],
        output_data=resp[mod_indxs],  # fit_variables=v2fT,
        learning_alg='lbfgs',
        opt_params=opt_params)
    _ = DTglm.train(
        input_data=[Xs[mod_indxs, :], Xb[mod_indxs, :],
                    Xd[mod_indxs, :]],  # fit_variables=v2f, 
        output_data=resp[mod_indxs],
        learning_alg='lbfgs',
        opt_params=opt_params)
    #p1 = Tglm.eval_models(input_data=Xs[indxs,:], output_data=r)[0]
    #p2 = DTglm.eval_models(input_data=[Xs[indxs,:], Xd[indxs,:]], output_data=r)[0]
    #print( "Model performances: %0.4f  -> %0.4f"%(p1, p2) )

    # make predictions of each -- over the FULL experiment, not just mod_indxs
    predT = Tglm.generate_prediction(input_data=[Xs, Xb])
    predD = DTglm.generate_prediction(input_data=[Xs, Xb, Xd])

    return predD, predT
コード例 #21
0
ファイル: generator_net.py プロジェクト: kovacspe/diplomka
    def get_gan_subnet(self,
                       input_noise_size,
                       output_shape,
                       generator_type='conv'):
        """Build NDN ffnetwork params for a GAN generator subnetwork.

        Every variant maps a flat noise vector to an image and ends in a
        'mask' layer with linear activation whose weights are initialized to
        ones (so the mask starts as identity).

        Args:
            input_noise_size: dimensionality of the input noise vector.
            output_shape: shape whose leading (batch) entry is dropped; the
                remainder is used as the target output shape of the deconv
                layers in the 'conv' and 'hybrid' variants.
            generator_type: one of 'conv', 'deepconv', 'lin', 'lin_tanh',
                'hybrid'.

        Returns:
            dict of ffnetwork params (from NDNutils.ffnetwork_params) with
            xstim_n, normalization and initializer fields filled in.

        Raises:
            ValueError: if generator_type is not one of the supported names.
        """
        # Drop the leading (batch) dimension; the rest describes the image.
        output_shape = output_shape[1:]
        layers = 5  # every variant has 5 layers except the 'lin'/'lin_tanh' MLP

        if generator_type == 'conv':
            # Dense projection to a 64x8x8 tensor, then upsampling deconvs.
            params = NDNutils.ffnetwork_params(
                input_dims=[1, input_noise_size],
                layer_sizes=[[64, 8, 8], 32, 16, 1, 1],
                layer_types=['normal', 'deconv', 'deconv', 'deconv', 'mask'],
                act_funcs=['relu', 'relu', 'relu', 'tanh', 'lin'],
                conv_filter_widths=[None, 5, 5, 5, None],
                shift_spacing=[None, 2, 2, 1, None],
                reg_list={'d2x': [None, None, 0.01, 0.01, None]},
                verbose=False)
            # Pin the last two deconv layers to the requested image shape.
            params['output_shape'] = [
                None, None, output_shape, output_shape, None
            ]

        elif generator_type == 'deepconv':
            # DCGAN-style: larger dense seed (512x4x4), stride-2 deconvs.
            params = NDNutils.ffnetwork_params(
                input_dims=[1, input_noise_size],
                layer_sizes=[[512, 4, 4], 256, 128, 1, 1],
                layer_types=['normal', 'deconv', 'deconv', 'deconv', 'mask'],
                act_funcs=['relu', 'relu', 'relu', 'tanh', 'lin'],
                conv_filter_widths=[None, 5, 5, 5, None],
                shift_spacing=[None, 2, 2, 2, None],
                reg_list={'d2x': [None, None, None, 0.01, None]},
                verbose=False)
            params['output_shape'] = [None, None, None, output_shape, None]

        elif generator_type in ('lin', 'lin_tanh'):
            # Fully-connected generator; output activation is the only
            # difference between the two names.
            act = 'lin' if generator_type == 'lin' else 'tanh'
            params = NDNutils.ffnetwork_params(
                input_dims=[1, input_noise_size],
                layer_sizes=[512, 1024, [1, 31, 31], 1],
                layer_types=['normal', 'normal', 'normal', 'mask'],
                act_funcs=['tanh', 'tanh', act, 'lin'],
                reg_list={
                    'l2': [0.01, 0.01, 0.01, None],
                },
                verbose=False)
            layers = 4

        elif generator_type == 'hybrid':
            # Two dense layers followed by upsampling deconvs.
            params = NDNutils.ffnetwork_params(
                input_dims=[1, input_noise_size],
                layer_sizes=[256, [16, 16, 16], 8, 1, 1],
                layer_types=['normal', 'normal', 'deconv', 'deconv', 'mask'],
                act_funcs=['relu', 'relu', 'relu', 'tanh', 'lin'],
                conv_filter_widths=[None, 5, 5, 5, None],
                shift_spacing=[None, 2, 2, 1, None],
                reg_list={'d2x': [None, None, 0.01, 0.01, None]},
                verbose=False)
            params['output_shape'] = [
                None, None, output_shape, output_shape, None
            ]
        else:
            raise ValueError(
                f"Generator type '{generator_type}' not implemented.")

        # Common settings: noise comes in as stimulus 0; all weights normal
        # except the final mask layer, which starts at ones with zero biases.
        params['xstim_n'] = [0]
        params['normalize_output'] = [None] * layers
        params['weights_initializers'] = ['normal'] * (layers - 1) + ['ones']
        params['biases_initializers'] = ['zeros'] * layers
        return params
コード例 #22
0

# One subunit per output cell (NC presumably is the cell count -- verify
# against where NC is set), with half as many hidden units.
num_subs=NC
num_hid=num_subs//2
        
# ndn_par = NDNutils.ffnetwork_params( 
#     input_dims=[1,NX2,NX2, num_lags],
#     layer_sizes=[num_subs, num_hid], 
#     layer_types=['normal','normal'], ei_layers=[num_subs//2, num_hid//2],
#     normalization=[1, 1], act_funcs=['relu', 'relu'], verbose=True,
#     reg_list={'d2xt':[XTreg], 'l1':[L1reg0, L1reg0], 'glocal':[Greg0]})

# Temporal front-end: time-expands the NX2 x NX2 stimulus over num_lags and
# collapses it with a single linear, d2t-regularized temporal filter.
t_layer = NDNutils.ffnetwork_params(
    input_dims=[1,NX2,NX2], time_expand=num_lags,
    layer_sizes=[1],
    layer_types=['temporal'],
    normalization=[1],
    act_funcs=['lin'],
    verbose=True,
    reg_list={'d2t':[0.05]})

# Main network stacked on the temporal layer's output (ffnet_n=0) rather than
# the raw stimulus. NOTE(review): compared with the commented-out all-in-one
# version above, this drops ei_layers and uses spatial-only 'd2x' reg instead
# of 'd2xt' -- presumably because time is now handled by t_layer.
ndn_par = NDNutils.ffnetwork_params( 
    xstim_n=None,
    ffnet_n=0,
    input_dims=[1,NX2,NX2],
    layer_sizes=[num_subs, num_hid], 
    layer_types=['normal','normal'],
    normalization=[1, 1],
    act_funcs=['relu', 'relu'],
    verbose=True,
    reg_list={'d2x':[XTreg], 'l1':[L1reg0, L1reg0], 'glocal':[Greg0]})