Code Example #1
 def get_params(self):
     params = NDNutils.ffnetwork_params(
         input_dims=[1, self.width, self.height],
         layer_sizes=[
             self.args['channels'], self.args['channels'],
             int(0.2 * self.out_num), self.out_num
         ],  # paper: 9, 0.2*output_shape
         ei_layers=[None, None, None, None],
         normalization=[0, 0, 0, 0],
         layer_types=['var', 'conv', self.args['hidden_lt'], 'normal'],
         act_funcs=['lin', 'softplus', 'softplus', 'softplus'],
         shift_spacing=[1, (self.args['c_size'] + 1) // 2, 1, 1],
         conv_filter_widths=[0, self.args['c_size'], 0, 0],
         reg_list={
             'd2x': [None, self.args['cd2x'], None, None],
             self.args['hidden_t']:
             [None, None, self.args['hidden_s'], None],
             'l2': [0.1, None, None, 0.1],
         })
     params['weights_initializers'] = [
         'normal', 'normal', 'normal', 'normal'
     ]
     params['biases_initializers'] = [
         'normal', 'trunc_normal', 'trunc_normal', 'trunc_normal'
     ]
     return params
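Usage sketch (hedged): the dictionary returned by get_params is a standard NDNutils FFNetwork specification, so it can be handed straight to NDN.NDN, as in the training examples further below. Here model, Xstim, Robs, Ui, and Xi are hypothetical stand-ins for an instance of the class above and its data.

import NDN3.NDN as NDN

params = model.get_params()  # hypothetical instance of the class above
ndn_mod = NDN.NDN([params], noise_dist='poisson')  # Poisson noise, as in the fitting examples below
_ = ndn_mod.train(input_data=Xstim,   # hypothetical time-embedded stimulus
                  output_data=Robs,   # hypothetical observed responses
                  train_indxs=Ui, test_indxs=Xi,  # hypothetical train/test folds
                  learning_alg='adam')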
Code Example #2
    def get_hsm_params_custom(input, output, i):
        _, output_shape = output.shape
        _, input_shape = input.shape
        pprint(f"in: {input_shape} out: {output_shape}")

        input_w, input_h = int(math.sqrt(input_shape)), int(
            math.sqrt(input_shape))
        hsm_params = NDNutils.ffnetwork_params(
            verbose=False,
            input_dims=[1, input_w, input_h],
            layer_sizes=[c_filters, c_filters, c_filters,
                         output_shape],  # paper: 9, 0.2*output_shape
            ei_layers=[None, None, None, None],
            normalization=[0, 0, 0, 0],
            layer_types=['conv', 'conv', 'conv', 'sep'],
            act_funcs=['softplus', 'softplus', 'softplus', 'softplus'],
            shift_spacing=[2, 2, 2, 0],
            conv_filter_widths=[13, 3, 3, 0],
            reg_list={
                'd2x': [cd2x, cd2x, cd2x, None],
                'max_filt': [max_filt, max_filt, max_filt, None],
                'l1': [None, None, None, l1]
            })
        hsm_params['weights_initializers'] = [
            'normal', 'normal', 'normal', 'normal'
        ]
        hsm_params['biases_initializers'] = [
            'trunc_normal', 'trunc_normal', 'trunc_normal', 'trunc_normal'
        ]

        return hsm_params
Code Example #3
File: bs4_exp10.py Project: lucabaroni/msc-neuro
    def get_hsm_params_custom(input, output, i):
        _, output_shape = output.shape
        _, input_shape = input.shape
        pprint(f"in: {input_shape} out: {output_shape}")

        input_w, input_h = int(math.sqrt(input_shape)), int(
            math.sqrt(input_shape))
        hsm_params = NDNutils.ffnetwork_params(
            verbose=False,
            input_dims=[1, input_w, input_h],
            layer_sizes=[c_filters, int(0.2 * output_shape),
                         output_shape],  # paper: 9, 0.2*output_shape
            ei_layers=[None, None, None],
            normalization=[0, 0, 0],
            layer_types=['conv_diff_of_gaussians', hidden_lt, 'normal'],
            act_funcs=['lin', 'softplus', 'softplus'],
            shift_spacing=[(c_size + 1) // 2, 0],
            conv_filter_widths=[c_size, 0, 0],
            reg_list={
                hidden_t: [None, hidden_s, None],
                'l2': [None, None, 0.1],
            })
        hsm_params['weights_initializers'] = ['random', 'normal', 'normal']
        hsm_params['biases_initializers'] = [
            'trunc_normal', 'trunc_normal', 'trunc_normal'
        ]

        return hsm_params
Code Example #4
    def get_hsm_params_custom(input, output, i):
        _, output_shape = output.shape
        _, input_shape = input.shape
        pprint(f"in: {input_shape} out: {output_shape}")

        input_w, input_h = int(math.sqrt(input_shape)), int(
            math.sqrt(input_shape))
        hsm_params = NDNutils.ffnetwork_params(
            verbose=False,
            input_dims=[1, input_w, input_h],
            layer_sizes=[9, int(hidden * output_shape),
                         output_shape],  # paper: 9, 0.2*output_shape
            ei_layers=[None, None, None],
            normalization=[0, 0, 0],
            layer_types=['diff_of_gaussians', 'normal', 'normal'],
            act_funcs=['lin', 'softplus', 'softplus'],
            reg_list={
                'l2': [None, reg_h, reg_l],
            })
        hsm_params['weights_initializers'] = ['random', 'normal', 'normal']
        hsm_params['biases_initializers'] = [
            'trunc_normal', 'trunc_normal', 'trunc_normal'
        ]

        return hsm_params
Code Example #5
    def get_params(self):
        hsm_params = NDNutils.ffnetwork_params(
            input_dims=[1, self.width, self.height],
            layer_sizes=[
                int(self.args['hidden']),
                int(self.args['hidden']),
                int(0.2 * self.out_num), self.out_num
            ],  # paper: 9, 0.2*output_shape
            ei_layers=[None, None, None, None],
            normalization=[0, 0, 0, 0],
            layer_types=[
                'var', 'conv_diff_of_gaussians', self.args['layer'], 'normal'
            ],
            act_funcs=['lin', 'lin', 'softplus', 'softplus'],
            shift_spacing=[1, (self.args['c_size'] + 1) // 2, 1, 1],
            conv_filter_widths=[
                self.args['c_size'], self.args['c_size'], 0, 0
            ],
            reg_list={
                'l2': [0.1, None, None, 0.1],
                'l1': [None, None, self.args['reg_h'], None],
            })
        hsm_params['weights_initializers'] = [
            'normal', 'random', 'normal', 'normal'
        ]
        hsm_params['biases_initializers'] = [
            'trunc_normal', 'trunc_normal', 'trunc_normal', 'trunc_normal'
        ]

        return hsm_params
Code Example #6
File: generator_net.py Project: kovacspe/diplomka
    def get_encoder(self,
                    noise_size,
                    input_shape,
                    ffnet_in,
                    generator_type='conv'):
        if generator_type == 'conv':
            params = NDNutils.ffnetwork_params(
                input_dims=input_shape,
                layer_sizes=[8, 8, 16, noise_size],
                layer_types=['conv', 'conv', 'conv', 'normal'],
                act_funcs=['relu', 'relu', 'relu', 'lin'],
                conv_filter_widths=[5, 5, 7, None],
                shift_spacing=[1, 2, 2, None],
                reg_list={'d2x': [0.1, 0.1, None, None]},
                verbose=False)
        elif generator_type == 'lin':
            params = NDNutils.ffnetwork_params(
                input_dims=input_shape,
                layer_sizes=[8, 8, 16, noise_size],
                # four entries to match layer_sizes (the original listed only three)
                layer_types=['normal', 'normal', 'normal', 'normal'],
                act_funcs=['relu', 'relu', 'relu', 'lin'],
                reg_list={'d2x': [0.1, 0.1, None, None]},
                verbose=False)
        elif generator_type == 'hybrid':
            params = NDNutils.ffnetwork_params(
                input_dims=input_shape,
                layer_sizes=[8, 8, 16, noise_size],
                layer_types=['conv', 'conv', 'normal', 'normal'],
                act_funcs=['relu', 'relu', 'relu', 'lin'],
                conv_filter_widths=[5, 5, 7, None],
                shift_spacing=[2, 2, None, None],
                reg_list={'d2x': [0.1, 0.1, None, None]},
                verbose=False)
        else:
            raise ValueError(
                f'Generator type \'{generator_type}\' not implemented.')

        params['xstim_n'] = None
        params['ffnet_n'] = [ffnet_in]
        return params
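Because the returned params set xstim_n to None and ffnet_n to [ffnet_in], the encoder reads the output of another FFNetwork rather than a stimulus stream. A minimal wiring sketch follows, assuming a hypothetical stimulus network in slot 0 and an object net exposing get_encoder; the shapes and noise model are placeholders.

import NDN3.NDN as NDN
import NDN3.NDNutils as NDNutils

# hypothetical stimulus-driven network occupying FFNetwork slot 0
stim_par = NDNutils.ffnetwork_params(input_dims=[1, 31, 31],
                                     layer_sizes=[16],
                                     layer_types=['conv'],
                                     conv_filter_widths=[5],
                                     act_funcs=['relu'])
# encoder network reads from slot 0 via ffnet_n (set inside get_encoder)
enc_par = net.get_encoder(noise_size=10, input_shape=[1, 31, 31], ffnet_in=0)
model = NDN.NDN([stim_par, enc_par], noise_dist='gaussian')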
Code Example #7
    def get_hsm_params_custom(input, output, i):
        _, output_shape = output.shape
        _, input_shape = input.shape
        pprint(f"in: {input_shape} out: {output_shape}")

        input_w, input_h = int(math.sqrt(input_shape)), int(
            math.sqrt(input_shape))
        hsm_params = NDNutils.ffnetwork_params(
            verbose=False,
            input_dims=[1, input_w, input_h],
            layer_sizes=[output_shape],  # paper: 9, 0.2*output_shape
            ei_layers=[None],
            normalization=[0],
            layer_types=['normal'],
            act_funcs=['softplus'],
            reg_list={
                'd2x': [reg_l],
            })
        hsm_params['weights_initializers'] = ['normal']
        hsm_params['biases_initializers'] = ['trunc_normal']

        return hsm_params
Code Example #8
    def get_params(self):
        hsm_params = NDNutils.ffnetwork_params(
            input_dims=[1, self.width, self.height],
            layer_sizes=[
                int(self.args['hidden'] * self.out_num),
                int(self.args['hidden'] * self.out_num), self.out_num
            ],  # paper: 9, 0.2*output_shape
            ei_layers=[None, None, None],
            normalization=[0, 0, 0],
            layer_types=['var', 'normal', 'normal'],
            act_funcs=['lin', 'softplus', 'softplus'],
            reg_list={
                'l2': [0.1, None, self.args['reg_l']],
                'd2x': [None, self.args['reg_h'], None],
            })
        hsm_params['weights_initializers'] = ['normal', 'normal', 'normal']
        hsm_params['biases_initializers'] = [
            'trunc_normal', 'trunc_normal', 'trunc_normal'
        ]

        return hsm_params
Code Example #9
    def get_params(self):
        hsm_params = NDNutils.ffnetwork_params(
            input_dims=[1, self.width, self.height],
            layer_sizes=[
                self.args['filt_size'], self.args['filt_size'],
                int(self.args['perc_output'] * self.out_num), self.out_num
            ],  # paper: 9, 0.2*output_shape
            ei_layers=[None, None, None, None],
            normalization=[0, 0, 0, 0],
            layer_types=['var', 'diff_of_gaussians', 'normal', 'normal'],
            act_funcs=['lin', 'lin', 'softplus', 'softplus'],
            reg_list={
                'l2': [0.1, None, None, 0.1],
            })
        hsm_params['weights_initializers'] = [
            'normal', 'random', 'normal', 'normal'
        ]
        hsm_params['biases_initializers'] = [
            'trunc_normal', 'trunc_normal', 'trunc_normal', 'trunc_normal'
        ]

        return hsm_params
Code Example #10
 def get_params(self):
     params = NDNutils.ffnetwork_params(
         input_dims=[1, self.width, self.height],
         layer_sizes=[
             self.args['channels'], self.args['channels'],
             self.args['channels'], self.out_num
         ],
         layer_types=['conv', 'conv', 'conv', 'sep'],
         act_funcs=['softplus', 'softplus', 'lin', 'softplus'],
         shift_spacing=[1, 1, 1, None],
         reg_list={
             #'d2x': [0.03, 0.015, 0.015, None],
             'l1': [None, None, None, 0.02]
         })
     params['conv_filter_widths'] = [13, 5, 5, None]
     params['weights_initializers'] = [
         'trunc_normal', 'trunc_normal', 'trunc_normal', 'trunc_normal'
     ]
     params['biases_initializers'] = [
         'zeros', 'zeros', 'zeros', 'trunc_normal'
     ]
     params['pos_constraint'] = [False, False, False, True]
     return params
Code Example #11
 def get_params(self):
     params = NDNutils.ffnetwork_params(
         input_dims=[10],
         layer_sizes=[[31, 31], 30,
                      int(0.2 * self.out_num),
                      self.out_num],  # paper: 9, 0.2*output_shape
         ei_layers=[None, None, None, None],
         normalization=[0, 0, 0, 0],
         layer_types=['normal', 'conv', 'conv', 'normal'],
         act_funcs=['lin', 'softplus', 'softplus', 'softplus'],
         shift_spacing=[1, (7 + 1) // 2, 1, 1],
         conv_filter_widths=[0, 7, 0, 0],
         reg_list={
             'd2x': [None, 0.2, None, None],
             'l2': [0.1, None, None, 0.1],
         })
     params['weights_initializers'] = [
         'normal', 'normal', 'normal', 'normal'
     ]
     params['biases_initializers'] = [
         'normal', 'trunc_normal', 'trunc_normal', 'trunc_normal'
     ]
     return params
Code Example #12
#%%
import warnings; warnings.simplefilter('ignore')

import sys
sys.path.insert(0, '/home/jake/Data/Repos/')

import V1FreeViewingCode.Analysis.notebooks.Utils as U
import V1FreeViewingCode.Analysis.notebooks.gratings as gt


import NDN3.NDNutils as NDNutils

which_gpu = NDNutils.assign_gpu()

import os

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(0)

import tensorflow as tf

import V1FreeViewingCode.Analysis.notebooks.neureye as ne
import NDN3.NDN as NDN
import NDN3.Utils.DanUtils as DU

output_dir = '/home/jake/Data/tensorboard/tensorboard' + str(which_gpu)
print(output_dir)

import numpy as np
from scipy.ndimage import gaussian_filter
from copy import deepcopy
Code Example #13
#%% set paths
import sys
sys.path.insert(0, '/home/jake/Data/Repos/')
# import deepdish as dd
import Utils as U
import V1FreeViewingCode.Analysis.notebooks.gratings as gt

import warnings
warnings.simplefilter('ignore')
import NDN3.NDNutils as NDNutils

which_gpu = NDNutils.assign_gpu()
from scipy.ndimage import gaussian_filter
from copy import deepcopy

import numpy as np
import tensorflow as tf

import matplotlib.pyplot as plt  # plotting
# import seaborn as sns

import NDN3.NDN as NDN
import NDN3.Utils.DanUtils as DU

# %% load example sessions

datadir = '/home/jake/Data/Datasets/MitchellV1FreeViewing/MT_RF/'
# fname = 'Ellie_190120_0_0_30_30_1.mat'
fname = 'Ellie_190120_0_0_30_30_2.mat'

matdat = gt.loadmat(datadir + fname)
Code Example #14
# %% Import libraries
import sys
sys.path.insert(0, '/home/jake/Data/Repos/')
import deepdish as dd
import Utils as U
import gratings as gt

import NDN3.NDNutils as NDNutils

which_gpu = NDNutils.assign_gpu()
from scipy.ndimage import gaussian_filter
from copy import deepcopy

import numpy as np
import tensorflow as tf

import matplotlib.pyplot as plt  # plotting
import seaborn as sns

import NDN3.NDN as NDN
import NDN3.Utils.DanUtils as DU

# %% list sessions
sesslist = gt.list_sessions()
sesslist = list(sesslist)
for i in range(len(sesslist)):
    print("%d %s" % (i, sesslist[i]))

# %% Load one session
indexlist = [51]
stim, sacon, sacoff, Robs, DF, basis, opts, sacbc, valid, eyepos = gt.load_and_setup(
    indexlist, npow=1.8)
Code Example #15
#%%
import warnings
warnings.simplefilter('ignore')

import sys
sys.path.insert(0, '/home/jake/Data/Repos/')

import V1FreeViewingCode.Analysis.notebooks.Utils as U
import V1FreeViewingCode.Analysis.notebooks.gratings as gt

import NDN3.NDNutils as NDNutils

which_gpu = NDNutils.assign_gpu()

import os

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(0)

from scipy.ndimage import gaussian_filter
from copy import deepcopy

import numpy as np
import tensorflow as tf

import matplotlib.pyplot as plt  # plotting
import seaborn as sns

import NDN3.NDN as NDN
Code Example #16
File: neureye.py Project: jcbyts/V1FreeViewingCode
def get_corr_grid(base_mod,
                  Stim,
                  Robs,
                  dims,
                  cids,
                  eyeAtFrame,
                  valid,
                  valid_eye_rad=5.2,
                  Erad=0.75,
                  Npos=25,
                  crop_edge=3,
                  plot=True,
                  interpolation_steps=1,
                  softmax=10,
                  autoencoder=False):
    '''
        get correction grid
    '''
    from tqdm import tqdm  # progress bar

    eyeX = eyeAtFrame[:, 0]
    eyeY = eyeAtFrame[:, 1]

    NX = dims[0]
    NY = dims[1]

    # get valid indices when eye position is within a specified radius
    eyeVal = np.hypot(eyeX, eyeY) < valid_eye_rad
    valdata = np.intersect1d(valid, np.where(eyeVal)[0])

    num_lags = base_mod.network_list[0]['input_dims'][-1]

    # recreate full Xstim
    # Xstim = NDNutils.create_time_embedding(Stim, [num_lags, NX, NY], tent_spacing=1)
    Xstim, rinds = create_time_embedding_valid(Stim, [num_lags, NX, NY],
                                               valdata)
    Rvalid = deepcopy(Robs[rinds, :])
    Rvalid = Rvalid[:, cids]
    Rvalid = NDNutils.shift_mat_zpad(Rvalid, -1, dim=0)  # get rid of first lag
    NC = Rvalid.shape[1]
    eyeX = eyeX[rinds]
    eyeY = eyeY[rinds]

    # KEEP THIS IN CASE YOU WANT TO USE THE AUTOENCODER LATER
    # # old network list
    # netlist_old = deepcopy(base_mod.network_list)[0]

    # # convert scaffold network into a convolutional network
    # scaff_par = deepcopy(base_mod.network_list[0])
    # scaff_par['input_dims'] = [1, NX, NY] + [scaff_par['input_dims'][-1]]
    # scaff_par['layer_types']=['conv', 'conv']
    # scaff_par['conv_filter_widths'] = [netlist_old['input_dims'][1], 1] # base_mod

    # side_par = deepcopy(base_mod.network_list[1])

    # if len(base_mod.networks) > 2: # it has an autoencoder
    #     print('found extra FFNetworks. Assuming they are an autoencoder')

    #     # side_par['layer_sizes'][-1] = [NC,NX,NY]
    #     side_par['layer_types'] = ['conv']
    #     side_par['conv_filter_widths'] = [1]
    #     auto_par = deepcopy(base_mod.network_list[2])
    #     auto_par['layer_sizes'][-1] = [NC,NX,NY]
    #     add_par = deepcopy(base_mod.network_list[3])
    #     add_par['input_dims'] = None
    #     add_par['layer_sizes'][-1] = NC*NX*NY

    #     if autoencoder:
    #         cell_shift_mod = NDN.NDN( [scaff_par, side_par, auto_par, add_par], ffnet_out=3, noise_dist='poisson')
    #     else:
    #         cell_shift_mod = NDN.NDN( [scaff_par, side_par], ffnet_out=1, noise_dist='poisson' )
    # else:
    #     side_par['layer_types'] = ['conv']
    #     side_par['conv_filter_widths'] = [1]
    #     autoencoder = False
    #     cell_shift_mod = NDN.NDN( [scaff_par, side_par], ffnet_out=1, noise_dist='poisson' )

    # num_space = np.prod(cell_shift_mod.input_sizes[0][:-1])

    # # copy first network verbatim (only thing diff is output is a convolution)
    # for nl in range(len(cell_shift_mod.networks[0].layers)):
    #     cell_shift_mod.networks[0].layers[nl].weights = deepcopy(base_mod.networks[0].layers[nl].weights)
    #     cell_shift_mod.networks[0].layers[nl].biases = deepcopy(base_mod.networks[0].layers[nl].biases)

    # if autoencoder:

    #     # side par
    #     cell_shift_mod.networks[1].layers[0].weights = deepcopy(base_mod.networks[1].layers[0].weights)
    #     cell_shift_mod.networks[1].layers[0].biases = deepcopy(base_mod.networks[1].layers[0].biases)
    #     # autoencoder
    #     for nl in range(len(cell_shift_mod.networks[1].layers)-1):
    #         cell_shift_mod.networks[2].layers[nl].weights = deepcopy(base_mod.networks[2].layers[nl].weights)
    #         cell_shift_mod.networks[2].layers[nl].biases = deepcopy(base_mod.networks[2].layers[nl].biases)
    #     # expand output
    #     cell_shift_mod.networks[2].layers[-1].weights = conv_expand(deepcopy(base_mod.networks[2].layers[-1].weights), num_space)
    #     cell_shift_mod.networks[2].layers[-1].biases = conv_expand(deepcopy(base_mod.networks[2].layers[-1].biases), num_space)

    #     # add_par --> do I need to do anything?

    # else: # expansion is handled with a conv layer so just copy
    #     cell_shift_mod.networks[1].layers[0].weights = deepcopy(base_mod.networks[1].layers[0].weights)
    #     cell_shift_mod.networks[1].layers[0].biases = deepcopy(base_mod.networks[1].layers[0].biases)

    cell_shift_mod = make_model_convolutional(base_mod, [NX, NY])
    num_space = np.prod(cell_shift_mod.input_sizes[0][:-1])

    # locations in the grid
    locs = np.linspace(-valid_eye_rad, valid_eye_rad, Npos)
    print(locs)

    # loop over grid and calculate likelihood surfaces
    LLspace1 = np.zeros([Npos, Npos, NY - 2 * crop_edge, NX - 2 * crop_edge])

    # Loop over positions (this is the main time-consuming operation)
    for xx in tqdm(range(Npos)):
        for yy in range(Npos):

            # get index for when the eye position was within the boundaries
            rs = np.hypot(eyeX - locs[xx], eyeY - locs[yy])

            # eccentricity dependent
            ecc = np.hypot(locs[xx], locs[yy])
            # Ethresh = Erad + .2*ecc # eccentricity dependent threshold
            Ethresh = Erad
            # valE = np.where(rs < (Erad + ecc*.5)[0]
            valE = np.where(rs < Ethresh)[0]
            valtot = valE
            # valtot = np.intersect1d(valdata, valE)

            if len(valtot) > 100:  # at least 100 samples to evaluate a likelihood

                Rcc = conv_expand(Rvalid[valtot, :], num_space)

                # print("RCC shape (%d, %d)" %Rcc.shape)
                # print("Model Output = %d" %cell_shift_mod.output_sizes[0])
                # get negative log-likelihood at all spatial shifts
                if autoencoder:
                    LLs = cell_shift_mod.eval_models(
                        input_data=[Xstim[valtot, :], Rvalid[valtot, :]],
                        output_data=Rcc,
                        nulladjusted=False)
                else:
                    LLs = cell_shift_mod.eval_models(
                        input_data=[Xstim[valtot, :]],
                        output_data=Rcc,
                        nulladjusted=False)

                # reshape into spatial map
                LLcc = np.reshape(LLs, [NY, NX, NC])

                LLpos = np.mean(LLcc, axis=2)
                if crop_edge == 0:
                    LLspace1[xx, yy, :, :] = deepcopy(LLpos)
                else:
                    LLspace1[xx, yy, :, :] = deepcopy(LLpos)[
                        crop_edge:-crop_edge, :][:, crop_edge:-crop_edge]

    if plot:  # plot the recovered likelihood surfaces
        plt.figure(figsize=(15, 15))

        for xx in range(Npos):
            for yy in range(Npos):
                if LLspace1[xx][yy] is not None:
                    ax = plt.subplot(Npos, Npos, yy * Npos + xx + 1)
                    plt.imshow(LLspace1[xx, yy, :, :])
                    plt.axvline(NX // 2 - crop_edge, color='k')
                    plt.axhline(NY // 2 - crop_edge, color='k')

                    ax.set_xticks([])
                    ax.set_yticks([])

        plt.show()

    centers5, LLspace3 = get_centers_from_LLspace(
        LLspace1, interpolation_steps=interpolation_steps, crop_edge=crop_edge)

    return centers5, locs, LLspace1
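A hedged call sketch: get_corr_grid takes a fitted base model plus the session's stimulus, responses, and per-frame eye position, and returns the interpolated centers, grid locations, and raw likelihood surfaces. All arrays below are hypothetical placeholders for one session.

# hypothetical inputs: base_mod from get_stim_model, plus session arrays
centers, locs, LLspace = get_corr_grid(base_mod, Stim, Robs,
                                       dims=[NX, NY], cids=cids,
                                       eyeAtFrame=eyeAtFrame, valid=valid,
                                       valid_eye_rad=5.2, Npos=25, plot=False)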
Code Example #17
# %% Import libraries
import sys
sys.path.insert(0, '/home/jake/Data/Repos/')
import deepdish as dd
import Utils as U
import gratings as gt

import NDN3.NDNutils as NDNutils

which_gpu = NDNutils.assign_gpu()
from scipy.ndimage import gaussian_filter
from copy import deepcopy

import numpy as np
import tensorflow as tf

import matplotlib.pyplot as plt  # plotting
import seaborn as sns

import NDN3.NDN as NDN
import NDN3.Utils.DanUtils as DU

# %% list sessions
sesslist = gt.list_sessions()
sesslist = list(sesslist)
for i in range(len(sesslist)):
    print("%d %s" % (i, sesslist[i]))

# %% Load one session
sessid = 51

matdat = gt.load_data(sessid)
Code Example #18
#%%
import warnings
warnings.simplefilter('ignore')

import sys
sys.path.insert(0, '/home/jake/Data/Repos/')

import V1FreeViewingCode.Analysis.notebooks.Utils as U
import V1FreeViewingCode.Analysis.notebooks.gratings as gt

import NDN3.NDNutils as NDNutils

which_gpu = NDNutils.assign_gpu()

import os

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(0)

import tensorflow as tf

import V1FreeViewingCode.Analysis.notebooks.neureye as ne
import NDN3.NDN as NDN
import NDN3.Utils.DanUtils as DU

output_dir = '/home/jake/Data/tensorboard/tensorboard' + str(which_gpu)
print(output_dir)

import numpy as np
from scipy.ndimage import gaussian_filter
from copy import deepcopy
Code Example #19
File: BinocUtils.py Project: lucabaroni/NDN3
def binocular_data_import(datadir, expt_num):
    """Usage: stim, Robs, DFs, used_inds, Eadd_info = binocular_data_import( datadir, expt_num )

    Inputs:
        datadir: directory on local drive where datafiles are
        expt_num: the experiment number (1-12) representing the binocular experiments we currently have. All
                    datafiles are called 'BS2expt?.mat'. Note that numbered from 1 (not zero)
    
    Outputs:
        stim: formatted as NT x 72 (stimuli in each eye are cropped to NX=36). It will be time-shifted by 1 to 
                eliminate 0-latency stim
        Robs: response concatenating all SUs and MUs, NT x NC. NSU is saved as part of Eadd_info
        DFs:  data_filters for experiment, also NT x NC (note MU datafilters are initialized to 1)
        used_inds: indices overwhich data is valid according to initial data parsing (adjusted to python) 
        Eadd_info: dictionary containing all other relevant info for experiment
    """

    # Constants that are pretty much set for our datasets
    stim_trim = np.concatenate((range(3, 39), range(45, 81)))
    time_shift = 1
    NX = 36

    # Read all data into memory
    filename = 'B2Sexpt' + str(expt_num) + '.mat'
    Bmatdat = sio.loadmat(datadir + filename)
    stim = NDNutils.shift_mat_zpad(Bmatdat['stim'][:, stim_trim], time_shift,
                                   0)

    #NX = int(stim.shape[1]) // 2
    RobsSU = Bmatdat['RobsSU']
    RobsMU = Bmatdat['RobsMU']
    #MUA = Bmatdata[nn]['RobsMU']
    #SUA = Bmatdata[nn]['RobsSU']
    NTtot, numSUs = RobsSU.shape

    data_filtersSU = Bmatdat['SUdata_filter']
    data_filtersMU = np.ones(
        RobsMU.shape, dtype='float32'
    )  # note: currently no MU data_filter, but there needs to be one

    Robs = np.concatenate((RobsSU, RobsMU), axis=1)
    DFs = np.concatenate((data_filtersSU, data_filtersMU), axis=1)

    # Valid and cross-validation indices
    used_inds = np.add(np.transpose(Bmatdat['used_inds'])[0, :],
                       -1)  # note adjustment for python v matlab indexing
    Ui_analog = Bmatdat['Ui_analog'][:, 0]  # these are automatically in register
    XiA_analog = Bmatdat['XiA_analog'][:, 0]
    XiB_analog = Bmatdat['XiB_analog'][:, 0]
    # two cross-validation datasets -- for now combine
    Xi_analog = XiA_analog + XiB_analog  # since they are non-overlapping, will make 1 in both places

    # Derive full-dataset Ui and Xi from analog values
    Ui = np.intersect1d(used_inds, np.where(Ui_analog > 0)[0])
    Xi = np.intersect1d(used_inds, np.where(Xi_analog > 0)[0])

    NC = Robs.shape[1]
    NT = len(used_inds)

    dispt_raw = NDNutils.shift_mat_zpad(Bmatdat['all_disps'][:, 0], time_shift,
                                        0)  # time shift to keep consistent
    # this has the actual disparity values, which are at the resolution of single bars, and centered around the neuron's
    # disparity (sometimes shifted to drive neurons well)
    # Sometimes a slightly different disparity is used, so it helps to round the values at some resolution
    dispt = np.round(dispt_raw * 100) / 100
    frs = NDNutils.shift_mat_zpad(Bmatdat['all_frs'][:, 0], time_shift, 0)
    corrt = NDNutils.shift_mat_zpad(Bmatdat['all_corrs'][:, 0], time_shift, 0)
    # Make dispt consistent with corrt (early experiments had dispt labeled incorrectly)
    corr_funny = np.where((corrt == 0) & (dispt != -1005))[0]
    if len(corr_funny) > 0:
        print("Warning: %d indices have corr=0 but labeled disparity." %
              len(corr_funny))
        dispt[corr_funny] = -1005

    disp_list = np.unique(dispt)
    # where it is -1009 this corresponds to a blank frame
    # where it is -1005 this corresponds to uncorrelated images between the eyes

    if Bmatdat['rep_inds'] is None:
        #rep_inds = [None]*numSUs
        rep_inds = None
    elif len(Bmatdat['rep_inds'][0][0]) < 10:
        rep_inds = None
    else:
        rep_inds = []
        for cc in range(numSUs):
            rep_inds.append(np.add(Bmatdat['rep_inds'][0][cc], -1))

    print("Expt %d: %d SUs, %d total units, %d out of %d time points used." %
          (expt_num, numSUs, NC, NT, NTtot))
    #print(len(disp_list), 'different disparities:', disp_list)

    Eadd_info = {
        'Ui_analog': Ui_analog,
        'XiA_analog': XiA_analog,
        'XiB_analog': XiB_analog,
        'Xi_analog': Xi_analog,
        'Ui': Ui,
        'Xi': Xi,  # these are for use with data_filters and inclusion of the whole experiment
        'NSU': numSUs,
        'NC': NC,
        'dispt': dispt,
        'corrt': corrt,
        'disp_list': disp_list,
        'frs': frs,
        'rep_inds': rep_inds
    }

    return stim, Robs, DFs, used_inds, Eadd_info
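Following the docstring's usage line, a minimal sketch (the datadir path is a placeholder):

datadir = '/path/to/binocular/data/'  # placeholder: folder holding the B2Sexpt?.mat files
stim, Robs, DFs, used_inds, Eadd_info = binocular_data_import(datadir, expt_num=1)
Ui, Xi = Eadd_info['Ui'], Eadd_info['Xi']  # train / cross-validation indices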
Code Example #20
File: BinocUtils.py Project: lucabaroni/NDN3
def disparity_predictions(Einfo,
                          resp,
                          indxs=None,
                          num_dlags=8,
                          fr1or3=None,
                          spiking=True,
                          rectified=True,
                          opt_params=None):
    """Calculates a prediction of the disparity (and timing) signals that can be inferred from the response
    by the disparity input alone. This puts a lower bound on how much disparity is driving the response, although
    practically speaking will generate the same disparity tuning curves.
    
    Usage: Dpred, Tpred = disparity_predictions( Einfo, resp, indxs, num_dlags=8, spiking=True, rectified=True, opt_params=None )

    Inputs: Indices gives data range to fit to.
    Outputs: Dpred and Tpred will be length of entire experiment -- not just indxs
    """

    # Process disparity into disparity and timing design matrices
    dmat = disparity_matrix(Einfo['dispt'], Einfo['corrt'])
    ND2 = dmat.shape[1]
    if indxs is None:
        indxs = range(dmat.shape[0])

    # everything but blank
    Xd = NDNutils.create_time_embedding(dmat[:, :-1], [num_dlags, ND2 - 1, 1])
    # blank
    Xb = NDNutils.create_time_embedding(dmat[:, -1], [num_dlags, 1, 1])
    # timing
    switches = np.expand_dims(np.concatenate(
        (np.sum(abs(np.diff(dmat, axis=0)), axis=1), [0]), axis=0),
                              axis=1)
    Xs = NDNutils.create_time_embedding(switches, [num_dlags, 1, 1])

    tpar = NDNutils.ffnetwork_params(xstim_n=[0],
                                     input_dims=[1, 1, 1, num_dlags],
                                     layer_sizes=[1],
                                     verbose=False,
                                     layer_types=['normal'],
                                     act_funcs=['lin'],
                                     reg_list={
                                         'd2t': [None],
                                         'l1': [None]
                                     })
    bpar = deepcopy(tpar)
    bpar['xstim_n'] = [1]
    dpar = NDNutils.ffnetwork_params(xstim_n=[2],
                                     input_dims=[1, ND2 - 1, 1, num_dlags],
                                     layer_sizes=[1],
                                     verbose=False,
                                     layer_types=['normal'],
                                     act_funcs=['lin'],
                                     reg_list={
                                         'd2xt': [None],
                                         'l1': [None]
                                     })
    if rectified:
        comb_parT = NDNutils.ffnetwork_params(xstim_n=None,
                                              ffnet_n=[0, 1],
                                              layer_sizes=[1],
                                              verbose=False,
                                              layer_types=['normal'],
                                              act_funcs=['softplus'])
    else:
        comb_parT = NDNutils.ffnetwork_params(xstim_n=None,
                                              ffnet_n=[0, 1],
                                              layer_sizes=[1],
                                              verbose=False,
                                              layer_types=['normal'],
                                              act_funcs=['lin'])

    comb_par = deepcopy(comb_parT)
    comb_par['ffnet_n'] = [0, 1, 2]

    if spiking:
        nd = 'poisson'
    else:
        nd = 'gaussian'

    Tglm = NDN.NDN([tpar, bpar, comb_parT], noise_dist=nd, tf_seed=5)
    DTglm = NDN.NDN([tpar, bpar, dpar, comb_par], noise_dist=nd, tf_seed=5)
    v2fT = Tglm.fit_variables(layers_to_skip=[2], fit_biases=False)
    v2fT[2][0]['fit_biases'] = True
    v2f = DTglm.fit_variables(layers_to_skip=[3], fit_biases=False)
    v2f[3][0]['fit_biases'] = True

    if (fr1or3 == 3) or (fr1or3 == 1):
        mod_indxs = np.intersect1d(indxs, np.where(Einfo['frs'] == fr1or3)[0])
        #frs_valid = Einfo['frs'] == fr1or3
    else:
        mod_indxs = indxs
        #frs_valid = Einfo['frs'] > 0
    #to_use = frs_valid[indxs]

    #r = deepcopy(resp[mod_indxs])
    #if len(resp) > len(indxs):
    #    r = deepcopy(resp[indxs])
    #else:
    #    r = deepcopy(resp)

    _ = Tglm.train(
        input_data=[Xs[mod_indxs, :], Xb[mod_indxs, :]],
        output_data=resp[mod_indxs],  # fit_variables=v2fT,
        learning_alg='lbfgs',
        opt_params=opt_params)
    _ = DTglm.train(
        input_data=[Xs[mod_indxs, :], Xb[mod_indxs, :],
                    Xd[mod_indxs, :]],  # fit_variables=v2f, 
        output_data=resp[mod_indxs],
        learning_alg='lbfgs',
        opt_params=opt_params)
    #p1 = Tglm.eval_models(input_data=Xs[indxs,:], output_data=r)[0]
    #p2 = DTglm.eval_models(input_data=[Xs[indxs,:], Xd[indxs,:]], output_data=r)[0]
    #print( "Model performances: %0.4f  -> %0.4f"%(p1, p2) )

    # make predictions of each
    predT = Tglm.generate_prediction(input_data=[Xs, Xb])
    predD = DTglm.generate_prediction(input_data=[Xs, Xb, Xd])

    return predD, predT
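Per the docstring's usage line, a hedged sketch for one unit (Eadd_info and Robs come from binocular_data_import above; the chosen column and optimizer settings are placeholders):

# lower-bound how much of cell 0's response is explained by disparity alone
Dpred, Tpred = disparity_predictions(Eadd_info, Robs[:, [0]],
                                     indxs=Eadd_info['Ui'],
                                     num_dlags=8, spiking=True, rectified=True,
                                     opt_params=None)  # placeholder lbfgs options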
Code Example #21
File: BinocUtils.py Project: lucabaroni/NDN3
def disparity_tuning(Einfo,
                     r,
                     used_inds=None,
                     num_dlags=8,
                     fr1or3=3,
                     to_plot=False):

    if used_inds is None:
        used_inds = range(len(r))

    dmat = disparity_matrix(Einfo['dispt'], Einfo['corrt'])
    ND = (dmat.shape[1] - 2) // 2

    # Weight all by their frequency of occurrence

    if (fr1or3 == 3) or (fr1or3 == 1):
        frs_valid = Einfo['frs'] == fr1or3
    else:
        frs_valid = Einfo['frs'] > 0

    to_use = frs_valid[used_inds]

    #dmatN = dmat / np.mean(dmat[used_inds[to_use],:], axis=0) * np.mean(dmat[used_inds[to_use],:])
    dmatN = dmat / np.mean(dmat[used_inds[to_use], :],
                           axis=0)  # will be stim rate
    # if every stim resulted in 1 spk, they would be 1 as is
    #nrms = np.mean(dmat[used_inds[to_use],:], axis=0) # number of stimuli of each type
    Xmat = NDNutils.create_time_embedding(dmatN[:, range(ND * 2)],
                                          [num_dlags, 2 * ND, 1])[used_inds, :]
    # uncorrelated response
    Umat = NDNutils.create_time_embedding(dmatN[:, [-2]],
                                          [num_dlags, 1, 1])[used_inds, :]

    #if len(r) > len(used_inds):
    resp = deepcopy(r[used_inds])
    #else:
    #    resp = r

    #Nspks = np.sum(resp[to_use, :], axis=0)
    Nspks = len(to_use)  # this will end up being the number of spikes associated with each stim
    # at different lags, divided by number of time points used. (i.e. prob of spike per bin)
    Dsta = np.reshape(Xmat[to_use, :].T @ resp[to_use],
                      [2 * ND, num_dlags]) / Nspks
    Usta = (Umat[to_use, :].T @ resp[to_use])[:, 0] / Nspks

    # Rudimentary analysis
    best_lag = np.argmax(np.max(Dsta[range(ND), :], axis=0))
    Dtun = np.reshape(Dsta[:, best_lag], [2, ND]).T
    uncor_resp = Usta[best_lag]

    Dinfo = {
        'Dsta': Dsta,
        'Dtun': Dtun,
        'uncor_resp': uncor_resp,
        'best_lag': best_lag,
        'uncor_sta': Usta,
        'disp_list': Einfo['disp_list'][2:]
    }

    if to_plot:
        DU.subplot_setup(1, 2)
        plt.subplot(1, 2, 1)
        DU.plot_norm(Dsta.T - uncor_resp, cmap='bwr')
        plt.plot([ND - 0.5, ND - 0.5], [-0.5, num_dlags - 0.5], 'k')
        plt.plot([-0.5, 2 * ND - 0.5], [best_lag, best_lag], 'k--')
        plt.subplot(1, 2, 2)
        plt.plot(Dtun)
        plt.plot(-Dtun[:, 1] + 2 * uncor_resp, 'm--')

        plt.plot([0, ND - 1], [uncor_resp, uncor_resp], 'k')
        plt.xlim([0, ND - 1])
        plt.show()

    return Dinfo
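A hedged sketch of computing and plotting the tuning of a single unit (inputs as in the previous examples, with the unit index a placeholder):

Dinfo = disparity_tuning(Eadd_info, Robs[:, [0]],  # single-unit response column
                         used_inds=used_inds, fr1or3=3, to_plot=True)
print('best lag:', Dinfo['best_lag'], 'uncorrelated response:', Dinfo['uncor_resp'])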
Code Example #22
# opts['stim'] = stim
# opts['sacon'] = sacon
# opts['Robs'] = Robs

# from scipy.io import savemat

# savemat('testdata.mat', opts)

# %% build time-embedded stimulus
num_saclags = 60
back_shifts = 20
num_lags = 15
NX, NY = opts['NX'], opts['NY']
NT, NC = Robs.shape
# build time-embedded stimulus
Xstim = NDNutils.create_time_embedding(stim, [num_lags, NX, NY], tent_spacing=1)
# XsacOn = NDNutils.create_time_embedding( NDNutils.shift_mat_zpad(sacon,-back_shifts,dim=0), [num_saclags, 1, 1], tent_spacing=1)
# XsacOff = NDNutils.create_time_embedding( NDNutils.shift_mat_zpad(sacoff,-back_shifts,dim=0), [num_saclags, 1, 1], tent_spacing=1)
# XsacOnCausal = NDNutils.create_time_embedding( sacon, [num_saclags, 1, 1], tent_spacing=1)
# XsacOffCausal = NDNutils.create_time_embedding( sacoff, [num_saclags, 1, 1], tent_spacing=1)
Robs = Robs.astype('float32')
Xstim = Xstim.astype('float32')

#%% optimization params
Ui = opts['Ui']
Xi = opts['Xi']
Ti = opts['Ti']

#%%
import torch  # needed for the torch model below


class MyModel(torch.nn.Module):
    def __init__(self, D_in, D_out):
Code Example #23
# %% Import libraries
import sys
sys.path.insert(0, '/home/jake/Data/Repos/')
# import deepdish as dd
import Utils as U
import gratings as gt

import warnings
warnings.simplefilter('ignore')
import NDN3.NDNutils as NDNutils

which_gpu = NDNutils.assign_gpu()
from scipy.ndimage import gaussian_filter
from copy import deepcopy

import numpy as np
import tensorflow as tf

import matplotlib.pyplot as plt  # plotting
import seaborn as sns

import NDN3.NDN as NDN
import NDN3.Utils.DanUtils as DU

# %% list sessions
sesslist = gt.list_sessions()
sesslist = list(sesslist)
for i in range(len(sesslist)):
    print("%d %s" % (i, sesslist[i]))

# %% setup session
for iSess in range(1):  #range(len(sesslist)):
Code Example #24
File: neureye.py Project: jcbyts/V1FreeViewingCode
def make_model_convolutional(base_mod, dims):

    NX = dims[0]
    NY = dims[1]
    NC = base_mod.output_sizes[0]

    # find networks that process the stimulus
    stim_nets = [
        nn for nn in range(len(base_mod.network_list))
        if base_mod.network_list[nn]['xstim_n'] is not None
    ]

    par = []

    for ss in stim_nets:

        netlist_old = deepcopy(base_mod.network_list)[ss]

        # convert stimulus network params into a convolutional network
        conv_par = deepcopy(base_mod.network_list[ss])
        conv_par['input_dims'] = [1, NX, NY] + [conv_par['input_dims'][-1]]
        conv_par['layer_types'] = ['conv']
        conv_par['conv_filter_widths'] = [netlist_old['input_dims'][1]]

        par.append(deepcopy(conv_par))

    out_net = deepcopy(base_mod.network_list[-1])
    if out_net['layer_types'][0] == 'add':
        add_par = NDNutils.ffnetwork_params(
            xstim_n=None,
            ffnet_n=stim_nets,
            layer_sizes=[NX * NY * NC],
            layer_types=['add'],
            act_funcs=out_net['activation_funcs'])
        par.append(add_par)

    elif out_net['layer_types'][0] == 'side':
        out_net['layer_types'] = ['conv']
        out_net['conv_filter_widths'] = [1]
        par.append(out_net)

    cell_shift_mod = NDN.NDN(par)

    num_space = np.prod(cell_shift_mod.input_sizes[0][:-1])

    # copy stim networks verbatim (only thing diff is output is a convolution)
    for ff in stim_nets:
        for nl in range(len(cell_shift_mod.networks[ff].layers)):
            cell_shift_mod.networks[ff].layers[nl].weights = deepcopy(
                base_mod.networks[ff].layers[nl].weights)
            cell_shift_mod.networks[ff].layers[nl].biases = deepcopy(
                base_mod.networks[ff].layers[nl].biases)

    if base_mod.networks[-1].layers[0].weights.shape[0] == len(stim_nets):
        cell_shift_mod.networks[-1].layers[0].weights = conv_expand(
            deepcopy(base_mod.networks[-1].layers[0].weights), num_space)
        cell_shift_mod.networks[-1].layers[0].biases = conv_expand(
            deepcopy(base_mod.networks[-1].layers[0].biases), num_space)
    else:  # convolutional output instead of add layer
        # copy output weights
        cell_shift_mod.networks[-1].layers[0].weights = deepcopy(
            base_mod.networks[1].layers[0].weights)
        cell_shift_mod.networks[-1].layers[0].biases = deepcopy(
            base_mod.networks[1].layers[0].biases)

    return cell_shift_mod
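Code Example #16 above calls this helper directly; the same two lines serve as a usage sketch (base_mod, NX, and NY as in get_corr_grid):

# convert the fitted model so its output is evaluated at every spatial shift
cell_shift_mod = make_model_convolutional(base_mod, [NX, NY])
num_space = np.prod(cell_shift_mod.input_sizes[0][:-1])  # spatial positions replicated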
Code Example #25
#%% set paths
import sys
sys.path.insert(0, '/home/jake/Data/Repos/')
# import deepdish as dd
import V1FreeViewingCode.Analysis.notebooks.Utils as U
import V1FreeViewingCode.Analysis.notebooks.gratings as gt

import warnings

warnings.simplefilter('ignore')
import NDN3.NDNutils as NDNutils

which_gpu = NDNutils.assign_gpu()
from scipy.ndimage import gaussian_filter
from copy import deepcopy

import numpy as np
import tensorflow as tf

import matplotlib.pyplot as plt  # plotting
import seaborn as sns

import NDN3.NDN as NDN
import NDN3.Utils.DanUtils as DU

# %% load example sessions
sesslist = ['ellie_20190107', 'ellie_20170731', 'logan_20200304']

ROIs = {
    'ellie_20190107': np.array([-14.0, -3.5, -6.5, 3.5]),
    'ellie_20170731': np.array([-1, -3, 3, .5]),  #np.array([0.0, -2.5, 2, 0])
    'logan_20200304': np.array([-.5, -1.5, 1.5, 0.5])
Code Example #26
File: BinocUtils.py Project: petrroll/NDN3
def disparity_tuning(Einfo,
                     r,
                     used_inds=None,
                     num_dlags=8,
                     fr1or3=3,
                     to_plot=False):

    if used_inds is None:
        used_inds = range(len(r))

    dmat = disparity_matrix(Einfo['dispt'], Einfo['corrt'])
    ND = (dmat.shape[1] - 2) // 2

    # Weight all by their frequency of occurrence

    if (fr1or3 == 3) or (fr1or3 == 1):
        frs_valid = Einfo['frs'] == fr1or3
    else:
        frs_valid = Einfo['frs'] > 0

    to_use = frs_valid[used_inds]

    dmatN = dmat / np.mean(dmat[used_inds[to_use], :], axis=0) * np.mean(
        dmat[used_inds[to_use], :])
    Xmat = NDNutils.create_time_embedding(dmatN[:, range(ND * 2)],
                                          [num_dlags, 2 * ND, 1])[used_inds, :]
    # uncorrelated response
    Umat = NDNutils.create_time_embedding(dmatN[:, [-2]],
                                          [num_dlags, 1, 1])[used_inds, :]

    #if len(r) > len(used_inds):
    resp = deepcopy(r[used_inds])
    #else:
    #    resp = r

    Nspks = np.sum(resp[to_use, :], axis=0)
    Dsta = np.reshape(Xmat[to_use, :].T @ resp[to_use],
                      [2 * ND, num_dlags]) / Nspks
    Usta = (Umat[to_use, :].T @ resp[to_use])[:, 0] / Nspks

    # Rudimentary analysis
    best_lag = np.argmax(np.max(Dsta[range(ND), :], axis=0))
    Dtun = np.reshape(Dsta[:, best_lag], [2, ND]).T
    uncor_resp = Usta[best_lag]

    Dinfo = {
        'Dsta': Dsta,
        'Dtun': Dtun,
        'uncor_resp': uncor_resp,
        'best_lag': best_lag,
        'uncor_sta': Usta
    }

    if to_plot:
        DU.subplot_setup(1, 2)
        plt.subplot(1, 2, 1)
        DU.plot_norm(Dsta.T - uncor_resp, cmap='bwr')
        plt.plot([ND - 0.5, ND - 0.5], [-0.5, num_dlags - 0.5], 'k')
        plt.plot([-0.5, 2 * ND - 0.5], [best_lag, best_lag], 'k--')
        plt.subplot(1, 2, 2)
        plt.plot(Dtun)
        plt.plot(-Dtun[:, 1] + 2 * uncor_resp, 'm--')

        plt.plot([0, ND - 1], [uncor_resp, uncor_resp], 'k')
        plt.xlim([0, ND - 1])
        plt.show()

    return Dinfo
Code Example #27
File: neureye.py Project: jcbyts/V1FreeViewingCode
def prep_stim_model(Stim,
                    Robs,
                    dims,
                    valid=None,
                    num_lags=10,
                    plot=True,
                    Cindx=None,
                    cids=None):

    NX = dims[0]
    NY = dims[1]

    NT, NC = Robs.shape

    if valid is None:
        valid = np.arange(0, NT, 1)

    # create time-embedded stimulus
    Xstim, rinds = create_time_embedding_valid(Stim, [num_lags, NX, NY], valid)
    Rvalid = deepcopy(Robs[rinds, :])

    NTv = Rvalid.shape[0]
    print('%d valid samples of %d possible' % (NTv, NT))

    stas = Xstim.T @ (Rvalid - np.average(Rvalid, axis=0))
    stas = np.reshape(stas, [NX * NY, num_lags, NC]) / NTv

    if plot:
        plt.figure(figsize=(10, 15))
        sx, sy = U.get_subplot_dims(NC)

    mu = np.zeros(NC)
    for cc in range(NC):
        if plot:
            plt.subplot(sx, sy, cc + 1)
            plt.plot(np.abs(stas[:, :, cc]).T, color=[.5, .5, .5])
        tlevel = np.median(
            np.abs(stas[:, :, cc] - np.average(stas[:, :, cc]))) * 4
        mu[cc] = np.average(np.abs(stas[:, :, cc]) > tlevel)

        if plot:
            plt.axhline(tlevel, color='k')
            plt.title(cc)

    # threshold good STAS
    thresh = 0.01
    if plot:
        plt.figure()
        plt.plot(mu, '-o')
        plt.axhline(thresh, color='k')
        plt.show()

    if cids is None:
        cids = np.where(mu > thresh)[0]  # units to analyze
        print("found %d good STAs" % len(cids))

    if plot:
        plt.figure(figsize=(10, 15))
        for cc in cids:
            plt.subplot(sx, sy, cc + 1)
            bestlag = np.argmax(np.max(abs(stas[:, :, cc]), axis=0))
            plt.imshow(np.reshape(stas[:, bestlag, cc], (NY, NX)))
            plt.title(cc)

    # index into "good" units
    Rvalid = Rvalid[:, cids]
    NC = Rvalid.shape[1]
    stas = stas[:, :, cids]

    if Cindx is None:
        print("Getting Crop Index")
        # Crop stimulus to center around RFs
        sumdensity = np.zeros([NX * NY])
        for cc in range(NC):
            bestlag = np.argmax(np.max(abs(stas[:, :, cc]), axis=0))
            sumdensity += stas[:, bestlag, cc]**2

        if plot:
            plt.figure()
            plt.imshow(np.reshape(sumdensity, [NY, NX]))
            plt.title("Sum Density STA")

        # get Crop indices (TODO: debug)
        sumdensity = (sumdensity - np.min(sumdensity)) / (np.max(sumdensity) -
                                                          np.min(sumdensity))
        I = np.reshape(sumdensity, [NY, NX]) > .3
        xinds = np.where(np.sum(I, axis=0) > 0)[0]
        yinds = np.where(np.sum(I, axis=1) > 0)[0]

        NX2 = np.maximum(len(xinds), len(yinds))
        x0 = np.min(xinds)
        y0 = np.min(yinds)

        xinds = range(x0, x0 + NX2)
        yinds = range(y0, y0 + NX2)

        Cindx = crop_indx(NX, xinds, yinds)

        if plot:
            plt.figure()
            plt.imshow(np.reshape(sumdensity[Cindx], [NX2, NX2]))
            plt.title('Cropped')
            plt.show()

    NX2 = np.sqrt(len(Cindx)).astype(int)

    # make new cropped stimulus
    Xstim, rinds = create_time_embedding_valid(Stim[:, Cindx],
                                               [num_lags, NX2, NX2], valid)

    # index into Robs
    Rvalid = deepcopy(Robs[rinds, :])
    Rvalid = Rvalid[:, cids]
    Rvalid = NDNutils.shift_mat_zpad(Rvalid, -1, dim=0)  # get rid of first lag

    NC = Rvalid.shape[1]  # new number of units
    NT = Rvalid.shape[0]
    print('%d valid samples of %d possible' % (NT, Stim.shape[0]))
    print('%d good units' % NC)

    # double-check STAS work with cropped stimulus
    stas = Xstim.T @ Rvalid
    stas = np.reshape(stas, [NX2 * NX2, num_lags, NC]) / NT

    if plot:
        plt.figure(figsize=(10, 15))
        for cc in range(NC):
            plt.subplot(sx, sy, cc + 1)
            bestlag = np.argmax(np.max(abs(stas[:, :, cc]), axis=0))
            plt.imshow(np.reshape(stas[:, bestlag, cc], (NX2, NX2)))
            plt.title(cc)
        plt.show()

    dims = (num_lags, NX2, NX2)
    return Xstim, Rvalid, dims, Cindx, cids
Code Example #28
import os

import numpy as np
import torch
from torch import nn

import matplotlib.pyplot as plt  # plotting
import seaborn as sns

from V1FreeViewingCode.models.datasets import PixelDataset
from torch.utils.data import Dataset, DataLoader, random_split

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(0)

import NDN3.NDNutils as NDNutils

which_gpu = NDNutils.assign_gpu()

import tensorflow as tf

import NDN3.NDN as NDN
import NDN3.Utils.DanUtils as DU

import V1FreeViewingCode.Analysis.notebooks.Utils as U

#%% Load dataset
sessid = '20200304'
# sessid = '20191231'

#%% import models
from V1FreeViewingCode.models.encoders import Encoder
import V1FreeViewingCode.models.cores as cores
Code Example #29
File: neureye.py Project: jcbyts/V1FreeViewingCode
def get_stim_model(Stim,
                   Robs,
                   dims,
                   valid=None,
                   num_lags=10,
                   plot=True,
                   XTreg=0.05,
                   L1reg=5e-3,
                   MIreg=0.1,
                   MSCreg=10.0,
                   Greg=0.1,
                   Mreg=1e-4,
                   num_subs=36,
                   num_hid=24,
                   num_tkern=None,
                   Cindx=None,
                   base_mod=None,
                   cids=None,
                   autoencoder=False):

    NX = dims[0]
    NY = dims[1]

    NT, NC = Robs.shape

    if valid is None:
        valid = np.arange(0, NT, 1)

    # create time-embedded stimulus
    Xstim, rinds = create_time_embedding_valid(Stim, [num_lags, NX, NY], valid)
    Rvalid = deepcopy(Robs[rinds, :])

    NTv = Rvalid.shape[0]
    print('%d valid samples of %d possible' % (NTv, NT))

    stas = Xstim.T @ (Rvalid - np.average(Rvalid, axis=0))
    stas = np.reshape(stas, [NX * NY, num_lags, NC]) / NTv

    if plot:
        plt.figure(figsize=(10, 15))
        sx, sy = U.get_subplot_dims(NC)

    mu = np.zeros(NC)
    for cc in range(NC):
        if plot:
            plt.subplot(sx, sy, cc + 1)
            plt.plot(np.abs(stas[:, :, cc]).T, color=[.5, .5, .5])
        tlevel = np.median(
            np.abs(stas[:, :, cc] - np.average(stas[:, :, cc]))) * 4
        mu[cc] = np.average(np.abs(stas[:, :, cc]) > tlevel)

        if plot:
            plt.axhline(tlevel, color='k')
            plt.title(cc)

    # threshold good STAS
    thresh = 0.01
    if plot:
        plt.figure()
        plt.plot(mu, '-o')
        plt.axhline(thresh, color='k')
        plt.show()

    if cids is None:
        cids = np.where(mu > thresh)[0]  # units to analyze
        print("found %d good STAs" % len(cids))

    if plot:
        plt.figure(figsize=(10, 15))
        for cc in cids:
            plt.subplot(sx, sy, cc + 1)
            bestlag = np.argmax(np.max(abs(stas[:, :, cc]), axis=0))
            plt.imshow(np.reshape(stas[:, bestlag, cc], (NY, NX)))
            plt.title(cc)

    # index into "good" units
    Rvalid = Rvalid[:, cids]
    NC = Rvalid.shape[1]
    stas = stas[:, :, cids]

    if Cindx is None:
        print("Getting Crop Index")
        # Crop stimulus to center around RFs
        sumdensity = np.zeros([NX * NY])
        for cc in range(NC):
            bestlag = np.argmax(np.max(abs(stas[:, :, cc]), axis=0))
            sumdensity += stas[:, bestlag, cc]**2

        if plot:
            plt.figure()
            plt.imshow(np.reshape(sumdensity, [NY, NX]))
            plt.title("Sum Density STA")

        # get Crop indices (TODO: debug)
        sumdensity = (sumdensity - np.min(sumdensity)) / (np.max(sumdensity) -
                                                          np.min(sumdensity))
        I = np.reshape(sumdensity, [NY, NX]) > .3
        xinds = np.where(np.sum(I, axis=0) > 0)[0]
        yinds = np.where(np.sum(I, axis=1) > 0)[0]

        NX2 = np.maximum(len(xinds), len(yinds))
        x0 = np.min(xinds)
        y0 = np.min(yinds)

        xinds = range(x0, x0 + NX2)
        yinds = range(y0, y0 + NX2)

        Cindx = crop_indx(NX, xinds, yinds)

        if plot:
            plt.figure()
            plt.imshow(np.reshape(sumdensity[Cindx], [NX2, NX2]))
            plt.title('Cropped')
            plt.show()

    NX2 = np.sqrt(len(Cindx)).astype(int)

    # make new cropped stimulus
    Xstim, rinds = create_time_embedding_valid(Stim[:, Cindx],
                                               [num_lags, NX2, NX2], valid)

    # index into Robs
    Rvalid = deepcopy(Robs[rinds, :])
    Rvalid = Rvalid[:, cids]
    Rvalid = NDNutils.shift_mat_zpad(Rvalid, -1, dim=0)  # get rid of first lag

    NC = Rvalid.shape[1]  # new number of units
    NT = Rvalid.shape[0]
    print('%d valid samples of %d possible' % (NT, Stim.shape[0]))
    print('%d good units' % NC)

    # double-check STAS work with cropped stimulus
    stas = Xstim.T @ Rvalid
    stas = np.reshape(stas, [NX2 * NX2, num_lags, NC]) / NT

    if plot:
        plt.figure(figsize=(10, 15))
        for cc in range(NC):
            plt.subplot(sx, sy, cc + 1)
            bestlag = np.argmax(np.max(abs(stas[:, :, cc]), axis=0))
            plt.imshow(np.reshape(stas[:, bestlag, cc], (NX2, NX2)))
            plt.title(cc)
        plt.show()

    Ui, Xi = NDNutils.generate_xv_folds(NT)

    # fit SCAFFOLD MODEL
    try:
        if len(XTreg) == 2:
            d2t = XTreg[0]
            d2x = XTreg[1]
        else:
            d2t = XTreg[0]
            d2x = deepcopy(d2t)
    except TypeError:
        d2t = deepcopy(XTreg)
        d2x = deepcopy(XTreg)

    # optimizer parameters
    adam_params = U.def_adam_params()

    if base_mod is not None:
        side2b = base_mod.copy_model()
        side2b.set_regularization('d2t', d2t, layer_target=0)
        side2b.set_regularization('d2x', d2x, layer_target=0)
        side2b.set_regularization('glocal', Greg, layer_target=0)
        side2b.set_regularization('l1', L1reg, layer_target=0)
        side2b.set_regularization('max', MIreg, ffnet_target=0, layer_target=1)
        side2b.set_regularization('max',
                                  MSCreg,
                                  ffnet_target=1,
                                  layer_target=0)

        if len(side2b.networks) == 4:  # includes autoencoder network
            input_data = [Xstim, Rvalid]
        else:
            input_data = Xstim

    else:
        # Best regularization arrived at
        Greg0 = 1e-1
        Mreg0 = 1e-6
        L1reg0 = 1e-5

        if num_tkern is not None:
            ndn_par = NDNutils.ffnetwork_params(
                input_dims=[1, NX2, NX2, num_lags],
                layer_sizes=[num_tkern, num_subs, num_hid],
                layer_types=['conv', 'normal', 'normal'],
                ei_layers=[None, num_subs // 2, num_hid // 2],
                conv_filter_widths=[1],
                normalization=[1, 1, 1],
                act_funcs=['lin', 'relu', 'relu'],
                verbose=True,
                reg_list={
                    'd2t': [1e-3],
                    'd2x': [None, XTreg],
                    'l1': [L1reg0, L1reg0],
                    'glocal': [Greg0, Greg0]
                })
        else:
            ndn_par = NDNutils.ffnetwork_params(
                input_dims=[1, NX2, NX2, num_lags],
                layer_sizes=[num_subs, num_hid],
                layer_types=['normal', 'normal'],
                ei_layers=[num_subs // 2, num_hid // 2],
                normalization=[1, 1],
                act_funcs=['relu', 'relu'],
                verbose=True,
                reg_list={
                    'd2t': [d2t],
                    'd2x': [d2x],
                    'l1': [L1reg0, L1reg0],
                    'glocal': [Greg0]
                })

        side_par = NDNutils.ffnetwork_params(network_type='side',
                                             xstim_n=None,
                                             ffnet_n=0,
                                             layer_sizes=[NC],
                                             layer_types=['normal'],
                                             normalization=[-1],
                                             act_funcs=['softplus'],
                                             verbose=True,
                                             reg_list={'max': [Mreg0]})

        side_par['pos_constraints'] = True  # ensures Exc and Inh mean something

        if autoencoder:  # capture additional variability using an autoencoder
            auto_par = NDNutils.ffnetwork_params(
                input_dims=[1, NC, 1],
                xstim_n=[1],
                layer_sizes=[2, 1, NC],
                time_expand=[0, 15, 0],
                layer_types=['normal', 'temporal', 'normal'],
                conv_filter_widths=[None, 1, None],
                act_funcs=['relu', 'lin', 'lin'],
                normalization=[1, 1, 0],
                reg_list={'d2t': [None, 1e-1, None]})

            add_par = NDNutils.ffnetwork_params(xstim_n=None,
                                                ffnet_n=[1, 2],
                                                layer_sizes=[NC],
                                                layer_types=['add'],
                                                act_funcs=['softplus'])

            side2 = NDN.NDN([ndn_par, side_par, auto_par, add_par],
                            ffnet_out=1,
                            noise_dist='poisson')

            # set output regularization on the latent
            side2.batch_size = adam_params['batch_size']
            side2.initialize_output_reg(network_target=2,
                                        layer_target=1,
                                        reg_vals={'d2t': 1e-1})

            input_data = [Xstim, Rvalid]

        else:
            side2 = NDN.NDN([ndn_par, side_par],
                            ffnet_out=1,
                            noise_dist='poisson')

            input_data = Xstim

        _ = side2.train(input_data=input_data,
                        output_data=Rvalid,
                        train_indxs=Ui,
                        test_indxs=Xi,
                        silent=False,
                        learning_alg='adam',
                        opt_params=adam_params)

        side2.set_regularization('glocal', Greg, layer_target=0)
        side2.set_regularization('l1', L1reg, layer_target=0)
        side2.set_regularization('max', MIreg, ffnet_target=0, layer_target=1)
        side2.set_regularization('max', MSCreg, ffnet_target=1, layer_target=0)

        side2b = side2.copy_model()

    _ = side2b.train(input_data=input_data,
                     output_data=Rvalid,
                     train_indxs=Ui,
                     test_indxs=Xi,
                     silent=False,
                     learning_alg='adam',
                     opt_params=adam_params)

    LLs2n = side2b.eval_models(input_data=input_data,
                               output_data=Rvalid,
                               data_indxs=Xi,
                               nulladjusted=True)
    print(np.mean(LLs2n))
    if plot:
        plt.hist(LLs2n)
        plt.xlabel('Nats/Spike')
        plt.show()

    return side2b, Xstim, Rvalid, rinds, cids, Cindx
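A hedged end-to-end sketch (session arrays are placeholders): fit the scaffold model first, then feed it to the correction-grid search from Code Example #16.

# hypothetical session arrays: Stim (NT x NX*NY), Robs (NT x NC), valid indices
side2b, Xstim, Rvalid, rinds, cids, Cindx = get_stim_model(
    Stim, Robs, dims=[NX, NY], valid=valid, num_lags=10, plot=False)
# side2b, cids, and Cindx can then seed get_corr_grid / prep_stim_model above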
Code Example #30
File: generator_net.py Project: kovacspe/diplomka
    def get_gan_subnet(self,
                       input_noise_size,
                       output_shape,
                       generator_type='conv'):
        output_shape = output_shape[1:]
        layers = 5
        if generator_type == 'conv':
            params = NDNutils.ffnetwork_params(
                input_dims=[1, input_noise_size],
                layer_sizes=[[64, 8, 8], 32, 16, 1, 1],
                layer_types=['normal', 'deconv', 'deconv', 'deconv', 'mask'],
                act_funcs=['relu', 'relu', 'relu', 'tanh', 'lin'],
                conv_filter_widths=[None, 5, 5, 5, None],
                shift_spacing=[None, 2, 2, 1, None],
                reg_list={'d2x': [None, None, 0.01, 0.01, None]},
                verbose=False)
            params['output_shape'] = [
                None, None, output_shape, output_shape, None
            ]

        elif generator_type == 'deepconv':
            params = NDNutils.ffnetwork_params(
                input_dims=[1, input_noise_size],
                layer_sizes=[[512, 4, 4], 256, 128, 1, 1],
                layer_types=['normal', 'deconv', 'deconv', 'deconv', 'mask'],
                act_funcs=['relu', 'relu', 'relu', 'tanh', 'lin'],
                conv_filter_widths=[None, 5, 5, 5, None],
                shift_spacing=[None, 2, 2, 2, None],
                reg_list={'d2x': [None, None, None, 0.01, None]},
                verbose=False)
            params['output_shape'] = [None, None, None, output_shape, None]

        elif generator_type == 'lin' or generator_type == 'lin_tanh':
            act = 'lin' if generator_type == 'lin' else 'tanh'
            params = NDNutils.ffnetwork_params(
                input_dims=[1, input_noise_size],
                layer_sizes=[512, 1024, [1, 31, 31], 1],
                layer_types=['normal', 'normal', 'normal', 'mask'],
                act_funcs=['tanh', 'tanh', act, 'lin'],
                reg_list={
                    'l2': [0.01, 0.01, 0.01, None],
                },
                verbose=False)
            layers = 4

        elif generator_type == 'hybrid':
            params = NDNutils.ffnetwork_params(
                input_dims=[1, input_noise_size],
                layer_sizes=[256, [16, 16, 16], 8, 1, 1],
                layer_types=['normal', 'normal', 'deconv', 'deconv', 'mask'],
                act_funcs=['relu', 'relu', 'relu', 'tanh', 'lin'],
                conv_filter_widths=[None, 5, 5, 5, None],
                shift_spacing=[None, 2, 2, 1, None],
                reg_list={'d2x': [None, None, 0.01, 0.01, None]},
                verbose=False)
            params['output_shape'] = [
                None, None, output_shape, output_shape, None
            ]
        else:
            raise ValueError(
                f'Generator type \'{generator_type}\' not implemented.')

        params['xstim_n'] = [0]
        params['normalize_output'] = [None] * layers
        params['weights_initializers'] = ['normal'] * (layers - 1) + ['ones']
        params['biases_initializers'] = ['zeros'] * layers
        return params
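A hedged assembly sketch (net is a hypothetical instance of the class above; the noise model and shapes are placeholders, following how the other examples build NDN.NDN from parameter dictionaries):

import NDN3.NDN as NDN

gen_par = net.get_gan_subnet(input_noise_size=64,
                             output_shape=[None, 31, 31],  # placeholder target shape
                             generator_type='conv')
generator = NDN.NDN([gen_par], noise_dist='gaussian')  # hypothetical single-network assembly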