Example #1
def main(gpu_id=None):

    # Isolate requested GPU
    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id

    # Initialize the model and stimulus
    model = Model()
    stim = Stimulus()

    for i in range(par['iterations']):

        # Generate a batch of trials
        trial_info = stim.make_batch()

        # Run the model on the provided batch of trials
        task_loss, act_loss, outputs = model.train_model(trial_info)

        # Calculate the network's accuracy
        acc_mask = trial_info['train_mask'] * (np.argmax(
            trial_info['desired_output'], axis=-1) != 0)
        accuracy = np.sum(
            acc_mask * (np.argmax(outputs['y'], axis=-1) == np.argmax(
                trial_info['desired_output'], axis=-1))) / np.sum(acc_mask)

        # Intermittently report feedback on the network
        if i % 10 == 0:

            # Plot the network's behavior
            behavior(trial_info, outputs, i)

            # Output the network's performance on the task
            print('{:>5} | Task Loss: {:6.3f} | Task Acc: {:5.3f} | Act. Loss: {:6.3f} |'.format(
                i, task_loss.numpy(), accuracy, act_loss.numpy()))
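
A quick illustration of the masked-accuracy computation above, on toy arrays (the values are made up): trials whose desired output is class 0 (fixation) are excluded by the mask before averaging.

import numpy as np

desired = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])       # one-hot targets
y = np.array([[.1, .8, .1], [.7, .2, .1], [.2, .3, .5]])    # network outputs
train_mask = np.array([1., 1., 1.])

acc_mask = train_mask * (np.argmax(desired, axis=-1) != 0)  # drops trial 2 (fixation)
accuracy = np.sum(acc_mask * (np.argmax(y, axis=-1)
                              == np.argmax(desired, axis=-1))) / np.sum(acc_mask)
print(accuracy)  # 1.0 -- both non-fixation trials are classified correctly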
Example #2
def main(gpu_id = None):

    # Reset Tensorflow graph
    tf.reset_default_graph()

    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as sess:

        init = tf.global_variables_initializer()
        sess.run(init)

        # Load trained convolutional autoencoder
        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            folder = './upsample2/'
            conv_model = tf.train.import_meta_graph(folder + 'conv_model_with_latent.meta', clear_devices=True)
            conv_model.restore(sess, tf.train.latest_checkpoint(folder))
            print('Loaded model from', folder)

        # Get all images from dataset
        stim = Stimulus()
        train_filename, test_filename, train_data, test_data, dummy_output = stim.get_all_data()

        # Run through all images in the dataset and save latent output
        save_latent(train_filename, train_data, dummy_output, sess)
        save_latent(test_filename, test_data, dummy_output, sess)
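
Several of these examples (here and in #7 and #11) feed and fetch tensors by their string names ('x:0', 'l:0', ...) after importing a meta graph. A minimal TF1 sketch of that pattern; the tensor names are assumptions carried over from these snippets:

import tensorflow as tf

with tf.Session() as sess:
    # Rebuild the graph from the .meta file, then load the trained weights
    saver = tf.train.import_meta_graph('./upsample2/conv_model_with_latent.meta',
                                       clear_devices=True)
    saver.restore(sess, tf.train.latest_checkpoint('./upsample2/'))
    graph = tf.get_default_graph()
    x = graph.get_tensor_by_name('x:0')       # the same tensor the string key names
    loss = graph.get_tensor_by_name('l:0')
    # sess.run(loss, feed_dict={x: batch}) is equivalent to
    # sess.run('l:0', feed_dict={'x:0': batch}) as used above.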
Example #3
    def instantiate(self, image_size, filter_size, in_chn, out_chn, block_size,
                    ifmap, weights, bias, pruner_name, num_nonzero):
        self.name = 'tb'

        # if (debug):
        #     self.image_size = (4, 4)
        #     self.filter_size = (3, 3)
        #     self.in_chn = 2
        #     self.out_chn = 4
        #     self.block_size = 2
        #     self.num_nonzero = 1  # number of non-zero values in each block; helps test the correctness of the arch
        # else:
        #     self.image_size = (16, 16)
        #     self.filter_size = (3, 3)
        #     self.in_chn = 16
        #     self.out_chn = 8
        #     self.block_size = 4
        #     self.num_nonzero = 4

        self.image_size = image_size
        self.filter_size = filter_size
        self.in_chn = in_chn
        self.out_chn = out_chn
        self.block_size = block_size
        self.num_nonzero = num_nonzero  # number of non-zero values in each block; helps test the correctness of the arch

        #the inputs to this specific layer
        self.ifmap = ifmap
        self.weights = weights
        self.bias = bias
        self.pruner_name = pruner_name

        self.arr_y = self.out_chn
        self.input_chn = Channel()
        self.output_chn = Channel()

        ifmap_glb_depth = (self.filter_size[1] + (self.filter_size[0]-1)*\
            self.image_size[1]) * self.in_chn // self.block_size
        psum_glb_depth = self.out_chn // self.block_size
        weight_glb_depth = self.filter_size[0]*self.filter_size[1]* \
                self.in_chn*self.out_chn//self.block_size

        self.stimulus = Stimulus(self.arr_y, self.block_size, self.num_nonzero,
                                 self.input_chn, self.output_chn,
                                 self.pruner_name)
        self.dut = OSArch(self.arr_y, self.input_chn, self.output_chn,
                          self.block_size, self.num_nonzero, ifmap_glb_depth,
                          psum_glb_depth, weight_glb_depth)

        self.configuration_done = False
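
As a sanity check of the global-buffer depth formulas above, plugging in the commented-out non-debug parameters (16x16 image, 3x3 filter, 16 input and 8 output channels, block size 4) gives:

image_size, filter_size = (16, 16), (3, 3)
in_chn, out_chn, block_size = 16, 8, 4

ifmap_glb_depth = (filter_size[1] + (filter_size[0] - 1) * image_size[1]) * in_chn // block_size
psum_glb_depth = out_chn // block_size
weight_glb_depth = filter_size[0] * filter_size[1] * in_chn * out_chn // block_size
print(ifmap_glb_depth, psum_glb_depth, weight_glb_depth)  # 140 2 288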
Example #4
    def stimulate(self, modality=None, *args, **keywordArgs):
        """
        Add a :class:`stimulus <Network.Stimulus.Stimulus>` to this object with the given :class:`modality <Library.Modality.Modality>`.
        
        >>> neuron1.stimulate(library.modality('light')) 
        
        Returns the stimulus object that is created.
        """

        from stimulus import Stimulus
        from library.modality import Modality

        if modality is not None and not isinstance(modality, Modality):
            raise TypeError('The modality argument passed to stimulate() '
                            'must be a value obtained from the library or None.')

        stimulus = Stimulus(self.network,
                            target=self,
                            modality=modality,
                            *args,
                            **keywordArgs)
        self.stimuli.append(stimulus)
        self.network.addObject(stimulus)
        return stimulus
Example #5
    # Update parameters with current weights
    update_parameters(data['par'], verbose=False)
    update_parameters({'batch_size': 128}, verbose=False)
    update_parameters({k + '_init': v for k, v in data['weights'].items()},
                      verbose=False, update_deps=False)

    end_dead_time = par['dead_time'] // par['dt']
    end_fix_time = end_dead_time + par['fix_time'] // par['dt']
    end_sample_time = end_fix_time + par['sample_time'] // par['dt']
    end_delay_time = end_sample_time + par['delay_time'] // par['dt']

    # Make a new model and stimulus (which use the loaded parameters)
    print('\nLoading and running model.')
    model = Model()
    stim = Stimulus()
    runs = 8

    c_all = []
    d_all = []
    v_all = []
    s_all = []

    # Run a couple batches to generate sufficient data points
    for i in range(runs):
        print('R:{:>2}'.format(i), end='\r')
        trial_info = stim.make_batch(var_delay=False)
        model.run_model(trial_info, testing=True)

        c_all.append(trial_info['sample_cat'])
        d_all.append(trial_info['sample_dir'])
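
The epoch boundaries above are cumulative durations divided by the timestep; a quick numeric sketch with made-up durations:

dt = 10                                        # ms per step (made up)
dead, fix, sample, delay = 100, 200, 300, 400  # epoch durations in ms (made up)
end_dead_time = dead // dt                               # 10
end_fix_time = end_dead_time + fix // dt                 # 30
end_sample_time = end_fix_time + sample // dt            # 60
end_delay_time = end_sample_time + delay // dt           # 100 steps total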
Example #6
import pyglet
from pyglet.window import key

platform = pyglet.window.get_platform()
display = platform.get_default_display()
screen = display.get_screens()[1]  # assumes a second monitor is attached
mywin = pyglet.window.Window(fullscreen=True, screen=screen)

# get keyboard input
keys = key.KeyStateHandler()
mywin.push_handlers(keys)

# initialize a stim object
mypoint = Stimulus(window=mywin,
                   type='PLANE',
                   x=mywin.width // 2,
                   y=mywin.height // 2,
                   width=4,
                   height=4)

colors = [
    [255, 255, 255],
    [255, 0, 0],
    [0, 255, 0],
    [0, 0, 255],
    [255, 255, 0],
    [255, 0, 255],
    [0, 255, 255],
]


@mywin.event
Example #7
def main(gpu_id = None):

    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id

    # Generate stimulus
    stim = Stimulus()

    # Initialize evolutionary model
    evo_model = EvoModel()

    # Model stats
    losses = []
    testing_losses = []
    save_iter = []

    # Reset Tensorflow graph
    tf.reset_default_graph()

    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as sess:

        init = tf.global_variables_initializer()
        sess.run(init)

        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            # Load saved convolutional autoencoder
            folder = './latent_all_img_batch16_filt16_loss80/'
            conv_model = tf.train.import_meta_graph(folder + 'conv_model_with_latent.meta', clear_devices=True)
            conv_model.restore(sess, tf.train.latest_checkpoint(folder)) 
            print('Loaded model from', folder)


        stuck = 0
        test_loss = [1000000]
        threshold = [10000, 5000, 1000, 750, 500, 300, 150, -1]

        # Train the model
        start = time.time()
        for i in range(par['num_iterations']):

            # Generate train batch and run through the convolutional autoencoder
            input_data, conv_target, evo_target = stim.generate_train_batch()
            feed_dict = {'x:0': input_data, 'y:0': conv_target}
            conv_loss, conv_output, encoded = sess.run(['l:0', 'o:0','encoded:0'], feed_dict=feed_dict)

            # One cycle of evolutionary model
            evo_model.load_batch(encoded, evo_target)
            evo_model.run_models()
            evo_model.judge_models()
            evo_model.breed_models_genetic()

            # If all models are performing poorly, introduce new randomly generated models
            evo_loss = evo_model.get_losses(True)
            if par['num_migrators']:
                evo_model.migration()
            if evo_loss[0] < 10000:
                par['num_migrators'] = 0

            # Decrease mutation rate when loss value is below certain threshold
            if len(threshold) > 0 and evo_loss[0] < threshold[0]:
                evo_model.slowdown_mutation()
                threshold.pop(0)

            # If there is no improvement in performance for many iterations, change mutation rate
            if evo_loss[0] < test_loss[0]:
                stuck = 0
            else:
                stuck += 1
                if stuck > 20:
                    # evo_model.speed_up_mutation()
                    evo_model.slowdown_mutation()
                    stuck = 0

            # Check current status
            if i % par['print_iter'] == 0:

                # Print current status
                print_evo_stats(i, evo_model.con_dict['mutation_rate'],
                                evo_model.con_dict['mutation_strength'], stuck, conv_loss,
                                np.array([*evo_loss[0:3], evo_loss[par['num_survivors']-1]]),
                                time.time() - start)
                losses.append(evo_loss[0])
                save_iter.append(i)

                # Save model and output
                if i % par['save_iter'] == 0 and (evo_loss[0] < test_loss[0] or i%100 == 0):

                    # Generate batch from testing set and run through both models
                    input_data, conv_target, evo_target = stim.generate_test_batch()
                    feed_dict = {'x:0': input_data, 'y:0': conv_target}
                    # Use a separate name so the evo-model test_loss below is not shadowed
                    conv_test_loss, conv_output, encoded = sess.run(['l:0', 'o:0', 'encoded:0'], feed_dict=feed_dict)

                    evo_model.load_batch(encoded, evo_target)
                    evo_model.run_models()
                    evo_model.judge_models()

                    test_loss = evo_model.get_losses(True)
                    testing_losses.append(test_loss[0])

                    # Save output images
                    plot_conv_evo_outputs(conv_target, conv_output, evo_target, evo_model.output, i)

                    # Save model
                    pickle.dump({'iter':save_iter,'var_dict':evo_model.var_dict, 'losses': losses, 'test_loss': testing_losses, 'last_iter': i}, \
                        open(par['save_dir']+'run_'+str(par['run_number'])+'_model_stats.pkl', 'wb'))
                    
                # Plot loss curve
                if i > 0:
                    plt.plot(losses[1:])
                    plt.savefig(par['save_dir']+'run_'+str(par['run_number'])+'_training_curve.png')
                    plt.close()
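
The training loop above anneals mutation by popping a descending threshold list; here is that pattern in isolation (loss values are made up, thresholds reused from the example):

threshold = [10000, 5000, 1000, 750, 500, 300, 150, -1]
for best_loss in [12000, 8000, 900, 600, 100]:
    if len(threshold) > 0 and best_loss < threshold[0]:
        print('slowing mutation at loss', best_loss)
        threshold.pop(0)
# Only one threshold is popped per iteration, even when the loss jumps
# below several at once (900 pops only the 5000 entry here).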
Example #8
def run_SVM_analysis():

    print('\nLoading and running model.')
    model = Model()
    stim = Stimulus()
    runs = 8

    m_all = []
    v_all = []
    s_all = []

    for i in range(runs):
        print('R:{:>2}'.format(i), end='\r')
        trial_info = stim.make_batch(var_delay=False)
        model.run_model(trial_info)

        m_all.append(trial_info['sample_cat'])
        v_all.append(to_cpu(model.v))
        s_all.append(to_cpu(model.s))

    del model
    del stim

    batch_size = runs * par['batch_size']

    m = np.concatenate(m_all, axis=0)
    v = np.concatenate(v_all, axis=1)
    s = np.concatenate(s_all, axis=1)

    print('Performing SVM decoding on {} trials.\n'.format(batch_size))
    # Initialize linear classifier
    args = {
        'kernel': 'linear',
        'decision_function_shape': 'ovr',
        'shrinking': False,
        'tol': 1e-3
    }
    lin_clf_v = SVC(**args)
    lin_clf_s = SVC(**args)

    score_v = np.zeros([par['num_time_steps']])
    score_s = np.zeros([par['num_time_steps']])

    # Choose training and testing indices
    train_pct = 0.75
    num_train_inds = int(batch_size * train_pct)

    shuffled = np.random.permutation(batch_size)
    train_inds = shuffled[:num_train_inds]
    test_inds = shuffled[num_train_inds:]

    for t in range(end_dead_time, par['num_time_steps']):
        print('T:{:>4}'.format(t), end='\r')

        lin_clf_v.fit(v[t, train_inds, :], m[train_inds])
        lin_clf_s.fit(s[t, train_inds, :], m[train_inds])

        dec_v = lin_clf_v.predict(v[t, test_inds, :])
        dec_s = lin_clf_s.predict(s[t, test_inds, :])

        score_v[t] = np.mean(m[test_inds] == dec_v)
        score_s[t] = np.mean(m[test_inds] == dec_s)

    fig, ax = plt.subplots(1, figsize=(12, 8))
    ax.plot(score_v, c=[241 / 255, 153 / 255, 1 / 255], label='Voltage')
    ax.plot(score_s, c=[58 / 255, 79 / 255, 65 / 255], label='Syn. Eff.')

    ax.axhline(0.5, c='k', ls='--')
    ax.axvline(trial_info['timings'][0, 0], c='k', ls='--')
    ax.axvline(trial_info['timings'][1, 0], c='k', ls='--')

    ax.set_title('SVM Decoding of Sample Category')
    ax.set_xlabel('Time')
    ax.set_ylabel('Decoding Accuracy')
    ax.set_yticks([0., 0.25, 0.5, 0.75, 1.])
    ax.grid()
    ax.set_xlim(0, par['num_time_steps'] - 1)

    ax.legend()
    plt.savefig('./analysis/svm_decoding.png', bbox_inches='tight')

    print('SVM decoding complete.')
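
A self-contained sketch of the per-timestep decoding scheme used above, on synthetic data; shapes, names, and values here are illustrative, not from the original model:

import numpy as np
from sklearn.svm import SVC

T, N, U = 20, 80, 10                            # time steps, trials, units
labels = np.random.randint(0, 2, size=N)
activity = np.random.randn(T, N, U)
activity[10:] += labels[None, :, None]          # label information appears at t=10

train = np.arange(int(0.75 * N))
test = np.arange(int(0.75 * N), N)

score = np.zeros(T)
clf = SVC(kernel='linear', decision_function_shape='ovr', shrinking=False, tol=1e-3)
for t in range(T):
    clf.fit(activity[t, train], labels[train])  # refit the decoder at each time step
    score[t] = np.mean(clf.predict(activity[t, test]) == labels[test])
# score stays near chance (0.5) before t=10 and rises toward 1 afterwards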
Example #9
def biphasic_index2(sta):
    posarea = np.trapz(sta[sta > 0])
    negarea = np.trapz(sta[sta < 0])
    phasic_index = np.abs(posarea + negarea) / (np.abs(negarea) + np.abs(posarea)) # Ravi et al., 2019 J.Neurosci
    biphasic_index = 1 - phasic_index
    return biphasic_index

def biphasic_index3(sta):
    return np.abs(sta.max() + sta.min()) / (np.abs(sta.max()) + np.abs(sta.min()))

def biphasic_index3_stas(stas):
    maxs = stas.max(axis=1)
    mins = stas.min(axis=1)
    return 1 - (np.abs(maxs + mins) / (np.abs(maxs) + np.abs(mins)))
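
# Toy check of the indices above (illustrative arrays, not real STAs): with the
# 1 - ratio convention of biphasic_index3_stas, a monophasic STA scores 0 and a
# balanced biphasic STA (max = -min) scores 1.
_mono = np.array([0., .2, 1., .2, 0.])
_biph = np.array([0., -1., 1., 0.])
print(1 - biphasic_index3(_mono), 1 - biphasic_index3(_biph))  # 0.0 1.0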

# ff = Stimulus('20180710', 1)   # alternative dataset; overwritten below if left in
ff = Stimulus('Kuehn', 2)

data = ff.read_datafile()
stas = np.array(data['stas'])

bps = np.empty(ff.nclusters)
bps2 = np.empty(ff.nclusters)
bps3 = biphasic_index3_stas(stas)

#%%
for i in range(ff.nclusters):

    pospeaks = find_peaks(stas[i, :], prominence=.2)[0]
    negpeaks = find_peaks(-stas[i, :], prominence=.2)[0]
    peaks = np.sort(np.hstack((pospeaks, negpeaks)))
Example #10
#%%
if __name__ == '__main__':

    # Choose between OMB and FFF data.
    # FFF has an STA, so it comes out as a significant component;
    # OMB data has no structure in the STA.
    use_omb = False
    exp = '20180710'

    if use_omb:
        stimnr = 8
        st = OMB(exp, stimnr)
        stimulus = st.bgsteps[0, :]
    else:
        stimnr = 1
        st = Stimulus('20180710', stimnr)
        stimulus = np.array(randpy.gasdev(-1000, st.frametimings.shape[0])[0])

    allspikes = st.allspikes()
    filter_length = st.filter_length

    rw = asc.rolling_window(stimulus, filter_length, preserve_dim=True)

    start_time = dt.datetime.now()
    # Calculate the significant components of STC for all cells
    sig_comps = []
    for i in range(st.nclusters):
        inds = sigtest(allspikes[i, :], stimulus, filter_length)
        sig_comps.append(inds)

    # Plot the significant components
Example #11
def main(gpu_id=None):

    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id

    # Reset Tensorflow graph
    tf.reset_default_graph()

    # Generate stimulus
    stim = Stimulus()

    # Load saved genetically trained model
    evo_model = EvoModel()
    saved_evo_model = pickle.load(
        open('./savedir/conv_task/run_21_model_stats.pkl', 'rb'))
    evo_model.update_variables(saved_evo_model['var_dict'])
    print('Loaded evo model')

    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as sess:

        init = tf.global_variables_initializer()
        sess.run(init)

        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            # Load trained convolutional autoencoder
            if par['task'] == 'conv_task':
                folder = './latent_all_img_batch16_filt16_loss80/'
                conv_model = tf.train.import_meta_graph(
                    folder + 'conv_model_with_latent.meta', clear_devices=True)
                conv_model.restore(sess, tf.train.latest_checkpoint(folder))
                print('Loaded conv model from', folder)

                # Generate batch and save output images
                for i in range(4):
                    input_data, conv_target, evo_target = stim.generate_train_batch()

                    # Run input through convolutional model
                    feed_dict = {'x:0': input_data, 'y:0': conv_target}
                    test_loss, conv_output, encoded = sess.run(
                        ['l:0', 'o:0', 'encoded:0'], feed_dict=feed_dict)

                    # Run latent output through evolutionary model
                    evo_model.load_batch(encoded, evo_target)
                    evo_model.run_models()
                    evo_model.judge_models()

                    # Save output from both models
                    plot_conv_evo_outputs(conv_target,
                                          conv_output,
                                          evo_target,
                                          evo_model.output,
                                          i,
                                          test=True)
            else:
                folder = './'
                conv_top = tf.train.import_meta_graph(folder +
                                                      'conv_model_top.meta',
                                                      clear_devices=True)
                conv_top.restore(sess, tf.train.latest_checkpoint(folder))

                for i in range(4):
                    test_input, test_target, _ = stim.generate_test_batch()
                    feed_dict = {
                        'input:0': test_input,
                        'target:0': test_target
                    }
                    test_loss, test_output = sess.run(['l:0', 'output:0'],
                                                      feed_dict=feed_dict)
                    plot_conv_all(test_target, test_output, i)
Example #12
import analysis_scripts as asc
import nonlinearity as nlt
import genlinmod as glm
from train_test_split import train_test_split

from stimulus import Stimulus

exp, stim_nr = '20180710_kilosorted', 8
# exp = 'Kuehn'

xval_splits = 10
xval_fraction = 1 / xval_splits

fff_stimnr = asc.stimulisorter(exp)['fff'][0]

fff = Stimulus(exp, fff_stimnr)
fff.filter_length = 20

# Get rid of list of numpy arrays
fff_stas = np.array(fff.read_datafile()['stas'])

glmlabel = 'GLM_contrast_xval'

savepath = os.path.join(fff.stim_dir, glmlabel)
os.makedirs(savepath, exist_ok=True)

fff.get_frametimings()
all_spikes = fff.allspikes()

start = dt.datetime.now()
Example #13
def main(gpu_id=None):

    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id

    # Reset Tensorflow graph
    tf.reset_default_graph()

    # Placeholders for the tensorflow model
    x = tf.placeholder(tf.float32,
                       shape=[par['batch_train_size'], par['n_input']],
                       name='x')
    y = tf.placeholder(tf.float32,
                       shape=[par['batch_train_size'], par['n_output']],
                       name='y')

    # Generate stimulus
    stim = Stimulus()

    # Model stats
    losses = []
    testing_losses = []
    save_iter = []

    config = tf.ConfigProto()
    with tf.Session(config=config) as sess:

        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            model = Model(x, y)

        init = tf.global_variables_initializer()
        sess.run(init)
        saver = tf.train.Saver()

        # Train the model
        prev_loss = 10000000
        start = time.time()
        for i in range(par['num_iterations']):

            # Generate training batch and train model
            input_data, target_data, _ = stim.generate_train_batch()
            feed_dict = {x: input_data, y: target_data}
            _, train_loss, model_output = sess.run(
                [model.train_op, model.loss, model.output],
                feed_dict=feed_dict)

            # Check current status
            if i % par['print_iter'] == 0:

                # Print current status
                print_conv_stats(i, train_loss, time.time() - start)
                losses.append(train_loss)
                save_iter.append(i)

                # Test and save model
                if i % par['save_iter'] == 0:

                    # Generate test batch and get model performance
                    test_input, test_target, _ = stim.generate_test_batch()
                    feed_dict = {x: test_input, y: test_target}
                    test_loss, test_output = sess.run(
                        [model.loss, model.output], feed_dict=feed_dict)
                    testing_losses.append(test_loss)

                    # Plot model outputs
                    if test_loss < prev_loss:
                        prev_loss = test_loss
                        plot_conv_outputs(target_data, model_output,
                                          test_target, test_output, i)

                        # Save training stats and model
                        weight = sess.run(
                            tf.get_collection(tf.GraphKeys.VARIABLES,
                                              'filters/kernel')[0])
                        pickle.dump({'weight':weight,'iter':save_iter,'losses': losses, 'test_loss': testing_losses, 'last_iter': i}, \
                            open(par['save_dir']+'run_'+str(par['run_number'])+'_model_stats.pkl', 'wb'))

                        saved_path = saver.save(
                            sess, './upsample2/conv_model_with_latent')
                        print('model saved in {}'.format(saved_path))

                # Plot loss curve
                if i > 0:
                    plt.plot(losses[1:])
                    plt.savefig(par['save_dir'] + 'run_' +
                                str(par['run_number']) + '_training_curve.png')
                    plt.close()
Example #14
def main():

    # Start the model run by loading the network controller and stimulus
    print('\nLoading model...')
    model = Model()
    stim = Stimulus()

    t0 = time.time()
    print('Starting training.\n')

    full_acc_record = []
    task_acc_record = []
    iter_record = []
    I_sqr_record = []
    W_rnn_grad_sum_record = []
    W_rnn_grad_norm_record = []

    # Run the training loop
    for i in range(par['iterations']):

        # Process a batch of stimulus using the current models
        trial_info = stim.make_batch()
        model.run_model(trial_info)
        model.optimize()

        losses = model.get_losses()
        mean_spiking = model.get_mean_spiking()
        task_accuracy, full_accuracy = model.get_performance()

        full_acc_record.append(full_accuracy)
        task_acc_record.append(task_accuracy)
        iter_record.append(i)
        I_sqr_record.append(model.I_sqr)
        W_rnn_grad_sum_record.append(cp.sum(model.var_dict['W_rnn']))
        W_rnn_grad_norm_record.append(LA.norm(model.grad_dict['W_rnn']))

        W_exc_mean = cp.mean(
            cp.maximum(0, model.var_dict['W_rnn'][:par['n_exc'], :]))
        W_inh_mean = cp.mean(
            cp.maximum(0, model.var_dict['W_rnn'][par['n_exc']:, :]))

        info_str0 = 'Iter {:>5} | Task Loss: {:5.3f} | Task Acc: {:5.3f} | '.format(
            i, losses['task'], task_accuracy)
        info_str1 = 'Full Acc: {:5.3f} | Mean Spiking: {:6.3f} Hz'.format(
            full_accuracy, mean_spiking)
        print('Aggregating data...', end='\r')

        if i % 20 == 0:

            # print('Mean EXC w_rnn ', W_exc_mean, 'mean INH w_rnn', W_inh_mean)
            if par['plot_EI_testing']:
                pf.EI_testing_plots(i, I_sqr_record, W_rnn_grad_sum_record,
                                    W_rnn_grad_norm_record)

            pf.run_pev_analysis(trial_info['sample'], to_cpu(model.su * model.sx),
                                to_cpu(model.z), to_cpu(cp.stack(I_sqr_record)), i)
            weights = to_cpu(model.var_dict['W_rnn'])
            fn = './savedir/{}_weights.pkl'.format(par['savefn'])
            data = {'weights': weights, 'par': par}
            pickle.dump(data, open(fn, 'wb'))

            pf.activity_plots(i, model)
            pf.clopath_update_plot(i, model.clopath_W_in, model.clopath_W_rnn,
                                   model.grad_dict['W_in'], model.grad_dict['W_rnn'])
            pf.plot_grads_and_epsilons(i, trial_info, model, model.h,
                                       model.eps_v_rec, model.eps_w_rec,
                                       model.eps_ir_rec)

            if i != 0:
                pf.training_curve(i, iter_record, full_acc_record,
                                  task_acc_record)

            if i % 100 == 0:
                model.visualize_delta(i)

                if par['save_data_files']:
                    data = {'par': par, 'weights': to_cpu(model.var_dict)}
                    pickle.dump(
                        data,
                        open(
                            './savedir/{}_data_iter{:0>6}.pkl'.format(
                                par['savefn'], i), 'wb'))

            trial_info = stim.make_batch(var_delay=False)
            model.run_model(trial_info, testing=True)
            model.show_output_behavior(i, trial_info)

        # Print output info (after all saving of data is complete)
        print(info_str0 + info_str1)

        if i % 100 == 0:
            if np.mean(task_acc_record[-100:]) > 0.9:
                print(
                    '\nMean accuracy greater than 0.9 over last 100 iters.\nMoving on to next model.\n'
                )
                break
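
For reference, the W_exc_mean/W_inh_mean statistics above split the recurrent weight matrix by row into excitatory and inhibitory populations and rectify negative entries before averaging; a toy sketch with made-up numbers, using NumPy in place of CuPy:

import numpy as np

W_rnn = np.array([[ 0.5, -0.2],
                  [ 0.1,  0.3],
                  [-0.4,  0.2]])
n_exc = 2                                                # first two rows are excitatory
W_exc_mean = np.mean(np.maximum(0, W_rnn[:n_exc, :]))    # 0.225
W_inh_mean = np.mean(np.maximum(0, W_rnn[n_exc:, :]))    # 0.1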
Example #15
def main():

    # Start the model run by loading the network controller and stimulus
    print('\nStarting model run: {}'.format(par['save_fn']))
    control = NetworkController()
    stim = Stimulus()

    # Select whether to get losses ranked, according to learning method
    if par['learning_method'] in ['GA', 'TA']:
        is_ranked = True
    elif par['learning_method'] == 'ES':
        is_ranked = False
    else:
        raise Exception('Unknown learning method: {}'.format(
            par['learning_method']))

    # Get loss baseline and update the ensemble reference accordingly
    control.load_batch(stim.make_batch())
    control.run_models()
    control.judge_models()
    # NOTE: loss_baseline is only recomputed every 50 iterations and can then throw
    # a NaN warning, which causes h to gain an extra axis and raise a memory error.
    loss_baseline = np.nanmean(control.get_losses(is_ranked))
    print("loss_baseline")
    print(loss_baseline)
    control.update_constant('loss_baseline', loss_baseline)

    # Establish records for training loop
    save_record = {'iter':[], 'mean_task_acc':[], 'mean_full_acc':[], 'top_task_acc':[], \
        'top_full_acc':[], 'loss':[], 'mut_str':[], 'spiking':[], 'loss_factors':[]}

    t0 = time.time()
    # Run the training loop
    for i in range(par['iterations']):

        # Process a batch of stimulus using the current models
        control.load_batch(stim.make_batch())
        control.run_models()
        control.judge_models()

        # Get the current loss scores
        loss = control.get_losses(is_ranked)

        # Apply optimizations based on the current learning method(s)
        mutation_strength = 0.
        if par['learning_method'] in ['GA', 'TA']:
            mutation_strength = par['mutation_strength'] * (
                np.nanmean(loss[:par['num_survivors']]) / loss_baseline)
            control.update_constant('mutation_strength', mutation_strength)
            """
            thresholds = [0.25, 0.1, 0.05, 0]
            modifiers  = [1/2, 1/4, 1/8]
            for t in range(len(thresholds))[:-1]:
                if thresholds[t] > mutation_strength > thresholds[t+1]:
                    mutation_strength = par['mutation_strength']*np.nanmean(loss)/loss_baseline * modifiers[t]
                    break
            """

            if par['learning_method'] == 'GA':
                control.breed_models_genetic()
            elif par['learning_method'] == 'TA':
                control.update_constant(
                    'temperature',
                    par['temperature'] * par['temperature_decay']**i)
                control.breed_models_thermal(i)

        elif par['learning_method'] == 'ES':
            control.breed_models_evo_search(i)

        # Print and save network performance as desired
        if i % par['iters_per_output'] == 0:
            task_accuracy, full_accuracy = control.get_performance()
            loss_dict = control.get_losses_by_type(is_ranked)
            spikes = control.get_spiking()

            task_loss = np.mean(loss_dict['task'][:par['num_survivors']])
            freq_loss = np.mean(loss_dict['freq'][:par['num_survivors']])
            reci_loss = np.mean(loss_dict['reci'][:par['num_survivors']])

            mean_loss = np.mean(loss[:par['num_survivors']])
            task_acc = np.mean(task_accuracy[:par['num_survivors']])
            full_acc = np.mean(full_accuracy[:par['num_survivors']])
            spiking = np.mean(spikes[:par['num_survivors']])

            if par['learning_method'] in ['GA', 'TA']:
                top_task_acc = task_accuracy.max()
                top_full_acc = full_accuracy.max()
            elif par['learning_method'] == 'ES':
                top_task_acc = task_accuracy[0]
                top_full_acc = full_accuracy[0]

            save_record['iter'].append(i)
            save_record['top_task_acc'].append(top_task_acc)
            save_record['top_full_acc'].append(top_full_acc)
            save_record['mean_task_acc'].append(task_acc)
            save_record['mean_full_acc'].append(full_acc)
            save_record['loss'].append(mean_loss)
            save_record['loss_factors'].append(loss_dict)
            save_record['mut_str'].append(mutation_strength)
            save_record['spiking'].append(spiking)
            pickle.dump(save_record,
                        open(par['save_dir'] + par['save_fn'] + '.pkl', 'wb'))
            if i % (10 * par['iters_per_output']) == 0:
                print('Saving weights for iteration {}... ({})\n'.format(
                    i, par['save_fn']))
                pickle.dump(
                    to_cpu(control.var_dict),
                    open(par['save_dir'] + par['save_fn'] + '_weights.pkl',
                         'wb'))

            status_stringA = 'Iter: {:4} | Task Loss: {:5.3f} | Freq Loss: {:5.3f} | Reci Loss: {:5.3f}'.format( \
                i, task_loss, freq_loss, reci_loss)
            status_stringB = 'Opt:  {:>4} | Full Loss: {:5.3f} | Mut Str: {:7.5f} | Spiking: {:5.2f} Hz'.format( \
                par['learning_method'], mean_loss, mutation_strength, spiking)
            status_stringC = 'S/O:  {:4} | Top Acc (Task/Full): {:5.3f} / {:5.3f}  | Mean Acc (Task/Full): {:5.3f} / {:5.3f}'.format( \
                int(time.time()-t0), top_task_acc, top_full_acc, task_acc, full_acc)
            print(status_stringA + '\n' + status_stringB + '\n' +
                  status_stringC + '\n')
            t0 = time.time()
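
The mutation strength above scales linearly with the survivors' mean loss relative to the baseline; a quick numeric sketch with made-up values:

base_strength, survivor_mean_loss, loss_baseline = 0.4, 2.0, 8.0
mutation_strength = base_strength * (survivor_mean_loss / loss_baseline)
print(mutation_strength)  # 0.1 -- mutation shrinks as the survivors improve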
Example #16
import iofuncs as iof
import analysis_scripts as asc
import nonlinearity as nlt
import genlinmod as glm

from omb import OMB
from stimulus import Stimulus

exp, stim_nr = '20180710', 8
# exp, stim_nr = 'Kuehn', 13

fff_stimnr = asc.stimulisorter(exp)['fff'][0]

st = OMB(exp, stim_nr)
fff = Stimulus(exp, fff_stimnr)

# Get rid of list of numpy arrays
fff_stas = np.array(fff.read_datafile()['stas'])

glmlabel = 'GLM_contrast'

savepath = os.path.join(st.stim_dir, glmlabel)
os.makedirs(savepath, exist_ok=True)

texture_data = st.read_texture_analysis()

all_spikes = st.allspikes()

start = dt.datetime.now()
Example #17

#x = sigtest(spikes, stimulus, filter_length)
#x_min, x_max = confidence_interval_2d(x)
#%%
#ax = plt.gca()
#ax.fill_between(np.arange(filter_length),
#                 x.min(axis=1), x.max(axis=1),
#                 color='grey', alpha=.5)
#ax.fill_between(np.arange(filter_length), x_min, x_max,
#                color='red', alpha=.5)
#ax.plot(eigvals, 'ok')
#%%
exp, stimnr = '20180710', 1

# FFF alternative: regenerate the Gaussian noise stimulus
# ff = Stimulus(exp, stimnr)
# stimulus = np.array(randpy.gasdev(-1000, ff.frametimings.shape[0])[0])

# OMB data is used here; its background steps serve as the stimulus
st = OMB(exp, 8)
ff = st
stimulus = st.bgsteps[0, :]

allspikes = ff.allspikes()

i = 0
spikes = allspikes[i, :]
filter_length = ff.filter_length

rw = asc.rolling_window(stimulus, filter_length, preserve_dim=True)
sta = (spikes @ rw) / spikes.sum()
#%%
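
The one-line STA above, (spikes @ rw) / spikes.sum(), is the spike-count-weighted average of the stimulus windows preceding each frame. A small sketch of that identity; asc.rolling_window's exact semantics are assumed here and emulated with an explicit loop:

import numpy as np

rng = np.random.default_rng(0)
stimulus = rng.standard_normal(200)
spikes = rng.poisson(0.5, 200).astype(float)
filter_length = 10

windows = np.zeros((200, filter_length))
for t in range(200):
    seg = stimulus[max(0, t - filter_length + 1):t + 1]
    windows[t, -len(seg):] = seg                 # stimulus preceding time t

sta = (spikes @ windows) / spikes.sum()          # same formula as in the snippet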
Example #18
def main(gpu_id = None):

    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id

    # Reset Tensorflow graph
    tf.reset_default_graph()

    # Generate stimulus
    stim = Stimulus()

    # Placeholders for the tensorflow model
    x = tf.placeholder(tf.float32, shape=[par['batch_train_size'],par['n_input']])
    y = tf.placeholder(tf.float32, shape=[par['batch_train_size'],par['n_output']])
    
    # Model stats
    losses = []
    testing_losses = []
    save_iter = []
    prev_loss = 10000

    config = tf.ConfigProto()
    with tf.Session(config=config) as sess:

        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            model = Model(x,y)
        
        init = tf.global_variables_initializer()
        sess.run(init)

        # Train the model
        start = time.time()
        for i in range(par['num_iterations']):

            # Generate training set
            input_data, target_data, _ = stim.generate_train_batch()
            feed_dict = {x: input_data, y: target_data}
            _, train_loss, model_output = sess.run([model.train_op, model.loss, model.output], feed_dict=feed_dict)


            # Check current status
            if i % par['print_iter'] == 0:

                # Print current status
                print('Model {:2} | Task: {:s} | Iter: {:6} | Loss: {:8.3f} | Run Time: {:5.3f}s'.format( \
                    par['run_number'], par['task'], i, train_loss, time.time()-start))
                losses.append(train_loss)
                save_iter.append(i)

                # Save one training and output img from this iteration
                if i % par['save_iter'] == 0:

                    # Generate batch from testing set and check the output
                    test_input, test_target, _ = stim.generate_test_batch()
                    feed_dict = {x: test_input, y: test_target}
                    test_loss, test_output = sess.run([model.loss, model.output], feed_dict=feed_dict)
                    testing_losses.append(test_loss)

                    if test_loss < prev_loss:
                        prev_loss = test_loss
                    
                        # Results from a training sample
                        original1 = target_data[0].reshape(par['out_img_shape'])
                        output1 = model_output[0].reshape(par['out_img_shape'])
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        cv2.putText(original1,'Training',(5,20), font, 0.5,(255,255,255), 2, cv2.LINE_AA)
                        cv2.putText(output1,'Output',(5,20), font, 0.5,(255,255,255), 2, cv2.LINE_AA)

                        # Results from a testing sample
                        original2 = test_target[1].reshape(par['out_img_shape'])
                        output2 = test_output[1].reshape(par['out_img_shape'])
                        original3 = test_target[2].reshape(par['out_img_shape'])
                        output3 = test_output[2].reshape(par['out_img_shape'])
                        cv2.putText(original2,'Testing',(5,20), font, 0.5,(255,255,255), 2, cv2.LINE_AA)
                        cv2.putText(output2,'Output',(5,20), font, 0.5,(255,255,255), 2, cv2.LINE_AA)
                    
                        vis1 = np.concatenate((original1, output1), axis=1)
                        vis2 = np.concatenate((original2, output2), axis=1)
                        vis3 = np.concatenate((original3, output3), axis=1)
                        vis = np.concatenate((vis1, vis2), axis=0)
                        vis = np.concatenate((vis, vis3), axis=0)

                        cv2.imwrite(par['save_dir']+'run_'+str(par['run_number'])+'_test_'+str(i)+'.png', vis)

                        pickle.dump({'iter':save_iter,'losses': losses, 'test_loss': testing_losses, 'last_iter': i}, \
                            open(par['save_dir']+'run_'+str(par['run_number'])+'_model_stats.pkl', 'wb'))


                # Plot loss curve
                if i > 0:
                    plt.plot(losses[1:])
                    plt.savefig(par['save_dir']+'run_'+str(par['run_number'])+'_training_curve.png')
                    plt.close()

                # Stop training if loss is small enough (just for sweep purposes)
                if train_loss < 30:
                    break