Example #1
def analyze_model_from_file(results, lesion=False, simulation=True):

    #results = pickle.load(open(save_fn,'rb'))

    # generate batch of batch_train_size - to run analyses on this batch
    if lesion:
        trial_type = 'spatialDMS'
        updates = {'trial_type': trial_type}
        update_parameters(updates)
        stim = stimulus.Stimulus()
        trial_info = stim.generate_trial(trial_type)

        #trial_info=results['trial_info'] #from the saved file - not useful when running models with more than one task
        #syn_x_stacked = np.stack(results['syn_x'], axis=1)
        #syn_u_stacked = np.stack(results['syn_u'], axis=1)
        h_stacked = np.stack(results['h'], axis=1)
        trial_time = np.arange(0, h_stacked.shape[1] * par['dt'], par['dt'])
        weights = results['weights']

        updates = {}
        for k, v in results['parameters'].items():
            updates[k] = v
        update_parameters(updates)
        save_fn = par['save_dir'] + par['save_fn'] + '_DMS'
        #lesion_results = lesion_weights_spatialcat(trial_info, h_stacked, syn_x_stacked, syn_u_stacked, weights)
        lesion_results = lesion_weights_spatialcat(trial_info, h_stacked,
                                                   weights)

        for key, val in lesion_results.items():
            results[key] = val

        pickle.dump(results, open(save_fn, 'wb'))
        print('Analysis results saved in ', save_fn)
    """
    Simulate the network for a new set of trials
    """
    if simulation:
        print('simulating network...')
        h_stacked = np.stack(results['h'], axis=1)
        trial_time = np.arange(0, h_stacked.shape[1] * par['dt'], par['dt'])
        weights = results['weights']

        updates = {}
        for k, v in results['parameters'].items():
            updates[k] = v
        update_parameters(updates)
        trial_type = 'spatial_cat'
        updates = {'color_flag': False, 'trial_type': trial_type}
        update_parameters(updates)
        stim = stimulus.Stimulus()
        trial_info_whiteonly = stim.generate_trial(trial_type)
        save_fn = par['save_dir'] + 'color_flag_' + par['save_fn']
        simulation_results = simulate_network_spatialcat(
            trial_info_whiteonly, h_stacked, weights)
        for key, val in simulation_results.items():
            results[key] = val

        pickle.dump(results, open(save_fn, 'wb'))
        print('Analysis results saved in ', save_fn)
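
A minimal usage sketch for the function above, mirroring the commented-out load at its top; the results file path is hypothetical:

import pickle

# Hypothetical path; the pickle must hold 'h', 'weights', and 'parameters' entries
results = pickle.load(open('./savedir/model_results.pkl', 'rb'))
analyze_model_from_file(results, lesion=True, simulation=False)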
Example #2
def main(gpu_id=None):

    if gpu_id is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id

    print_key_params()

    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, [
        par['num_time_steps'] * par['trials_per_seq'], par['batch_size'],
        par['n_input']
    ], 'stim')
    r = tf.placeholder(tf.float32, [
        par['num_time_steps'] * par['trials_per_seq'], par['batch_size'],
        par['n_output']
    ], 'reward')
    m = tf.placeholder(
        tf.float32,
        [par['num_time_steps'] * par['trials_per_seq'], par['batch_size']],
        'mask')

    stim = stimulus.Stimulus()

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)  # if gpu_id == '0' else tf.GPUOptions()

    results_dict = {'reward_list': [], 'novel_reward_list': []}

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:

        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            model = Model(x, r, m)

        sess.run(tf.global_variables_initializer())
        reward_list = []

        for i in range(par['n_iters']):

            name, trial_info = stim.generate_trial(test_tasks=False)
            _, reward, pol_loss, action, h, mask  = \
             sess.run([model.train, model.reward_full, model.pol_loss, model.action, model.h, model.mask], \
             feed_dict={x:trial_info['neural_input'], r:trial_info['reward_data'],m:trial_info['train_mask']})
            results_dict['reward_list'].append(reshape_reward(reward))

            if i % 100 == 0:

                name, trial_info = stim.generate_trial(test_tasks=True)
                reward_novel = sess.run(model.reward_full, feed_dict={x:trial_info['neural_input'], \
                 r:trial_info['reward_data'], m:trial_info['train_mask']})
                results_dict['novel_reward_list'].append(
                    reshape_reward(reward_novel))

                print('Iter {:>4} | Reward: {:6.3f} | Reward novel: {:6.3f} | Pol. Loss: {:6.3f}'.format(\
                 i, np.mean(np.sum(reward, axis=0)), np.mean(np.sum(reward_novel, axis=0)),pol_loss))

                weights = sess.run(model.var_dict)
                results = {'weights': weights, 'results_dict': results_dict}
                pickle.dump(results, open(par['save_fn'], 'wb'))
Example #3
def main(gpu_id=None):
    """ Run training """

    # Isolate requested GPU
    if gpu_id is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id

    # Reset TensorFlow graph
    tf.reset_default_graph()

    # Define placeholders
    x = tf.placeholder(
        tf.float32, [par['num_time_steps'], par['batch_size'], par['n_input']],
        'stim')
    y = tf.placeholder(
        tf.float32,
        [par['num_time_steps'], par['batch_size'], par['n_output']], 'out')
    m = tf.placeholder(tf.float32, [par['num_time_steps'], par['batch_size']],
                       'mask')

    # Set up stimulus and recording
    stim = stimulus.Stimulus()

    # Start TensorFlow session
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8) \
        if gpu_id == '0' else tf.GPUOptions()
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:

        # Select CPU or GPU
        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            model = Model(x, y, m)

        # Initialize variables and start timer
        sess.run(tf.global_variables_initializer())
        t_start = time.time()

        # Begin training loop, iterating over tasks
        for i in range(par['n_train_batches']):

            # Generate a batch of stimulus data for training
            trial_info = stim.make_batch()

            # Put together the feed dictionary
            feed_dict = {
                x: trial_info['neural_input'],
                y: trial_info['desired_output'],
                m: trial_info['train_mask']
            }

            # Run the model
            _, task_loss, output, state, spike = \
             sess.run([model.train, model.task_loss, \
             model.output, model.h, model.h_out], feed_dict=feed_dict)
Example #4
def main():

    # Reset TensorFlow graph
    tf.reset_default_graph()

    # Create placeholders for the model
    x = tf.placeholder(tf.float32, [par['batch_size'], 32, 32, 3], 'stim')
    print('Batch size:', par['batch_size'])
    print('Input size:', 32 * 32 * 3, '\n')

    with tf.Session() as sess:
        with tf.device('/gpu:0'):
            model = Autoencoder(x)
            sess.run(tf.global_variables_initializer())
        t_start = time.time()

        s = stimulus.Stimulus()
        for i in range(2000):
            task_id = np.random.randint(10)
            y_hat, _, _ = s.make_batch(task_id, test=False)
            _, loss, spike_loss, y = sess.run(
                [model.train_op, model.loss, model.spike_loss, model.y],
                {x: y_hat})

            if i % 100 == 0:
                print('\n Iter. | Task | Loss | Spike Loss')
                print('----------------------------')
            print('',
                  str(i).ljust(5), '|',
                  str(task_id).ljust(4), '|', loss, '|', spike_loss)

            if i % 100 == 0:
                print(y_hat.shape, y.shape)
                f, axarr = plt.subplots(2, 2)
                axarr[0, 0].imshow(y_hat[0],
                                   aspect='auto',
                                   interpolation='none')
                axarr[0, 0].set_title('Original 0')
                axarr[0, 1].imshow(y[0], aspect='auto', interpolation='none')
                axarr[0, 1].set_title('Reconstructed 0')
                axarr[1, 0].imshow(y_hat[1],
                                   aspect='auto',
                                   interpolation='none')
                axarr[1, 0].set_title('Original 1')
                axarr[1, 1].imshow(y[1], aspect='auto', interpolation='none')
                axarr[1, 1].set_title('Reconstructed 1')
                plt.savefig('./encoder_testing/' + str(i))
                plt.close(f)  # close the figure to avoid leaking memory over 2000 iterations

                W = {}
                for var in tf.trainable_variables():
                    W[var.op.name] = var.eval()
Example #5
def analyze(x, filename):
    update_parameters(x['parameters'])

    v_neuronal = np.zeros(
        [par['num_pulses'], par['num_motion_dirs'], par['n_hidden']])
    v_synaptic = np.zeros(
        [par['num_pulses'], par['num_motion_dirs'], par['n_hidden']])

    stim = stimulus.Stimulus()
    trial_info = stim.generate_trial()
    input_data = np.squeeze(
        np.split(trial_info['neural_input'],
                 x['parameters']['num_time_steps'],
                 axis=1))

    y_hat, h, syn_x, syn_u = run_model(input_data, x['parameters']['h_init'], \
        x['parameters']['syn_x_init'], x['parameters']['syn_u_init'], x['weights'])

    h = np.squeeze(np.split(h, x['parameters']['num_time_steps'], axis=1))
    syn_x = np.squeeze(
        np.split(syn_x, x['parameters']['num_time_steps'], axis=1))
    syn_u = np.squeeze(
        np.split(syn_u, x['parameters']['num_time_steps'], axis=1))

    for i in range(par['num_pulses']):
        time = x['timeline'][2 * i + 1] + 10
        dir = trial_info['sample'][:, i]
        for d in range(par['num_motion_dirs']):
            ind = np.where(dir == d)[0]
            v_neuronal[i, d, :] = np.mean(h[time][:, ind], axis=1)
            v_synaptic[i, d, :] = np.mean(syn_x[time][:, ind] *
                                          syn_u[time][:, ind],
                                          axis=1)

        print(np.mean(v_neuronal[i], axis=0).shape)
        v_neuronal[i] = v_neuronal[i] - np.mean(v_neuronal[i], axis=0)
        v_synaptic[i] = v_synaptic[i] - np.mean(v_synaptic[i], axis=0)

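    # Pairwise similarity between pulses: the diagonal of v_j @ v_i.T gives, for
    # each motion direction, the dot product of its (mean-subtracted) tuning
    # vectors at pulses j and i.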
    dot_neuronal = np.zeros(
        (par['num_pulses'], par['num_pulses'], par['num_motion_dirs']))
    for j in range(par['num_pulses'] - 1):
        for i in range(j + 1, par['num_pulses']):
            dot_neuronal[j, i, :] = np.diag(
                np.dot(v_neuronal[j], np.transpose(v_neuronal[i])))

    print(dot_neuronal)
Example #6
    def __init__(self):

        # Reset TensorFlow graph
        tf.reset_default_graph()
        # Train on CIFAR-10 task
        task_id = 0

        # Create placeholders for the model
        input_data = tf.placeholder(tf.float32, [par['batch_size'], 32, 32, 3],
                                    'stim')
        target_data = tf.placeholder(tf.float32, [par['batch_size'], 100],
                                     'target')
        mask = tf.placeholder(tf.float32,
                              [par['batch_size'], par['layer_dims'][-1]],
                              'mask')

        print('Batch size:', par['batch_size'])
        print('Input size:', 32 * 32 * 3, '\n')

        with tf.Session() as sess:
            cifar_model = self.model(input_data, target_data, mask)
            sess.run(tf.global_variables_initializer())
            t_start = time.time()

            s = stimulus.Stimulus(include_cifar10=True)

            for i in range(par['n_batches_top_down']):

                x, y, m = s.make_batch(task_id, test=False)
                _, loss = sess.run([self.train_op, self.loss],
                                   feed_dict={
                                       input_data: x,
                                       target_data: y,
                                       mask: m
                                   })

                if i % 1000 == 0:
                    print('Iteration ', i, ' Loss ', loss)
                    #print('', str(i).ljust(5), '|', str(task_id).ljust(4), '|', loss)

            W = {}
            for var in tf.trainable_variables():
                W[var.op.name] = var.eval()
            fn = './encoder_testing/conv_weights.pkl'
            pickle.dump(W, open(fn, 'wb'))
            print('Convolutional Weights saved in ', fn)
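
As the call in Example #8 below shows, constructing this class is itself the entry point: it builds the graph, trains on task 0 (CIFAR-10), and saves the convolutional weights:

# Builds, trains, and saves ./encoder_testing/conv_weights.pkl as a side effect
convolutional_layers.ConvolutionalLayers()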
Example #7
def run_simulation(weights):

    update_parameters({'batch_train_size': 256})
    update_parameters({'noise_in_sd': 1e-9})
    update_parameters({'noise_rnn_sd': 1e-9})
    update_parameters({'n_recurrent': 10})
    stim = stimulus.Stimulus()
    trial_info = stim.generate_trial(0)
    trial_type = np.zeros((par['batch_train_size']))
    for i in range(par['batch_train_size']):
        trial_type[i] = 2*(trial_info['sample'][i]//3) \
            + trial_info['test'][i]//3

    trial_length = trial_info['neural_input'].shape[1]
    x = np.split(trial_info['neural_input'], trial_length, axis=1)
    y_hat, hidden_state_hist = run_model(x, par['h_init'], weights)

    return y_hat, hidden_state_hist, trial_type
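
A minimal sketch of driving the simulation above, assuming the weights come from a saved results file in the same format the other examples use (the filename is hypothetical):

import pickle

results = pickle.load(open('./savedir/model_results.pkl', 'rb'))
y_hat, hidden_state_hist, trial_type = run_simulation(results['weights'])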
Example #8
def analyze_model_from_file(filename, savefile = None):

    x = pickle.load(open(filename, 'rb'))
    if savefile is None:
        x['parameters']['save_fn'] = 'test.pkl'
    else:
        x['parameters']['save_fn'] = savefile
    update_parameters(x['parameters'])
    stim = stimulus.Stimulus()
    trial_info = stim.generate_trial()
    input_data = np.squeeze(np.split(trial_info['neural_input'], x['parameters']['num_time_steps'], axis=1))
    print('input_data', len(input_data), input_data[0].shape)

    y_hat, h, syn_x, syn_u = run_model(input_data, x['parameters']['h_init'], \
        x['parameters']['syn_x_init'], x['parameters']['syn_u_init'], x['weights'])

    h = np.squeeze(np.split(h, x['parameters']['num_time_steps'], axis=1))
    syn_x = np.squeeze(np.split(syn_x, x['parameters']['num_time_steps'], axis=1))
    syn_u = np.squeeze(np.split(syn_u, x['parameters']['num_time_steps'], axis=1))

    analyze_model(trial_info, y_hat, h, syn_x, syn_u, None, x['weights'], simulation = False, \
            lesion = False, tuning = True, decoding = False, load_previous_file = False, save_raw_data = False)
Example #9
def main(save_fn, gpu_id=None):
    """ Run supervised learning training """

    # Update all dependencies in parameters
    update_dependencies()

    # Isolate requested GPU
    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id

    # If desired, train the convolutional layers with the CIFAR datasets
    # Otherwise, the network will load convolutional weights from the saved file
    if par['task'] in ('cifar', 'imagenet') and par['train_convolutional_layers']:
        convolutional_layers.ConvolutionalLayers()

    print('\nRunning model.\n')

    # Reset TensorFlow graph
    tf.reset_default_graph()

    # Create placeholders for the model
    if par['task'] == 'mnist':
        x = tf.placeholder(tf.float32,
                           [par['batch_size'], par['layer_dims'][0]], 'stim')
    elif par['task'] == 'cifar' or par['task'] == 'imagenet':
        x = tf.placeholder(tf.float32, [par['batch_size'], 32, 32, 3], 'stim')
    y = tf.placeholder(tf.float32, [par['batch_size'], par['layer_dims'][-1]],
                       'out')
    mask = tf.placeholder(tf.float32,
                          [par['batch_size'], par['layer_dims'][-1]], 'mask')
    rule = tf.placeholder(tf.float32, [par['batch_size'], par['n_tasks']],
                          'rulecue')
    gating = [
        tf.placeholder(tf.float32, [par['layer_dims'][n + 1]], 'gating')
        for n in range(par['n_layers'] - 1)
    ]
    dropout_keep_pct = tf.placeholder(tf.float32, [], 'dropout')
    input_dropout_keep_pct = tf.placeholder(tf.float32, [], 'input_dropout')

    # Set up stimulus
    stim = stimulus.Stimulus(labels_per_task=par['labels_per_task'])

    # Initialize accuracy records
    accuracy_full = []
    accuracy_grid = np.zeros((par['n_tasks'], par['n_tasks']))

    # Enter TensorFlow session
    with tf.Session() as sess:
        # sess = tf_debug.LocalCLIDebugWrapperSession(sess)
        # Select CPU or GPU
        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            model = Model(x, y, gating, mask, dropout_keep_pct,
                          input_dropout_keep_pct, rule)

        # Initialize variables
        sess.run(tf.global_variables_initializer())
        sess.run(model.reset_prev_vars)

        # test if importance vals change
        prev_imp = tf.Variable(tf.zeros(par['layer_dims'][1]), trainable=False)

        # Begin training loop, iterating over tasks
        for task in range(par['n_tasks']):

            if par['gating_type'] == 'iXdG':
                # test if imp vals change
                sess.run(tf.assign(prev_imp, model.importance[1]))

                # Update the importance of each unit
                sess.run(model.update_importance)

                # Create gates by importance for each task
                sess.run(model.update_gates)
                curr_task_gate = {}
                for layer in range(1, len(par['layer_dims']) - 1):
                    copy = tf.Variable(tf.zeros(
                        tf.shape(model.curr_gate[layer])),
                                       trainable=False)
                    sess.run(tf.assign(copy, model.curr_gate[layer]))
                    curr_task_gate[layer] = sess.run(copy)
                model.gates[task] = curr_task_gate

                # testing gates
                if True:
                    if task == 0:
                        for var in model.variables_stabilization:
                            print(var.op.name)
                            print(var.get_shape())

                    assert sess.run(tf.count_nonzero(
                        model.curr_gate[1])) == 400
                    assert sess.run(
                        tf.equal(model.curr_gate[1],
                                 model.gates[task][1])).all()

                    if task > 0:
                        layer_test = 1
                        expected_active_units = round(
                            (1 - par['gate_pct']) *
                            par['layer_dims'][layer_test])

                        print('Gates equal?')
                        print(
                            sess.run(
                                tf.equal(model.gates[task][layer_test],
                                         model.gates[task -
                                                     1][layer_test])).all())
                        print('Importance vals equal?')
                        print(
                            sess.run(tf.equal(prev_imp,
                                              model.importance[1])).all())

                        # vals_imp, idxs_imp = tf.math.top_k(model.importance[layer_test], k=2000)
                        # print(sess.run(idxs_imp)[-400:])
                        # print(sess.run(vals_imp)[-400:])
                        # print(sess.run(model.curr_gate[layer_test]))
                        # _, idxs = tf.math.top_k(model.curr_gate[layer_test], k=expected_active_units)
                        # print(sess.run(idxs))

                        # assert sess.run(tf.equal(idxs, sess.run(idxs_imp)[-400:])).all()
                        # _, idxs_copy = tf.math.top_k(model.gates[task][layer_test], k=expected_active_units)
                        # _, idxs_prev = tf.math.top_k(model.gates[task-1][layer_test], k=expected_active_units)
                        # print(sess.run(tf.shape(model.curr_gate[layer_test])))
                        # print(sess.run(idxs_prev))
                        # print(sess.run(idxs_copy))

                # Create dictionary of gating signals applied to each hidden layer for this task
                gating_dict = {
                    k: v
                    for k, v in zip(gating, list(model.gates[task].values()))
                }
            else:
                gating_dict = {
                    k: v
                    for k, v in zip(gating, par['gating'][task])
                }

            # Create rule cue vector for this task
            rule_cue = np.zeros([par['batch_size'], par['n_tasks']])
            rule_cue[:, task] = 1

            # Iterate over batches
            for i in range(par['n_train_batches']):

                # Make batch of training data
                stim_in, y_hat, mk = stim.make_batch(task, test=False)

                # Run the model using one of the available stabilization methods
                if par['stabilization'] == 'pathint':
                    _, _, loss, AL = sess.run([model.train_op, model.update_small_omega, model.task_loss, model.aux_loss], \
                        feed_dict={x:stim_in, y:y_hat, **gating_dict, mask:mk, dropout_keep_pct:par['drop_keep_pct'], \
                        input_dropout_keep_pct:par['input_drop_keep_pct'], rule:rule_cue})
                elif par['stabilization'] == 'EWC':
                    _, loss, AL = sess.run([model.train_op, model.task_loss, model.aux_loss], \
                        feed_dict={x:stim_in, y:y_hat, **gating_dict, mask:mk, dropout_keep_pct:par['drop_keep_pct'], \
                        input_dropout_keep_pct:par['input_drop_keep_pct'], rule:rule_cue})

                # Display network performance
                # if i%500 == 0:
                if i % 9 == 0:
                    print('Iter: ', i, 'Loss: ', loss, 'Aux Loss: ', AL)

            # Update big omegas, and reset other values before starting new task
            if par['stabilization'] == 'pathint':
                sess.run(model.update_big_omega)
            elif par['stabilization'] == 'EWC':
                for _ in range(par['EWC_batch_divisor'] *
                               par['EWC_fisher_num_batches']):
                    stim_in, _, mk = stim.make_batch(task, test=False)
                    sess.run([model.update_big_omega], feed_dict = \
                        {x:stim_in, **gating_dict, mask:mk, dropout_keep_pct:par['drop_keep_pct'], \
                        input_dropout_keep_pct:par['input_drop_keep_pct'], rule:rule_cue})

            # Reset the Adam Optimizer, and set the prev_weight values to their current values
            sess.run(model.reset_adam_op)
            sess.run(model.reset_prev_vars)
            if par['stabilization'] == 'pathint':
                sess.run(model.reset_small_omega)
            """
            # update unit importance values
            if par['gating_type'] is 'iXdG':
                sess.run(model.update_importance)
            """

            # Test the networks on all trained tasks
            num_test_reps = 10
            accuracy = np.zeros((task + 1))
            for test_task in range(task + 1):

                # Use appropriate gating and rule cues
                if par['gating_type'] == 'iXdG':
                    gating_dict = {
                        k: v
                        for k, v in zip(gating,
                                        list(model.gates[test_task].values()))
                    }
                else:
                    gating_dict = {
                        k: v
                        for k, v in zip(gating, par['gating'][test_task])
                    }
                test_rule_cue = np.zeros([par['batch_size'], par['n_tasks']])
                test_rule_cue[:, test_task] = 1

                # Repeat the test as desired
                for r in range(num_test_reps):
                    stim_in, y_hat, mk = stim.make_batch(test_task, test=True)
                    acc = sess.run(model.accuracy, feed_dict={x:stim_in, y:y_hat, \
                        **gating_dict, mask:mk, dropout_keep_pct:1.0, input_dropout_keep_pct:1.0, rule:test_rule_cue})/num_test_reps
                    accuracy_grid[task, test_task] += acc
                    accuracy[test_task] += acc

            # Display network performance after testing is complete
            print('Task ', task, ' Mean ', np.mean(accuracy), ' First ',
                  accuracy[0], ' Last ', accuracy[-1])
            accuracy_full.append(np.mean(accuracy))

            # Reset weights between tasks if called upon
            if par['reset_weights']:
                sess.run(model.reset_weights)

        # Save model performance and parameters if desired
        if par['save_analysis']:
            save_results = {'task': task, 'accuracy': accuracy, 'accuracy_full': accuracy_full, \
                            'accuracy_grid': accuracy_grid, 'par': par}
            pickle.dump(save_results, open(par['save_dir'] + save_fn, 'wb'))

    print('\nModel execution complete.')
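
A minimal invocation sketch for this entry point (the save filename is hypothetical; gpu_id is forwarded to CUDA_VISIBLE_DEVICES):

main('stabilization_run.pkl', gpu_id='0')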
Example #10
def main(task):
    """
    This function will take the task object as input and
    creates a model object to learn the task
    It would run the iterations
    At each iteration, a new batch of trials will be created
    """
    # Reset TensorFlow before running anything
    tf.reset_default_graph()

    # Tensorflow finds the supported CPU and GPU devices you can use
    config = tf.ConfigProto()

    trial_length = par['num_time_steps']

    # Calculate shape of the stimulus for this task
    # Define a placeholder for the stimulus the agent sees
    #stimulus = tf.placeholder(tf.float64, shape=[task.total_dur, task.num_inputs, par['batch_train_size']])
    stimulus = tf.placeholder(
        tf.float64,
        shape=[trial_length, task.num_inputs, par['batch_train_size']])
    # Define a placeholder for the truth or correct answer about each trial
    truth = tf.placeholder(tf.float64, shape=par['batch_train_size'])

    # A TEMPORARY placeholder for target
    #target = tf.placeholder(tf.float64, shape=[task.total_dur, 3, par['batch_train_size']])
    target = tf.placeholder(tf.float64,
                            shape=[trial_length, 3, par['batch_train_size']])
    # Create a model for the given task object
    M = Model()
    # Build the tf structure that runs trials
    M.run_model(task, stimulus, truth)
    M.optimize(task, target)

    # Create a model from Nick's code
    stim = stm.Stimulus()
    n_input = task.num_inputs
    '''
    mask = tf.placeholder(tf.float64, shape=[task.total_dur, par['batch_train_size']])
    x = tf.placeholder(tf.float64, shape=[n_input, task.total_dur, par['batch_train_size']])  # input data
    target2 = tf.placeholder(tf.float64, shape=[3, task.total_dur, par['batch_train_size']])  # input data
    actual_reward = tf.placeholder(tf.float64, shape=[task.total_dur,par['batch_train_size']])
    pred_reward = tf.placeholder(tf.float64, shape=[task.total_dur, par['batch_train_size']])
    actual_action = tf.placeholder(tf.float64, shape=[task.total_dur, 3, par['batch_train_size']])
    '''
    mask = tf.placeholder(tf.float64,
                          shape=[trial_length, par['batch_train_size']])
    x = tf.placeholder(tf.float64,
                       shape=[n_input, trial_length,
                              par['batch_train_size']])  # input data
    target2 = tf.placeholder(tf.float64,
                             shape=[3, trial_length,
                                    par['batch_train_size']])  # input data
    actual_reward = tf.placeholder(
        tf.float64, shape=[trial_length, par['batch_train_size']])
    pred_reward = tf.placeholder(tf.float64,
                                 shape=[trial_length, par['batch_train_size']])
    actual_action = tf.placeholder(
        tf.float64, shape=[trial_length, 3, par['batch_train_size']])
    M_Nick = model_RL.Model(x, target2, actual_reward, pred_reward,
                            actual_action, mask)
    #M_Nick.run_model(task, stimulus, truth)
    #M_Nick.optimize(task, target)

    with tf.Session(config=config) as sess:

        init = tf.global_variables_initializer()
        sess.run(init)
        t_start = time.time()
        vloss = np.zeros((1, par['num_iterations']))
        ploss = np.zeros((1, par['num_iterations']))
        perf = np.zeros((1, par['num_iterations']))
        for it in range(par['num_iterations']):
            # Create a batch of stimuli, stores in attribute stimulus for the task
            #task.create_stimulus()
            # generate batch of batch_train_size with Nick's code
            trial_info = stim.generate_trial()
            """
            Run the model
            """

            my_truth = np.zeros(par['batch_train_size'])
            my_truth[trial_info['desired_output'][1, -1, :] == 1] = 1  # Match trials
            my_truth[trial_info['desired_output'][2, -1, :] == 1] = 2  # Non-match trials
            _, _, vloss[0, it], ploss[0, it], pol_grads, pol_out, pol_out0, actions, logpi, my_reward, action_array, time_mask, cumsum_logpi, pol_r, temp1, temp2, ideal, my_baseline, entropy = \
                sess.run([M.pol_train_op, M.val_train_op, M.Loss_val, M.Loss_pol, M.pol_capped_gvs,task.pol_out_history, task.pol_out_history0, task.actions, task.logpi, M.reward, task.action_array, task.time_mask, task.cumsum_logpi, \
                task.pol_r_history, task.temp1, task.temp2, task.ideal, M.baseline, \
                M.entropy], {stimulus: np.swapaxes(trial_info['neural_input'], 1, 0), truth: my_truth, target: np.swapaxes(trial_info['desired_output'], 1, 0)})
            # Run Nick's model
            pol_out, val_out, pol_rnn, action, stacked_mask, reward = sess.run([M_Nick.pol_out, M_Nick.val_out, M_Nick.h_pol, M_Nick.action, \
                 M_Nick.stacked_mask,M_Nick.reward], {x: trial_info['neural_input'], target2: trial_info['desired_output'], mask: trial_info['train_mask']})

            trial_reward = np.squeeze(np.stack(reward))
            trial_action = np.stack(action)

            _, _, pol_loss, val_loss = sess.run([M_Nick.train_pol, M_Nick.train_val, M_Nick.pol_loss, M_Nick.val_loss], \
                {x: trial_info['neural_input'], target2: trial_info['desired_output'], mask: trial_info['train_mask'], \
                actual_reward: trial_reward, pred_reward: np.squeeze(val_out), actual_action:trial_action })

            pol_out = np.array(pol_out)
            pol_out0 = np.array(pol_out0)
            temp1 = np.array(temp1)
            temp2 = np.array(temp2)

            if it % 100 == 0:
                fig = plt.plot(pol_out[:, :, 0])
                plt.legend(['Fixate', 'match', 'Non-match'])
                plt.title(str(my_truth[0]))
                plt.savefig('Iteration_' + str(it) +
                            '.png')  # save the figure to file
                plt.close()
                print('%6d,     %6.1f,   %6.1f,     %6.1f,  %6.1f,  %6.2f' %
                      (it, my_reward.sum(), my_baseline.sum(), ploss[0, it],
                       vloss[0, it], entropy))
                print('%6d,     %6.1f,   %6.1f,     %6.1f,  %6.1f' %
                      (it, np.array(trial_reward).sum(),
                       np.array(val_out).sum(), pol_loss, val_loss))
                #pdb.set_trace()
            #plt.plot(pol_out[:,:,0]); plt.show()
            #if np.isnan(ploss[0, it]):
            #    pdb.set_trace()
            #if it>=1000:
            #    pdb.set_trace()

        pdb.set_trace()
        a = 5
Example #11
def main(gpu_id=None):

    if gpu_id is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id

    tf.reset_default_graph()
    x = tf.placeholder(
        tf.float32, [par['num_time_steps'], par['batch_size'], par['n_input']],
        'stim')
    y = tf.placeholder(
        tf.float32,
        [par['num_time_steps'], par['batch_size'], par['n_output']], 'out')
    m = tf.placeholder(tf.float32, [par['num_time_steps'], par['batch_size']],
                       'mask')

    stim = stimulus.Stimulus()

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8) \
        if gpu_id == '0' else tf.GPUOptions()
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:

        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            cortex = Cortex(x, y, m)

        sess.run(tf.global_variables_initializer())

        event_data = {'stimuli': [], 'actions': [], 'rewards': []}
        print('\nTraining cortex for {} task:'.format(par['task']))
        for i in range(par['n_batches']):

            t0 = time.time()

            name, trial_info = stim.generate_trial()
            feed_dict = {
                x: trial_info['neural_input'],
                y: trial_info['reward_data'],
                m: trial_info['train_mask']
            }

            _, pol_loss, val_loss, spike_loss, ent_loss, h, reward, action = \
             sess.run([cortex.train_op, cortex.pol_loss, cortex.val_loss, cortex.spike_loss, \
              cortex.ent_loss, cortex.h, cortex.reward, cortex.action], feed_dict=feed_dict)

            # Select appropriate events
            inds = list(np.where(reward != 0.))
            rew_zero_times = np.array(
                [np.random.randint(ts) for ts in inds[0]])
            inds[0] = np.array([
                np.random.choice([rt, zt], p=[0.75, 0.25])
                for rt, zt in zip(inds[0], rew_zero_times)
            ])
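            # inds[0] now holds, per event, either the rewarded timestep (p=0.75)
            # or a random earlier timestep (p=0.25), so the saved events mix
            # rewarded and unrewarded moments.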

            event_stimuli = trial_info['neural_input'][inds[0], inds[1], :]
            event_actions = action[inds[0], inds[1], :]
            event_rewards = reward[inds[0], inds[1], :]

            # Sample from events and save the event data
            event_data['stimuli'].append(
                event_stimuli[::par['sample_step'], :])
            event_data['actions'].append(
                event_actions[::par['sample_step'], :])
            event_data['rewards'].append(
                event_rewards[::par['sample_step'], :])

            if i % 100 == 0:
                print('Iter: {:>4} | Rew: {:6.3f} | Pol. Loss: {:8.5f} | Val. Loss: {:8.5f} | Ent. Loss: {:8.5f} | Spiking: {:8.5f} |'.format(\
                 i, np.mean(np.sum(reward, axis=0)), pol_loss, val_loss, ent_loss, np.mean(h)))

    for (key, val) in event_data.items():
        event_data[key] = np.concatenate(val, axis=0)
    pickle.dump(
        event_data,
        open('./datadir/{}task_cortex_event_data.pkl'.format(par['task']),
             'wb'))
    print('Event samples saved.  Model complete. \n')
Example #12
def train_encoder(gpu_id=None):

    if gpu_id is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id

    tf.reset_default_graph()
    x = tf.placeholder(tf.float32,
                       [par['num_time_steps'], None, par['n_input']], 'input')

    update_parameters({'noise_in': 0.05})
    stim = stimulus.Stimulus()

    with tf.Session() as sess:

        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            encoder = Encoder(x)

        sess.run(tf.global_variables_initializer())

        print('\nTraining encoder for {} task:'.format(par['task']))
        for i in range(10001):

            name, trial_info = stim.generate_trial()
            feed_dict = {x: trial_info['neural_input']}
            _, rec_loss, act_loss, wei_loss, enc, rec, loss_plot = \
             sess.run([encoder.train, encoder.rec_loss, \
              encoder.act_loss, encoder.wei_loss, encoder.E, \
              encoder.R, encoder.loss_plot], feed_dict=feed_dict)

            if i % 500 == 0:

                fig, ax = plt.subplots(3, 5, figsize=(14, 8))
                for t in range(5):
                    ax[0, t].imshow(trial_info['neural_input'][:, t, :].T,
                                    aspect='auto')
                    ax[1, t].imshow(rec[:, t, :].T, aspect='auto')
                    ax[2, t].imshow(enc[:, t, :].T, aspect='auto')
                    ax[0, t].set_title('Trial {}'.format(t))
                    for p in range(3):
                        ax[p, t].set_xticks([])
                        ax[p, t].set_yticks([])

                ax[0, 0].set_ylabel('Stimulus Input')
                ax[1, 0].set_ylabel('Reconstruction')
                ax[2, 0].set_ylabel('Encoding')
                ax[2, 0].set_xlabel('Time')

                fig.suptitle('Input, Reconstruction, and Encoding')
                plt.savefig('./savedir/input_rec_and_enc.png',
                            bbox_inches='tight')
                plt.clf()
                plt.close()

                print(
                    'Iter: {:>6} | Rec. Loss: {:7.5f} | Act. Loss: {:7.5f} | Wei. Loss: {:7.5f}'
                    .format(i, rec_loss, act_loss, wei_loss))

        print('\nEncoder training complete.')

        # Record weights to check reconstruction
        W, U = sess.run([encoder.W, encoder.U])
        weights = {'W': W, 'U': U}

        if par['internal_sampling']:
            print('Sampling stimulus:')

            # Sample stimuli (enough for 200 batches, or ~100k trials, which should sample most of the distribution)
            for j in range(4):
                trial_info_list = []
                for i in range(50):

                    name, trial_info = stim.generate_trial()
                    feed_dict = {x: trial_info['neural_input']}
                    enc, = sess.run([encoder.E], feed_dict=feed_dict)
                    trial_info['encoded_input'] = enc

                    trial_info_list.append(trial_info)

                aggregated_trial_info = {}
                for key in (list(trial_info.keys()) + ['encoded_input']):
                    aggregated_trial_info[key] = np.concatenate(
                        [item[key] for item in trial_info_list], axis=1)

                # Put together save data and save
                save_data = {
                    'task': name,
                    'trial_info': aggregated_trial_info,
                    'weights': weights,
                    'rec_loss': rec_loss,
                    'act_loss': act_loss
                }
                pickle.dump(
                    save_data,
                    open(
                        './datadir/{}task_{}unit_input_encoding_part{}.pkl'.
                        format(name, par['n_latent'], j), 'wb'))

            print('Encoded stimulus samples saved.  Model complete. \n')

        else:
            print('Saving weights...')
            save_data = {
                'task': name,
                'weights': weights,
                'rec_loss': rec_loss,
                'act_loss': act_loss
            }
            pickle.dump(
                save_data,
                open(
                    './datadir/{}task_{}unit_input_encoder_weights.pkl'.format(
                        name, par['n_latent']), 'wb'))
            print('Encoder weights saved.  Model complete. \n')
Example #13
    h *= suppress_activity

    if par['synapse_config'] is None:
        syn_x_new = np.ones_like(h)
        syn_u_new = np.ones_like(h)

    return h, syn_x_new, syn_u_new


data_dir = './savedir/'
filename = data_dir + 'WMnew.pkl' # saved model file
results = pickle.load(open(filename, 'rb'))

update_parameters(results['parameters'])
stim = stimulus.Stimulus()

# generate trials with match probability at 50%
trial_info = stim.generate_trial(test_mode = True)
input_data = np.squeeze(np.split(trial_info['neural_input'], par['num_time_steps'], axis=0))

h_init = results['weights']['h']

y, h, syn_x, syn_u = run_model(input_data, h_init, \
    results['parameters']['syn_x_init'], results['parameters']['syn_u_init'], results['weights'])

syn_efficacy = syn_x*syn_u

'''
# generate trials with random sample and test stimuli, used for decoding
trial_info_decode = stim.generate_trial(test_mode = True)
'''
Example #14
    def make_matrix(self, filename, method, N):
        x = pickle.load(open(filename, 'rb'))
        beh_threshold = 0.1
        val_th = 0.1
        ind_accurate = np.where(np.array(x['accuracy_hist']) > 0.98)[0]
        #N = np.argmax(ind_accurate)
        #N = 6
        print('N = ', N)

        if method == 'elim_lesion' or method == 'elim':
            parameters.update_parameters(x['par'])
            s = stimulus.Stimulus()
            trial_info = s.generate_trial()

        if method == 'lesion':
            significant_weights_rnn = x['model_performance']['accuracy'][
                -1] - x['lesion_accuracy_rnn'][0, :, :] > beh_threshold
            significant_weights_out = x['model_performance']['accuracy'][
                -1] - x['lesion_accuracy_out'][0, :, :] > beh_threshold
            v = np.array([0]*x['parameters']['num_exc_units'] + [1]*x['parameters']['num_inh_units'] \
                + [2]*x['parameters']['n_output'])
            W = np.vstack((significant_weights_rnn, significant_weights_out))
            d = W.shape[0] - W.shape[1]
            W = np.hstack((W, np.zeros((W.shape[0], d))))

        elif method == 'elim':
            num_units = 50 - N
            w1 = np.zeros((num_units, num_units))
            w2 = np.zeros((3, num_units))
            ind = np.where(x['gate_hist'][N] > 0)[0]
            for i in range(num_units):
                for j in range(num_units):
                    w1[i, j] = x['weights_hist'][N]['w_rnn'][ind[i],
                                                             ind[j]] > val_th
                for j in range(3):
                    w2[j,
                       i] = x['weights_hist'][N]['w_out'][j, ind[i]] > val_th
            n_exc = int(np.sum(x['gate_hist'][N][:x['par']['num_exc']]))
            n_inh = int(np.sum(x['gate_hist'][N][x['par']['num_exc']:]))
            v = np.array([0] * n_exc + [1] * n_inh +
                         [2] * x['par']['n_output'])
            W = np.vstack((w1, w2))
            d = W.shape[0] - W.shape[1]
            W = np.hstack((W, np.zeros((W.shape[0], d))))

        elif method == 'elim_lesion':
            num_units = 50 - N
            r = analysis.lesion_weights(trial_info, x['par']['h_init'], x['par']['syn_x_init'], x['par']['syn_u_init'], \
                x['weights_hist'][N], x['gate_hist'][N])
            #plt.imshow(np.squeeze(r['lesion_accuracy_rnn']), aspect='auto', interpolation = 'none')
            #plt.colorbar()
            #plt.show()
            w1_full = np.tile(
                x['accuracy_hist'][N],
                (x['par']['n_hidden'], x['par']['n_hidden'])) - np.squeeze(
                    r['lesion_accuracy_rnn']) > beh_threshold
            w2_full = np.tile(
                x['accuracy_hist'][N],
                (x['par']['n_output'], x['par']['n_hidden'])) - np.squeeze(
                    r['lesion_accuracy_out']) > beh_threshold
            w1 = np.zeros((num_units, num_units))
            w2 = np.zeros((3, num_units))
            ind = np.where(x['gate_hist'][N] > 0)[0]
            for i in range(num_units):
                for j in range(num_units):
                    w1[i, j] = w1_full[ind[i], ind[j]]
                for j in range(3):
                    w2[j, i] = w2_full[j, ind[i]]
            #plt.imshow(w1, aspect='auto', interpolation = 'none')
            #plt.colorbar()
            #plt.show()
            print('accuracy ', x['accuracy_hist'][N])
            n_exc = int(np.sum(x['gate_hist'][N][:x['par']['num_exc']]))
            n_inh = int(np.sum(x['gate_hist'][N][x['par']['num_exc']:]))
            v = np.array([0] * n_exc + [1] * n_inh +
                         [2] * x['par']['n_output'])
            W = np.vstack((w1, w2))
            d = W.shape[0] - W.shape[1]
            W = np.hstack((W, np.zeros((W.shape[0], d))))
            plt.imshow(W, aspect='auto', interpolation='none')
            plt.colorbar()
            plt.show()
            print(v)

        elif method == 'stacked':
            W = []
            for i in range(x['W_rnn'].shape[0]):
                w1 = np.reshape(x['W_rnn'][i, :], (50, 50)) > 0.2
                w2 = np.reshape(x['W_out'][i, :], (3, 50)) > 0.2
                v = np.array([0] * 40 + [1] * 10 + [2] * 3)
                W1 = np.vstack((w1, w2))
                d = W1.shape[0] - W1.shape[1]
                W1 = np.hstack((W1, np.zeros((W1.shape[0], d))))
                W.append(W1)

        return W, v
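
A hedged usage sketch for the method above, assuming net_analysis is an instance of the enclosing analysis class (the instance name and file path are hypothetical; N = 6 echoes the commented-out value in the method):

# 'elim' rebuilds thresholded connectivity among the 50 - N surviving units
W, v = net_analysis.make_matrix('./savedir/gated_model.pkl', 'elim', 6)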
Example #15
def main(gpu_id=None):

    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
    """
    Reset TensorFlow before running anything
    """
    tf.reset_default_graph()
    """
    Create the stimulus class to generate trial parameters and input activity
    """
    stim = stimulus.Stimulus()

    n_input, n_hidden, n_output = par['shape']
    # trials per iteration; gradients are calculated after batch_train_size trials
    N = par['batch_train_size']
    """
    Define all placeholders
    """
    mask = tf.placeholder(
        tf.float64, shape=[par['num_time_steps'], par['batch_train_size']])
    x = tf.placeholder(
        tf.float64,
        shape=[n_input, par['num_time_steps'],
               par['batch_train_size']])  # input data
    target = tf.placeholder(tf.float64,
                            shape=[
                                par['n_output'], par['num_time_steps'],
                                par['batch_train_size']
                            ])  # input data
    actual_reward = tf.placeholder(
        tf.float64, shape=[par['num_time_steps'], par['batch_train_size']])
    pred_reward = tf.placeholder(
        tf.float64, shape=[par['num_time_steps'], par['batch_train_size']])
    actual_action = tf.placeholder(tf.float64,
                                   shape=[
                                       par['num_time_steps'], par['n_output'],
                                       par['batch_train_size']
                                   ])

    config = tf.ConfigProto()
    #config.gpu_options.allow_growth=True

    # enter "config=tf.ConfigProto(log_device_placement=True)" inside Session to check whether CPU/GPU in use
    with tf.Session(config=config) as sess:

        #with tf.device("/gpu:0"):
        model = Model(x, target, actual_reward, pred_reward, actual_action,
                      mask)
        init = tf.global_variables_initializer()
        sess.run(init)

        # keep track of the model performance across training
        model_performance = {
            'accuracy': [],
            'loss': [],
            'perf_loss': [],
            'spike_loss': [],
            'trial': []
        }

        for i in range(par['num_iterations']):

            # generate batch of batch_train_size
            trial_info = stim.generate_trial()
            """
            Run the model
            """
            pol_out, val_out, pol_rnn, action, stacked_mask, reward = sess.run([model.pol_out, model.val_out, model.h_pol, model.action, \
                 model.stacked_mask,model.reward], {x: trial_info['neural_input'], target: trial_info['desired_output'], mask: trial_info['train_mask']})

            trial_reward = np.squeeze(np.stack(reward))
            trial_action = np.stack(action)
            #plt.imshow(np.squeeze(trial_reward))
            #plt.colorbar()
            #plt.show()

            _, _, pol_loss, val_loss = sess.run([model.train_pol, model.train_val, model.pol_loss, model.val_loss], \
                {x: trial_info['neural_input'], target: trial_info['desired_output'], mask: trial_info['train_mask'], \
                actual_reward: trial_reward, pred_reward: np.squeeze(val_out), actual_action:trial_action })

            accuracy, _, _ = analysis.get_perf(trial_info['desired_output'],
                                               action,
                                               trial_info['train_mask'])

            #model_performance = append_model_performance(model_performance, accuracy, val_loss, pol_loss, spike_loss, (i+1)*N)
            """
            Save the network model and output model performance to screen
            """
            if i % par['iters_between_outputs'] == 0 and i > 0:
                print_results(i, N, pol_loss, 0., pol_rnn, accuracy)
                r = np.squeeze(np.sum(np.stack(trial_reward), axis=0))
                print('Mean mask', np.mean(stacked_mask), ' val loss ',
                      val_loss, ' reward ', np.mean(r), np.max(r))
                #plt.imshow(np.squeeze(stacked_mask[:,:]))
                #plt.colorbar()
                #plt.show()
                #plt.imshow(np.squeeze(trial_reward))
                #plt.colorbar()
                #plt.show()
        """
        Save model, analyze the network model and save the results
        """
        #save_path = saver.save(sess, par['save_dir'] + par['ckpt_save_fn'])
        if par['analyze_model']:
            weights = eval_weights()
            analysis.analyze_model(trial_info, y_hat, state_hist, syn_x_hist, syn_u_hist, model_performance, weights, \
                simulation = True, lesion = False, tuning = False, decoding = False, load_previous_file = False, save_raw_data = False)

            # Generate another batch of trials with test_mode = True (sample and test stimuli
            # are independently drawn), and then perform tuning and decoding analysis
            trial_info = stim.generate_trial(test_mode=True)
            y_hat, state_hist, syn_x_hist, syn_u_hist = \
                sess.run([model.y_hat, model.hidden_state_hist, model.syn_x_hist, model.syn_u_hist], \
                {x: trial_info['neural_input'], target: trial_info['desired_output'], mask: trial_info['train_mask']})
            analysis.analyze_model(trial_info, y_hat, state_hist, syn_x_hist, syn_u_hist, model_performance, weights, \
                simulation = False, lesion = False, tuning = par['analyze_tuning'], decoding = True, load_previous_file = True, save_raw_data = False)
Example #16
def main(gpu_id = None):
    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
        print(os.environ["CUDA_VISIBLE_DEVICES"])


    # Print key parameters
    print_important_params()

    # Reset TensorFlow before running anything
    tf.reset_default_graph()

    # Create the stimulus class to generate trial parameters and input activity
    stim = stimulus.Stimulus()

    # Define all placeholders
    m = tf.placeholder(tf.float32, [par['num_time_steps'], par['batch_size']], 'mask')
    x = tf.placeholder(tf.float32, [par['num_time_steps'], par['batch_size'], par['n_input']], 'input')
    t = tf.placeholder(tf.float32, [par['num_time_steps'], par['batch_size'], par['n_output']], 'target')

    # Make sure savedir exists
    if not os.path.exists(f'savedir/{gpu_id}/'):
        os.makedirs(f'savedir/{gpu_id}/')

    save_increment = 0

    # enter "config=tf.ConfigProto(log_device_placement=True)" inside Session to check whether CPU/GPU in use
    with tf.Session(config=tf.ConfigProto()) as sess:

        device = '/cpu:0' if gpu_id is None else '/gpu:0'

        t0 = time.time()
        with tf.device(device):
            model = Model(x, t, m)
        print(f"Model initialized. Time elapsed: {str(time.time() - t0)}")
        print(par['shape'])

        sess.run(tf.global_variables_initializer())

        # keep track of the model performance across training
        t0 = time.time()

        # Set up records for memory bank performance storage
        memory_bank_performance = dict()
        for k in range(par['n_memory_banks']):
            memory_bank_performance[k] = []


        memory_bank_id = 0

        model_performance = {'accuracy': [], 'loss': [], 'perf_loss': [], 'spike_loss': [], \
        'weight_loss': [], 'iteration': []}

        i = 0
        while i < par['num_iterations']:

            # Generate batch of batch_train_size
            trial_info = stim.generate_trial(set_rule = None)

            # Run the model
            _, loss, perf_loss, spike_loss, weight_loss, y, h, syn_x, syn_u = \
                sess.run([model.train_op, model.loss, model.perf_loss, model.spike_loss, \
                model.weight_loss, model.y, model.h, model.syn_x, model.syn_u], \
                {x: trial_info['neural_input'], t: trial_info['desired_output'], m: trial_info['train_mask']})

            accuracies = analysis.get_perf_sr(trial_info['desired_output'], y, trial_info['train_mask'])

            model_performance = append_model_performance(model_performance, accuracies, loss, perf_loss, spike_loss, weight_loss, i)

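            # Once running accuracy clears 90% (or on the final iteration), move to
            # the next memory bank and re-saturate pruned recurrent weights:
            # entries at or below 1e-5 are reset to 1e-3 before resaturation_op runs.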
            if (np.mean(model_performance['accuracy'][-50:])) > 0.90 or (i == par['num_iterations'] - 1):
                memory_bank_id, i = update_memory_bank(memory_bank_id, memory_bank_performance)
                old_w = sess.run(model.var_dict)['w_rnn']
                old_w[old_w <= 1e-5] = 1e-3
                par['new_init'] = old_w
                _ = sess.run(model.resaturation_op)


            # Save the network model and output model performance to screen
            elif (i+1)%par['iters_between_outputs']==0:
                t1 = time.time()
                #accuracies = [analysis.get_perf_sr(trial_info['desired_output'], y[n,:,:,:], trial_info['train_mask'], True) for n in range(par['n_networks'])]
                print_results(i, perf_loss, spike_loss, weight_loss, h, accuracies)
                print(f"Elapsed time: {str(t1 - t0)}")
                t0 = time.time()

                #memory_bank_performance[memory_bank_id].append(accuracies)

                # Also: compute accuracies on each of the previous memory banks
                # (IDEA HERE: SHOULD WE CONSTRAIN S.T. NO INDIVIDUAL MEMORIES OVERLAP?)
                if memory_bank_id > 0:
                    print("Accuracy on previous memory banks:")
                for k in range(par['n_memory_banks']):
                    if k <= memory_bank_id:
                        par['memory_bank_id'] = k
                        update_dependencies()
                        trial_info = stim.generate_trial(set_rule = None, memory_bank_id = k)
                        h, y, syn_x, syn_u = sess.run([model.h, model.y_output, model.syn_x, model.syn_u],
                            {x: trial_info['neural_input'], t: trial_info['desired_output'], m: trial_info['train_mask']})

                        accuracies = analysis.get_perf_sr(trial_info['desired_output'], y, trial_info['train_mask'])
                        print(f"\tbank {k}: {accuracies}")
                        memory_bank_performance[k].append(accuracies)
                    else:
                        memory_bank_performance[k].append(0)

            i += 1

        # Save out activities
        trial_info = stim.generate_trial(set_rule = None, memory_bank_id = memory_bank_id)
        h, y, syn_x, syn_u = sess.run([model.h, model.y_output, model.syn_x, model.syn_u],
            {x: trial_info['neural_input'], t: trial_info['desired_output'], m: trial_info['train_mask']})

        # Save model and results
        weights = sess.run(model.var_dict)
        save_results(model_performance, weights, save_fn="sequence_reproduction_proof_of_concept.pkl")
Example #17
def main(gpu_id=None):

    print('\nRunning model.\n')

    ##################
    ### Setting Up ###
    ##################
    """ Set up GPU """
    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
    """ Reset TensorFlow before running anything """
    tf.reset_default_graph()
    """ Set up performance recording """
    model_performance = {'accuracy': [], 'par': [], 'task_list': []}
    stim = stimulus.Stimulus()

    mask = tf.placeholder(
        tf.float32, shape=[p.par['num_time_steps'], p.par['batch_train_size']])
    x = tf.placeholder(tf.float32,
                       shape=[
                           p.par['n_input'], p.par['num_time_steps'],
                           p.par['batch_train_size']
                       ])
    y = tf.placeholder(tf.float32,
                       shape=[
                           p.par['n_output'], p.par['num_time_steps'],
                           p.par['batch_train_size']
                       ])
    z = tf.placeholder(
        tf.float32, shape=[p.par['num_networks'], p.par['generator_dims'][0]])
    """ Start TensorFlow session """
    with tf.Session() as sess:
        if gpu_id is None:
            model = Model(x, y, mask, z)
        else:
            with tf.device("/gpu:0"):
                model = Model(x, y, mask, z)

        # Initialize session variables
        init = tf.global_variables_initializer()
        sess.run(init)
        t_start = time.time()

        # Restore variables from previous model if desired
        saver = tf.train.Saver()
        if p.par['load_previous_model']:
            saver.restore(sess, p.par['save_dir'] + p.par['ckpt_load_fn'])
            print('Model ' + p.par['ckpt_load_fn'] + ' restored.')

        generator_var_hist = []

        for k in range(p.par['num_network_iters']):
            print('NETWORK ITERATION ', k)
            generator_var = np.random.normal(0,
                                             1,
                                             size=(p.par['num_networks'],
                                                   p.par['generator_dims'][0]))
            generator_var_hist.append(generator_var)

            for i in range(p.par['num_iterations']):

                # generate batch of batch_train_size
                trial_info = stim.generate_trial(k)
                """
                Run the model
                """
                _,_, total_loss, perf_loss, spike_loss, aux_loss, network_output = sess.run([model.train_op, \
                    model.update_small_omega, model.total_loss, model.perf_loss, \
                    model.spike_loss,  model.aux_loss, model.networks_output], {x: trial_info['neural_input'], \
                    y: trial_info['desired_output'], mask: trial_info['train_mask'], z: generator_var})

                if (i + 1) % p.par['iters_between_outputs'] == 0:
                    accuracy = get_perf(trial_info['desired_output'],
                                        network_output,
                                        trial_info['train_mask'])
                    iteration_time = time.time() - t_start
                    iterstr = 'Iter. {:>4}'.format(i)
                    timestr = 'Time. {:>7.4}'.format(iteration_time)
                    lossstr = 'Total Loss: {:>7.4}'.format(total_loss)
                    auxstr = 'Aux Loss: {:>7.4}'.format(aux_loss)
                    #perfstr = 'Perf. Loss: {:>7.4} +/- {:<7.4}'.format(np.mean(perf_loss), np.std(perf_loss))
                    #spikstr = 'Spike Loss: {:>7.4} +/- {:<7.4}'.format(np.mean(spike_loss), np.std(spike_loss))
                    #wirestr = 'Wiring Loss: {:>7.4} +/- {:<7.4}'.format(np.mean(wiring_loss), np.std(wiring_loss))
                    perfstr = 'Perf. Loss: {:>7.4}'.format(np.mean(perf_loss))
                    spikstr = 'Spike Loss: {:>7.4}'.format(np.mean(spike_loss))
                    #wirestr = 'Wiring Loss: {:>7.4}'.format(np.mean(wiring_loss))
                    accuracystr = 'Accuracy: {:>7.4} +/- {:<7.4}'.format(
                        np.mean(accuracy), np.std(accuracy))

                    print(' | '.join([
                        iterstr, timestr, perfstr, spikstr, accuracystr,
                        auxstr
                    ]))

                if (i + 1) % 1000 == 0:
                    num_reps = 50
                    accuracy = get_perf(trial_info['desired_output'],
                                        network_output,
                                        trial_info['train_mask'])

                    novel_accuracy = []
                    for r in range(num_reps):
                        novel_var = np.random.normal(
                            0, 1, size=(1, p.par['generator_dims'][0]))
                        trial_info = stim.generate_trial()
                        network_output = sess.run(model.networks_output, {x: trial_info['neural_input'], \
                            y: trial_info['desired_output'], mask: trial_info['train_mask'], z: novel_var})
                        novel_accuracy.append(get_perf(trial_info['desired_output'], network_output, \
                            trial_info['train_mask']))
                    previous_accuracy = []
                    for r in range(len(generator_var_hist)):
                        network_output = sess.run(model.networks_output, {x: trial_info['neural_input'], \
                            y: trial_info['desired_output'], mask: trial_info['train_mask'], z: generator_var_hist[r]})
                        previous_accuracy.append(get_perf(trial_info['desired_output'], network_output, \
                            trial_info['train_mask']))
                    accuracystr = 'Accuracy: {:>7.4} +/- {:<7.4}'.format(
                        np.mean(novel_accuracy), np.std(novel_accuracy))
                    prev_accuracystr = 'Accuracy: {:>7.4} +/- {:<7.4}'.format(
                        np.mean(previous_accuracy), np.std(previous_accuracy))
                    print('Novel ', accuracystr)
                    print('Previous ', prev_accuracystr)
                    print('Saving data...')
                    save_data(accuracy, novel_accuracy, previous_accuracy)

            # Update big omegas, and reset other values before starting new task
            big_omegas = sess.run(
                [model.update_big_omega, model.big_omega_var])

            # Reset the Adam Optimizer, and set the previous parameter values to their current values
            sess.run(model.reset_adam_op)
            sess.run(model.reset_prev_vars)
            sess.run(model.reset_small_omega)
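
The small-omega/big-omega bookkeeping above looks like the path-integral stabilization of Zenke et al. ("Synaptic Intelligence"); assuming that scheme, a minimal numpy sketch of the two updates is below. All names here are illustrative, not the model's actual variables.

import numpy as np

w = np.random.randn(4)            # current parameters
w_prev = w.copy()                 # parameters at the last task boundary
small_omega = np.zeros_like(w)    # per-step importance accumulator
big_omega = np.zeros_like(w)      # consolidated importance
xi = 1e-3                         # damping term

for step in range(100):
    grad = np.random.randn(4)     # stand-in for the task gradient
    delta = -0.01 * grad          # plain SGD step
    small_omega += -grad * delta  # path integral of the loss change
    w += delta

# At a task boundary: consolidate importance, then reset the accumulator.
big_omega += small_omega / ((w - w_prev) ** 2 + xi)
w_prev = w.copy()
small_omega[:] = 0.0
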
Exemplo n.º 18
0
def main(gpu_id=None):

    # Print out context
    print_important_params()

    # Select GPU
    if gpu_id is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id

    # Reduce memory consumption for GPU 3
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8) \
     if gpu_id == '3' else tf.GPUOptions()

    # Initialize stimulus environment
    environment = stimulus.Stimulus()

    # Reset graph and designate placeholders
    tf.reset_default_graph()
    stim_pl = tf.placeholder(tf.float32, [par['batch_size'], par['n_input']],
                             'stim')
    prev_latent_pl = tf.placeholder(tf.float32,
                                    [par['batch_size'], par['n_latent']],
                                    'prev_latent')
    latent_pl = tf.placeholder(tf.float32,
                               [par['batch_size'], par['n_latent']], 'latent')
    hop_latent_pl = tf.placeholder(tf.float32,
                                   [par['batch_size'], par['n_latent']],
                                   'hp_latent')
    prev_action_pl = tf.placeholder(tf.float32,
                                    [par['batch_size'], par['n_pol']],
                                    'prev_action')
    action_pl = tf.placeholder(tf.float32, [par['batch_size'], par['n_pol']],
                               'action')
    reward_pl = tf.placeholder(tf.float32, [par['batch_size'], par['n_val']],
                               'reward')
    prev_reward_pl = tf.placeholder(tf.float32,
                                    [par['batch_size'], par['n_unique_vals']],
                                    'prev_reward')
    prev_val_pl = tf.placeholder(tf.float32, [par['batch_size'], par['n_val']],
                                 'prev_val')
    future_val_pl = tf.placeholder(tf.float32,
                                   [par['batch_size'], par['n_val']],
                                   'future_val')

    # Start TensorFlow session
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:

        # Set up and initialize model on desired device
        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            model = Model(stim_pl, prev_latent_pl, latent_pl, hop_latent_pl, prev_reward_pl, \
             reward_pl, prev_action_pl, action_pl, prev_val_pl, future_val_pl)

        sess.run(tf.global_variables_initializer())

        # Start training loop
        for i in range(par['num_batches']):

            # Reset environment at the start of each iteration
            environment.reset_agents()
            environment.reset_rewards()
            x_read_list = []

            # Pre-allocate prev_val and total_reward
            prev_latent = np.zeros((par['batch_size'], par['n_latent']),
                                   dtype=np.float32)
            prev_action = np.zeros((par['batch_size'], par['n_pol']),
                                   dtype=np.float32)
            prev_reward_scalar = np.zeros((par['batch_size'], par['n_val']),
                                          dtype=np.float32)
            prev_reward_matrix = np.zeros(
                (par['batch_size'], par['n_unique_vals']), dtype=np.float32)
            prev_val = np.zeros((par['batch_size'], par['n_val']),
                                dtype=np.float32)
            total_reward = np.zeros((par['batch_size'], 1), dtype=np.float32)

            action_record = []
            H_stim_record = []
            H_act_f_record = []

            # Iterate through time
            for t in range(par['num_time_steps']):

                # Make inputs
                stim_in = environment.make_inputs()

                # Generate the latent, train encoder weights
                _, reconstruction_loss, latent_loss, latent, latent_mu, latent_log_var, sparsity_loss = \
                 sess.run([model.train_encoder, model.reconstruction_loss, model.latent_loss, \
                 model.latent, model.latent_mu, model.latent_log_var, model.sparsity_loss], \
                 feed_dict = {stim_pl: stim_in})

                agent_location = environment.get_agent_locs()

                # Generate the policy and value functions
                pol, val, hopfield_read = sess.run([model.pol_out, model.val_out, model.hopfield_read], \
                 feed_dict = {latent_pl: latent})

                # Choose action, calculate reward and determine next state
                action = np.array([
                    np.random.multinomial(1, pol[b, :] - 1e-6)
                    for b in range(par['batch_size'])
                ])
                agent_locs = environment.get_agent_locs()
                reward = environment.agent_action(action)
                reward_one_hot = np.zeros(
                    (par['batch_size'], par['n_unique_vals']),
                    dtype=np.float32)
                reward_one_hot[np.arange(par['batch_size']),
                               np.int8(np.squeeze(reward))] = 1.

                if t < 0:
                    latent /= (
                        1e-9 +
                        np.sqrt(np.sum(latent**2, axis=1, keepdims=True)))
                    prev_latent /= (
                        1e-9 +
                        np.sqrt(np.sum(prev_latent**2, axis=1, keepdims=True)))
                    z = np.sum(latent * prev_latent, axis=1)
                    print(t, agent_location[0, :], action[0, :], ' Dot prod ',
                          z[0])
                    plt.imshow(np.reshape(hopfield_read[0, :], (4, 2)),
                               aspect='auto')
                    plt.colorbar()
                    plt.title('Location ' + str(agent_location[0, :]))
                    plt.show()

                #action_record.append(action)

                # Update total reward and prev_val
                total_reward += reward

                # Update the Hopfield network
                sess.run([model.update_hopfield_with_dep, model.train_RL, model.hopfield_read], \
                 feed_dict = {latent_pl: prev_latent, action_pl: prev_action, reward_pl: prev_reward_scalar, \
                 future_val_pl: val, prev_latent_pl : prev_latent, hop_latent_pl : latent, \
                 prev_action_pl: prev_action, prev_reward_pl : prev_reward_matrix})

                prev_latent = latent + 0.
                prev_reward_matrix = reward_one_hot + 0.
                prev_reward_scalar = reward + 0.
                prev_action = action + 0.
                prev_val = val + 0.

                if t == -1:  # disabled debugging branch; this condition never holds
                    z_read = np.stack(x_read_list, axis=0)
                    fig, ax = plt.subplots(2, 2, figsize=[8, 8])
                    #ax[0,0].imshow(z_read[:, 0, :], aspect = 'auto')
                    #ax[0,1].imshow(z_read[:, 1, :], aspect = 'auto')
                    #ax[1,0].imshow(z_read[:, 2, :], aspect = 'auto')
                    #ax[1,1].imshow(z_read[:, 3, :], aspect = 'auto')

                    ax[0, 0].imshow(H_stim_record[0], aspect='auto')
                    ax[0, 1].imshow(H_stim_record[100], aspect='auto')
                    ax[1, 0].imshow(H_stim_record[300], aspect='auto')
                    ax[1, 1].imshow(H_stim_record[-1], aspect='auto')

                    fig, ax = plt.subplots(2, 2, figsize=[8, 8])
                    ax[0, 0].imshow(H_act_f_record[0], aspect='auto')
                    ax[0, 1].imshow(H_act_f_record[100], aspect='auto')
                    ax[1, 0].imshow(H_act_f_record[300], aspect='auto')
                    ax[1, 1].imshow(H_act_f_record[-1], aspect='auto')

                    plt.show()

                # Reset agents that have obtained a reward
                environment.reset_agents(reward != 0.)

            # Analyze actions
            #action_record = np.concatenate(action_record, axis=0)
            #action_record = np.round(np.mean(action_record, axis=0), 2).tolist()

            # Output network performance
            if i % 10 == 0:
                H_sas, H_sar = sess.run([model.H_sas, model.H_sar])
                #print('Iter {:>4} | Mean Reward: {:6.3f} | Recon Loss: {:8.6f} | Sparsity Loss: {:8.6f} | Sim act.: {:8.6f} | Action Dist: {}'.format(\
                #	i, np.mean(total_reward), rec_loss, sparsity_loss, sim_active, action_record))
                print('Iter {:>4} | Mean Reward: {:6.3f} | Recon Loss: {:8.6f} | Latent Loss: {:8.6f} |'.format(\
                 i, np.mean(total_reward), reconstruction_loss, latent_loss))
                print('          | Latent Mu: {:8.6f} | Latent Var: {:8.6f}  | Sparsity Loss: {:8.6f}'.format(\
                 np.mean(latent_mu), np.mean(latent_log_var), sparsity_loss))
                print('          | H_sas', np.mean(H_sas**2), '| H_sar',
                      np.mean(H_sar**2))
            if i % 200 == 0 and par['train_encoder'] and i > 0:
                weights = sess.run(model.var_dict)
                results = {'weights': weights, 'latent': latent}
                pickle.dump(results,
                            open('./savedir/VAE_8x8_model_weights3.pkl', 'wb'))

            # Update model weights
            sess.run(model.update_weights)
            sess.run(model.reset_hopfield)
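
A toy sketch of the Hopfield-style store/read cycle the loop above depends on, assuming a plain outer-product storage rule; the model's actual op (model.update_hopfield_with_dep) also applies synaptic depression and is not reproduced here.

import numpy as np

n = 16
H = np.zeros((n, n))

def store(H, key, value):
    # Outer-product (Hebbian) storage of one key/value pair.
    return H + np.outer(key, value)

def read(H, key):
    # Linear readout: recovers the value scaled by |key|^2.
    return key @ H

key, value = np.random.randn(n), np.random.randn(n)
H = store(H, key, value)
print(np.allclose(read(H, key), (key @ key) * value))  # True for one stored pattern
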
Exemplo n.º 19
0
def main(gpu_id=None):

    # Select GPU
    if gpu_id is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id

    # Reduce memory consumption for GPU 3
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8) \
     if gpu_id == '3' else tf.GPUOptions()

    # Initialize stimulus environment
    stim = stimulus.Stimulus()

    # Reset graph and designate placeholders
    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, [par['batch_size'], 28, 28, 1], 'input')
    y = tf.placeholder(tf.int32, [par['batch_size']], 'output')
    k = tf.placeholder(tf.float32, [], 'keep_prob')

    # Start TensorFlow session
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:

        # Set up and initialize model on desired device
        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            model = Model(x, y, k)
        sess.run(tf.global_variables_initializer())

        # Start training loop
        print('-' * 20 + '\nStarting training.')
        for i in range(par['iterations']):

            images, labels = stim.make_batch()
            train_get_list = [model.train]
            _, = sess.run(train_get_list,
                          feed_dict={
                              x: images,
                              y: labels,
                              k: par['dropout_keep_prob']
                          })

            if i % 100 == 0:

                images, labels = stim.make_batch(test=True)
                test_get_list = [
                    model.output, model.recon, model.task_loss,
                    model.recon_loss, model.latent_loss
                ]
                output, recon, task_loss, recon_loss, latent_loss = sess.run(
                    test_get_list, feed_dict={
                        x: images,
                        y: labels,
                        k: 1.
                    })

                acc = np.mean(np.argmax(output, axis=-1) == labels)
                print('Iter {:>5} | Accuracy: {:5.3f} | Task Loss: {:5.3f} | Recon Loss: {:5.3f} | Latent Loss: {:5.3f}'.format(\
                 i, acc, task_loss, recon_loss, latent_loss))

        print('\nTraining complete.  Recording results.')

        images, labels = stim.make_batch(test=True)
        test_get_list = [
            model.output, model.recon, model.task_loss, model.recon_loss,
            model.latent_loss
        ]
        output, recon, task_loss, recon_loss, latent_loss = sess.run(
            test_get_list, feed_dict={
                x: images,
                y: labels,
                k: 1.
            })
        acc = np.mean(np.argmax(output, axis=-1) == labels)

        data = {
            'parameters': par,
            'images': images,
            'labels': labels,
            'recons': recon,
            'weights': sess.run([model.var_dict])[0],
            'accuracy': acc
        }

        pickle.dump(data, open('./savedir/' + par['savefn'] + '.pkl', 'wb'))
        print('Results saved.  Model complete.')
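
The three reported losses (task, reconstruction, latent) follow the usual classifier-plus-VAE decomposition; assuming standard definitions, a numpy sketch is:

import numpy as np

def softmax_xent(logits, labels):
    z = logits - logits.max(axis=-1, keepdims=True)
    logp = z - np.log(np.exp(z).sum(axis=-1, keepdims=True))
    return -logp[np.arange(len(labels)), labels].mean()

def vae_losses(logits, labels, x, x_recon, mu, log_var):
    task_loss = softmax_xent(logits, labels)          # classification term
    recon_loss = np.mean((x - x_recon) ** 2)          # reconstruction term
    latent_loss = -0.5 * np.mean(1. + log_var - mu**2 - np.exp(log_var))  # KL term
    return task_loss, recon_loss, latent_loss

logits = np.random.randn(8, 10)
labels = np.random.randint(0, 10, size=8)
x = np.random.rand(8, 784)
mu = np.random.randn(8, 32)
print(vae_losses(logits, labels, x, x + 0.1, mu, np.zeros_like(mu)))
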
Exemplo n.º 20
0
def main():
    """
    Reset TensorFlow before running anything
    """
    tf.reset_default_graph()
    """
    Create the stimulus class to generate trial parameters and input activity
    """
    stim = stimulus.Stimulus()

    n_input, n_hidden, n_output = par['shape']
    # Trials per iteration; gradients are calculated after each batch of batch_train_size
    N = par['batch_train_size'] * par['num_batches']
    """
    Define all placeholders
    """
    mask = tf.placeholder(
        tf.float32, shape=[par['num_time_steps'], par['batch_train_size']])
    x = tf.placeholder(
        tf.float32,
        shape=[n_input, par['num_time_steps'],
               par['batch_train_size']])  # input data
    y = tf.placeholder(
        tf.float32,
        shape=[n_output, par['num_time_steps'],
               par['batch_train_size']])  # target data

    # enter "config=tf.ConfigProto(log_device_placement=True)" inside Session to check whether CPU/GPU in use
    with tf.Session() as sess:

        #with tf.device("/gpu:0"):
        model = Model(x, y, mask)
        init = tf.global_variables_initializer()
        sess.run(init)
        t_start = time.time()

        saver = tf.train.Saver()
        # Restore variables from previous model if desired
        if par['load_previous_model']:
            saver.restore(sess, par['save_dir'] + par['ckpt_load_fn'])
            print('Model ' + par['ckpt_load_fn'] + ' restored.')

        # keep track of the model performance across training
        model_performance = {
            'accuracy': [],
            'loss': [],
            'perf_loss': [],
            'spike_loss': [],
            'trial': [],
            'time': []
        }

        for i in range(par['num_iterations']):

            # generate batch of N (batch_train_size X num_batches) trials
            trial_info = stim.generate_trial()

            # keep track of the model performance for this batch
            loss = np.zeros((par['num_batches']))
            perf_loss = np.zeros((par['num_batches']))
            spike_loss = np.zeros((par['num_batches']))
            accuracy = np.zeros((par['num_batches']))

            for j in range(par['num_batches']):
                """
                Select batches of size batch_train_size
                """
                ind = range(j * par['batch_train_size'],
                            (j + 1) * par['batch_train_size'])
                target_data = trial_info['desired_output'][:, :, ind]
                input_data = trial_info['neural_input'][:, :, ind]
                train_mask = trial_info['train_mask'][:, ind]
                """
                Run the model
                If learning rate > 0, then also run the optimizer;
                if learning rate = 0, then skip optimizer
                """
                if par['learning_rate'] > 0:
                    _, loss[j], perf_loss[j], spike_loss[j], y_hat, state_hist, W_rnn = \
                        sess.run([model.train_op, model.loss, model.perf_loss, model.spike_loss, model.y_hat, \
                        model.hidden_state_hist, model.WY], {x: input_data, y: target_data, mask: train_mask})
                else:
                    loss[j], perf_loss[j], spike_loss[j], y_hat, state_hist, W_rnn = \
                        sess.run([model.loss, model.perf_loss, model.spike_loss, model.y_hat, model.hidden_state_hist, \
                        model.WY], {x: input_data, y: target_data, mask: train_mask})

                accuracy[j] = analysis.get_perf(target_data, y_hat, train_mask)

            iteration_time = time.time() - t_start
            model_performance = append_model_performance(
                model_performance, accuracy, loss, perf_loss, spike_loss,
                (i + 1) * N, iteration_time)
            """
            Save the network model and output model performance to screen
            """
            if (i + 1) % par['iters_between_outputs'] == 0 or i + 1 == par[
                    'num_iterations']:
                print_results(i, N, iteration_time, perf_loss, spike_loss,
                              state_hist, accuracy)
                save_path = saver.save(sess,
                                       par['save_dir'] + par['ckpt_save_fn'])
        """
        Analyze the network model and save the results
        """
        if par['analyze_model']:
            weights = eval_weights(W_rnn)
            analysis.analyze_model(trial_info, y_hat, state_hist,
                                   model_performance, weights)
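
The inner loop above slices one large generated trial set into contiguous mini-batches along the trial axis; a minimal sketch of that indexing:

import numpy as np

num_batches, batch_train_size = 4, 32
# (units, time, trials), matching the layout fed to the placeholders above
data = np.random.rand(10, 100, num_batches * batch_train_size)

for j in range(num_batches):
    ind = range(j * batch_train_size, (j + 1) * batch_train_size)
    batch = data[:, :, ind]
    assert batch.shape[-1] == batch_train_size
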
Exemplo n.º 21
0
def main(gpu_id):

    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
    """
    Reset TensorFlow before running anything
    """
    tf.reset_default_graph()
    """
    Create the stimulus class to generate trial parameters and input activity
    """
    stim = stimulus.Stimulus()
    """
    Define all placeholders
    """
    mask = tf.placeholder(
        tf.float32, shape=[par['num_time_steps'], par['batch_train_size']])
    x = tf.placeholder(tf.float32,
                       shape=[
                           par['num_motion_dirs'], par['num_time_steps'],
                           par['batch_train_size']
                       ])  # input data
    y = tf.placeholder(tf.float32,
                       shape=[
                           par['n_output'], par['num_time_steps'],
                           par['batch_train_size']
                       ])  # target data

    with tf.Session() as sess:

        #with tf.device("/gpu:0"):
        model = Model(x, y, mask, True)
        init = tf.global_variables_initializer()
        sess.run(init)
        t_start = time.time()

        for i in range(par['num_iterations']):

            # generate batch of batch_train_size
            trial_info = stim.generate_trial(i // par['iters_per_group'])
            """
            plt.imshow(trial_info['desired_output'][:,:, 0], aspect = 'auto')
            plt.show()
            plt.imshow(trial_info['neural_input'][:,:, 0], aspect = 'auto')
            plt.show()
            """
            _, loss, perf_loss, spike_loss, weight_loss, y_hat, state_hist = \
                sess.run([model.train_op, model.loss, model.perf_loss, model.spike_loss, \
                model.weight_loss, model.y_hat, \
                model.hidden_state_hist], {x: trial_info['neural_input'], \
                y: trial_info['desired_output'], mask: trial_info['train_mask']})

            if i == par['iters_per_group']:
                sess.run(model.reset_weights)

            iteration_time = time.time() - t_start
            if (i + 1) % par['iters_between_outputs'] == 0 or i + 1 == par[
                    'num_iterations']:
                print_results(i, iteration_time, perf_loss, spike_loss,
                              weight_loss, trial_info['desired_output'])
                y1 = np.stack(y_hat, axis=1)
                """

                ind = np.argsort(np.var(np.mean(trial_info['desired_output'],axis=2), axis=1))
                plt.subplot(2,2,1)
                plt.plot(trial_info['desired_output'][ind[-1], :, 0])
                plt.plot(y1[ind[-1], :, 0],'r')
                plt.subplot(2,2,2)
                plt.plot(trial_info['desired_output'][ind[-2], :, 0])
                plt.plot(y1[ind[-2], :, 0],'r')
                plt.subplot(2,2,3)
                plt.plot(trial_info['desired_output'][ind[-3], :, 0])
                plt.plot(y1[ind[-3], :, 0],'r')
                plt.subplot(2,2,4)
                plt.plot(trial_info['desired_output'][ind[-4], :, 0])
                plt.plot(y1[ind[-4], :, 0],'r')
                plt.show()
                """

                weights = eval_weights()
                save_results = {'weights': weights, 'par': par, 'y_out': y1, 'y_target':  trial_info['desired_output'], \
                    'h': state_hist, 'input': trial_info['neural_input']}
                pickle.dump(
                    save_results,
                    open(par['save_dir'] + 'saved_results_wc1e6_v0.pkl', 'wb'))
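
The stimulus call above advances its group index every iters_per_group iterations, which is all the curriculum scheduling amounts to; a one-liner makes the schedule explicit (values illustrative):

iters_per_group, num_iterations = 250, 1000  # illustrative values
groups = [i // iters_per_group for i in range(num_iterations)]
print(sorted(set(groups)))  # [0, 1, 2, 3]: one group per block of iterations
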
Exemplo n.º 22
0
def main(gpu_id=None, code_state=historian.record_code_state()):
    """ Run supervised learning training """

    # Isolate requested GPU
    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id

    # Reset TensorFlow graph before running anything
    tf.reset_default_graph()

    # Define all placeholders
    x, y, m, l, ci, cj, h, sx, su = get_placeholders()

    # Set up stimulus and model performance recording
    stim = stimulus.Stimulus()
    model_performance = {
        'accuracy': [],
        'pulse_accuracy': [],
        'loss': [],
        'perf_loss': [],
        'spike_loss': [],
        'trial': []
    }

    # Start TensorFlow session
    with tf.Session() as sess:

        # Select CPU or GPU
        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            model = Model(x, y, m, l, ci, cj, h, sx, su)

        # Initialize variables and start the timer
        sess.run(tf.global_variables_initializer())
        t_start = time.time()

        # Begin training loop
        print('\nStarting training...\n')
        acc_count = 0
        accuracy_threshold = \
            np.array([0.0, 0.6, 0.7, 0.8, 0.9, 0.95, 0.96, \
                      0.97, 0.98])
        save_fn = par['save_dir'] + par['save_fn']
        save_fn_ind = save_fn[1:].find('.') - 1

        for i in range(par['num_iterations']):

            # Generate a batch of stimulus for training
            trial_info = shuffle_trials(stim)

            # Put together the feed dictionary
            feed_dict = {
                x: trial_info['neural_input'],
                y: trial_info['desired_output'],
                m: trial_info['train_mask']
            }

            # Run the model
            _, loss, perf_loss, spike_loss, y_hat, state_hist, syn_x_hist, syn_u_hist = \
                sess.run([model.train_op, model.loss, model.perf_loss, model.spike_loss, model.y_hat, \
                model.hidden_hist, model.syn_x_hist, model.syn_u_hist], feed_dict=feed_dict)

            # Calculate accuracy from the model's output
            if par['output_type'] == 'directional':
                accuracy, pulse_accuracy = analysis.get_coord_perf(
                    trial_info['desired_output'], y_hat,
                    trial_info['train_mask'], trial_info['pulse_id'])
            elif par['output_type'] == 'one_hot':
                accuracy, pulse_accuracy = analysis.get_perf(
                    trial_info['desired_output'], y_hat,
                    trial_info['train_mask'], trial_info['pulse_id'])

            # Record the model's performance
            model_performance = append_model_performance(
                model_performance, accuracy, pulse_accuracy, loss, perf_loss,
                spike_loss, (i + 1) * par['batch_train_size'])

            # Save and show the model's performance
            if i % par['iters_between_outputs'] == 0:
                print_results(i, par['trial_type'], perf_loss, spike_loss,
                              state_hist, accuracy, pulse_accuracy)

            # if i%200 in list(range(len(par['trial_type']))):
            #     weights = sess.run(model.var_dict)
            #     results = {
            #         'model_performance': model_performance,
            #         'parameters': par,
            #         'weights': weights}
            #     pickle.dump(results, open(par['save_dir'] + par['save_fn'], 'wb') )
            #     if i>=5 and all(np.array(model_performance['accuracy'][-5:]) > accuracy_threshold[acc_count]):
            #         break

            if i > 5 and all(
                    np.array(model_performance['accuracy'][-5:]) >
                    accuracy_threshold[acc_count]):

                print('SAVING')

                weights = sess.run(model.var_dict)
                results = {
                    'model_performance': model_performance,
                    'parameters': par,
                    'weights': weights,
                    'code_state': code_state
                }
                acc_str = str(int(accuracy_threshold[acc_count] * 100))
                sf = save_fn[:-4] + '_acc' + acc_str + save_fn[-4:]
                print(sf)
                pickle.dump(results, open(sf, 'wb'))
                acc_count += 1
                if acc_count >= len(accuracy_threshold):
                    break

        # If required, save the model, analyze it, and save the results
        if par['analyze_model']:
            weights = sess.run(model.var_dict)
            syn_x_stacked = np.stack(syn_x_hist, axis=1)
            syn_u_stacked = np.stack(syn_u_hist, axis=1)
            h_stacked = np.stack(state_hist, axis=1)
            trial_time = np.arange(0, h_stacked.shape[1] * par['dt'],
                                   par['dt'])
            mean_h = np.mean(np.mean(h_stacked, axis=2), axis=1)
            results = {
                'model_performance': model_performance,
                'parameters': par,
                'weights': weights,
                'trial_time': trial_time,
                'mean_h': mean_h
            }
            pickle.dump(results, open(par['save_dir'] + par['save_fn'], 'wb'))
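
The staged saving above checkpoints each time the last five accuracies clear the next threshold; a self-contained sketch of that logic (the save callable is a placeholder):

import numpy as np

thresholds = np.array([0.0, 0.6, 0.7, 0.8, 0.9, 0.95])

def maybe_checkpoint(history, count, save=lambda tag: print('saved', tag)):
    # Save once the last five accuracies all clear the current threshold,
    # then advance to the next threshold.
    if count < len(thresholds) and len(history) >= 5 \
            and all(np.array(history[-5:]) > thresholds[count]):
        save('acc' + str(int(thresholds[count] * 100)))
        count += 1
    return count

count, history = 0, []
for acc in [0.2, 0.3, 0.4, 0.5, 0.55, 0.62, 0.65, 0.7, 0.72, 0.75]:
    history.append(acc)
    count = maybe_checkpoint(history, count)
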
Exemplo n.º 23
0
def main(gpu_id=None):

    # Print out context
    print_important_params()

    # Select GPU
    if gpu_id is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id

    # Reduce memory consumption for GPU 0
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8) \
     if gpu_id == '0' else tf.GPUOptions()

    # Initialize stimulus environment
    environment = stimulus.Stimulus()

    # Reset graph and designate placeholders
    tf.reset_default_graph()
    stim_pl = tf.placeholder(tf.float32, [par['batch_size'], par['n_input']],
                             'stim')
    action_pl = tf.placeholder(tf.float32, [par['batch_size'], par['n_pol']],
                               'action')
    reward_pl = tf.placeholder(tf.float32, [par['batch_size'], par['n_val']],
                               'reward')
    future_val_pl = tf.placeholder(tf.float32,
                                   [par['batch_size'], par['n_val']],
                                   'prev_val')
    time_step_pl = tf.placeholder(tf.int32, [], 'time_step')

    # Start TensorFlow session
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:

        # Set up and initialize model on desired device
        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            model = Model(stim_pl, reward_pl, action_pl, future_val_pl,
                          time_step_pl)
        sess.run(tf.global_variables_initializer())

        # Start training loop
        for i in range(par['num_batches']):

            # Reset environment at the start of each iteration
            environment.reset_agents()
            environment.reset_rewards()

            # Pre-allocate prev_val and total_reward
            prev_stim = np.zeros((par['batch_size'], par['n_input']),
                                 dtype=np.float32)
            future_val = np.zeros((par['batch_size'], par['n_val']),
                                  dtype=np.float32)
            prev_action = np.zeros((par['batch_size'], par['n_pol']),
                                   dtype=np.float32)
            prev_reward = np.zeros((par['batch_size'], par['n_val']),
                                   dtype=np.float32)
            prev_t = 0
            total_reward = np.zeros((par['batch_size'], 1), dtype=np.float32)

            action_record = []

            # Iterate through time
            for t in range(par['num_time_steps']):

                # Make inputs
                stim_in = environment.make_inputs()

                # Train encoder weights, output the policy and value functions
                _, pol, val, rec_loss, sparsity_loss, x_read = sess.run([model.train_encoder, model.pol_out, model.val_out, \
                 model.reconstruction_loss, model.sparsity_loss, model.x_read], feed_dict = {stim_pl: stim_in})

                W = sess.run(model.var_dict)

                # Choose action, calculate reward and determine next state
                action = np.array([
                    np.random.multinomial(1, pol[b, :] - 1e-6)
                    for b in range(par['batch_size'])
                ])
                reward = environment.agent_action(action)
                action_record.append(action)

                # Update total reward and prev_val
                total_reward += reward

                if i > 100:
                    # Update the Hopfield network
                    sess.run([model.update_hopfield, model.train_RL], \
                     feed_dict = {stim_pl: prev_stim, action_pl: prev_action, reward_pl: prev_reward, \
                     future_val_pl: val, time_step_pl:prev_t})

                    prev_stim = stim_in
                    prev_reward = reward
                    prev_action = action
                    prev_t = t

                # Reset agents that have obtained a reward
                environment.reset_agents(reward != 0.)

            # Update model weights
            sess.run(model.update_weights)
            sess.run(model.reset_hopfield)

            # Analyze actions
            action_record = np.concatenate(action_record, axis=0)
            action_record = np.round(np.mean(action_record, axis=0),
                                     2).tolist()

            # Output network performance
            print('Iter {:>4} | Mean Reward: {:6.3f} | Recon Loss: {:8.6f} | Sparsity Loss: {:8.6f} | Action Dist: {}'.format(\
             i, np.mean(total_reward), rec_loss, sparsity_loss, action_record))
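
The action step above samples one categorical action per agent from the policy output; the small subtraction keeps np.random.multinomial from rejecting rows whose probabilities sum to slightly more than 1 through floating-point error. A standalone sketch:

import numpy as np

batch_size, n_pol = 8, 4
pol = np.random.rand(batch_size, n_pol)
pol /= pol.sum(axis=1, keepdims=True)   # rows are categorical distributions

action = np.array([np.random.multinomial(1, pol[b, :] - 1e-6)
                   for b in range(batch_size)])
print(action.sum(axis=1))               # every row is one-hot
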
Exemplo n.º 24
0
def main(save_fn, gpu_id=None):

    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id

    # train the convolutional layers with the CIFAR-10 dataset
    # otherwise, it will load the convolutional weights from the saved file
    if (par['task'] == 'cifar' or par['task']
            == 'imagenet') and par['train_convolutional_layers']:
        convolutional_layers.ConvolutionalLayers()

    print('\nRunning model.\n')

    # Reset TensorFlow graph
    tf.reset_default_graph()

    # Create placeholders for the model
    # input_data, target_data, gating, mask, dropout keep pct hidden layers, dropout keep pct input layers

    if par['task'] == 'mnist':
        x = tf.placeholder(tf.float32,
                           [par['batch_size'], par['layer_dims'][0]], 'stim')
    elif par['task'] == 'cifar' or par['task'] == 'imagenet':
        x = tf.placeholder(tf.float32, [par['batch_size'], 32, 32, 3], 'stim')
    y = tf.placeholder(tf.float32, [par['batch_size'], par['layer_dims'][-1]],
                       'out')
    mask = tf.placeholder(tf.float32,
                          [par['batch_size'], par['layer_dims'][-1]], 'mask')
    droput_keep_pct = tf.placeholder(tf.float32, [], 'dropout')
    input_droput_keep_pct = tf.placeholder(tf.float32, [], 'input_dropout')
    gating = [
        tf.placeholder(tf.float32, [par['layer_dims'][n + 1]], 'gating')
        for n in range(par['n_layers'] - 1)
    ]
    context_vector = tf.placeholder(tf.float32, [1, par['n_tasks']],
                                    'context_vector')

    stim = stimulus.Stimulus(labels_per_task=par['labels_per_task'])
    accuracy_full = []
    accuracy_grid = np.zeros((par['n_tasks'], par['n_tasks']))

    with tf.Session() as sess:

        if gpu_id is None:
            model = Model(x, y, gating, mask, droput_keep_pct,
                          input_droput_keep_pct, context_vector)
        else:
            with tf.device("/gpu:0"):
                model = Model(x, y, gating, mask, droput_keep_pct,
                              input_droput_keep_pct, context_vector)
        init = tf.global_variables_initializer()
        sess.run(init)
        t_start = time.time()
        sess.run(model.reset_prev_vars)

        for task in range(par['n_tasks']):

            cont_vect = np.zeros((1, par['n_tasks']), dtype=np.float32)
            cont_vect[0, task] = 1.

            # create dictionary of gating signals applied to each hidden layer for this task
            gating_dict = {k: v for k, v in zip(gating, par['gating'][task])}

            for i in range(par['n_train_batches']):

                # make batch of training data
                stim_in, y_hat, mk = stim.make_batch(task, test=False)

                if par['stabilization'] == 'pathint':

                    _, _, loss, AL, gl = sess.run([model.train_op, model.update_small_omega, model.task_loss, model.aux_loss, model.gate_loss], \
                        feed_dict = {x:stim_in, y:y_hat, **gating_dict, mask:mk, droput_keep_pct:par['drop_keep_pct'], \
                        input_droput_keep_pct:par['input_drop_keep_pct'], context_vector:cont_vect})

                elif par['stabilization'] == 'EWC':
                    _,loss, AL, gl, weight_grads, h, entropy_loss = sess.run([model.train_op, model.task_loss, model.aux_loss, model.gate_loss,\
                        model.weight_grads, model.h, model.entropy_loss], feed_dict = \
                        {x:stim_in, y:y_hat, **gating_dict, mask:mk, droput_keep_pct:par['drop_keep_pct'], \
                        input_droput_keep_pct:par['input_drop_keep_pct'], context_vector:cont_vect})

                if i % 500 == 0:
                    print('Iter: ', i, 'Loss: ', loss, 'Aux Loss: ', AL,
                          'gate loss ', gl, 'entropy loss', entropy_loss)

            # Update big omegas, and reset other values before starting new task
            if par['stabilization'] == 'pathint':
                big_omegas = sess.run(
                    [model.update_big_omega, model.big_omega_var])
            elif par['stabilization'] == 'EWC':
                for n in range(par['EWC_fisher_num_batches']):
                    stim_in, y_hat, mk = stim.make_batch(task, test=False)
                    _, _ = sess.run([model.update_big_omega,model.big_omega_var], feed_dict = \
                        {x:stim_in, y:y_hat, **gating_dict, mask:mk, droput_keep_pct:1.0, \
                        input_droput_keep_pct:1.0, context_vector:cont_vect})
                    big_omegas = sess.run([model.big_omega_var])

                sess.run([model.reset_shunted_weights])

            # Reset the Adam Optimizer, and set the previous parameter values to their current values
            sess.run(model.reset_adam_op)
            sess.run(model.reset_prev_vars)
            if par['stabilization'] == 'pathint':
                sess.run(model.reset_small_omega)

            # Test the networks on all trained tasks
            num_test_reps = 10
            accuracy = np.zeros((task + 1))
            for test_task in range(task + 1):
                cont_vect = np.zeros((1, par['n_tasks']), dtype=np.float32)
                cont_vect[0, test_task] = 1
                gating_dict = {
                    k: v
                    for k, v in zip(gating, par['gating'][test_task])
                }
                for r in range(num_test_reps):
                    stim_in, y_hat, mk = stim.make_batch(test_task, test=True)
                    acc = sess.run(model.accuracy, feed_dict={x:stim_in, y:y_hat, \
                        **gating_dict, mask:mk, droput_keep_pct:1.0, input_droput_keep_pct:1.0,\
                        context_vector:cont_vect})/num_test_reps
                    accuracy_grid[task, test_task] += acc
                    accuracy[test_task] += acc

            print('Task ', task, ' Mean ', np.mean(accuracy), ' First ',
                  accuracy[0], ' Last ', accuracy[-1])
            accuracy_full.append(np.mean(accuracy))

            # reset weights between tasks if called upon
            if par['reset_weights']:
                sess.run(model.reset_weights)

            above_zeros = []
            for i in range(len(h)):
                above_zeros.append(
                    np.float32(np.sum(h[i], axis=0, keepdims=True) > 1e-16))
                print('mean h above zero ', np.mean(above_zeros[i]))
            """
            for k in big_omegas[0].keys():
                plt.imshow(big_omegas[0][k], aspect = 'auto')
                plt.colorbar()
                plt.show()
                print(k, big_omegas[0][k].shape)
            """

        if par['save_analysis']:
            save_results = {'task': task, 'accuracy': accuracy, 'accuracy_full': accuracy_full, \
                            'accuracy_grid': accuracy_grid, 'big_omegas': big_omegas, 'par': par}
            pickle.dump(save_results, open(par['save_dir'] + save_fn, 'wb'))

    print('\nModel execution complete.')
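
The EWC branch above estimates a diagonal Fisher from squared gradients over a few batches and then penalizes movement away from the consolidated weights; assuming the standard formulation, a numpy sketch:

import numpy as np

w = np.random.randn(6)
w_star = w.copy()          # weights consolidated after the previous task
fisher = np.zeros_like(w)
num_batches, c = 16, 0.5   # c plays the role of the EWC coefficient

for _ in range(num_batches):
    grad = np.random.randn(6)          # stand-in for a per-batch gradient
    fisher += grad ** 2 / num_batches  # running mean of squared gradients

def ewc_penalty(w):
    # Quadratic penalty keeping w near w_star, weighted by importance.
    return c * np.sum(fisher * (w - w_star) ** 2)

print(ewc_penalty(w + 0.1))
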
Exemplo n.º 25
0
def main(gpu_id=None):
    """ Run training """

    # Isolate requested GPU
    if gpu_id is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id

    # Reset TensorFlow graph
    tf.reset_default_graph()

    # Define placeholders
    x = tf.placeholder(
        tf.float32, [par['num_time_steps'], par['batch_size'], par['n_input']],
        'stim')
    y = tf.placeholder(
        tf.float32,
        [par['num_time_steps'], par['batch_size'], par['n_output']], 'out')
    m = tf.placeholder(tf.float32, [par['num_time_steps'], par['batch_size']],
                       'mask')

    # Set up stimulus and recording
    stim = stimulus.Stimulus()
    data_record = {
        n: []
        for n in
        ['iter', 'acc', 'task_loss', 'spike_loss', 'entropy_loss', 'spiking']
    }

    # Start TensorFlow session
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8
                                ) if gpu_id == '0' else tf.GPUOptions()
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:

        # Select CPU or GPU
        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            model = Model(x, y, m)

        # Initialize variables and start timer
        sess.run(tf.global_variables_initializer())
        t_start = time.time()

        # Begin training loop, iterating over tasks
        for i in range(par['n_train_batches']):

            # Generate a batch of stimulus data for training
            trial_info = stim.make_batch()

            # Put together the feed dictionary
            feed_dict = {
                x: trial_info['neural_input'],
                y: trial_info['desired_output'],
                m: trial_info['train_mask']
            }

            # Run the model
            _, task_loss, output, state, spike, syn_x, syn_u = \
             sess.run([model.train, model.task_loss, \
             model.output, model.h, model.h_out, model.syn_x, model.syn_u], feed_dict=feed_dict)

            # Display network performance
            if i % 25 == 0:
                spiking = (par['num_time_steps'] / par['dt']) * np.mean(spike)
                acc = get_perf(trial_info['desired_output'], output,
                               trial_info['train_mask'])

                data_record['iter'].append(i)
                data_record['acc'].append(acc)
                data_record['task_loss'].append(task_loss)
                data_record['spiking'].append(spiking)

                trials = 4
                if i % 600 == -1 and i > 0:  # disabled debugging branch; this condition never holds
                    for k in range(200):
                        fig, ax = plt.subplots(5, trials, figsize=[12, 8])
                        for b in range(trials):
                            ax[0, b].plot(state[:, b, k])
                            ax[1, b].plot(spike[:, b, k])
                            ax[2, b].plot(syn_x[:, b, k])
                            ax[3, b].plot(syn_u[:, b, k])
                            ax[4, b].plot(syn_x[:, b, k] * syn_u[:, b, k])
                        plt.savefig(
                            './savedir/mt128_neuron{}_outputs.png'.format(k))
                        plt.clf()
                        plt.close()

                trials = 4
                fig, ax = plt.subplots(6, trials, figsize=[12, 8])
                for b in range(trials):
                    ax[0, b].imshow(trial_info['neural_input'][:, b, :].T,
                                    aspect='auto')
                    ax[1, b].imshow(trial_info['desired_output'][:, b, :].T,
                                    aspect='auto')
                    ax[2, b].imshow(output[:, b, :].T, aspect='auto')
                    ax[3, b].imshow(state[:, b, :].T, aspect='auto')
                    ax[4, b].imshow(spike[:, b, :].T, aspect='auto')
                    ax[5, b].imshow((syn_u[:, b, :] * syn_x[:, b, :]).T,
                                    aspect='auto')

                ax[0, 0].set_ylabel('Network input')
                ax[1, 0].set_ylabel('Expected Output')
                ax[2, 0].set_ylabel('Network Output')
                ax[3, 0].set_ylabel('Membrane Voltage')
                ax[4, 0].set_ylabel('Spike Output')
                ax[5, 0].set_ylabel('Synaptic Eff.')
                ax[4, 0].set_xlabel('Time')

                plt.savefig('./savedir/iter{}_outputs.png'.format(i))
                plt.clf()
                plt.close()

                pickle.dump(
                    data_record,
                    open(par['savedir'] + par['save_fn'] + '.pkl', 'wb'))

                print('Iter: {:>6} | Accuracy: {:5.3f} | Task Loss: {:5.3f} | Spike Rate: {:6.2f} Hz'.format(\
                  i, acc, task_loss, spiking))

        if par['save_analysis']:
            save_results = {'data_record': data_record, 'par': par}
            pickle.dump(save_results,
                        open(par['savedir'] + par['save_fn'] + '.pkl', 'wb'))

    print('\nModel execution complete.')
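
The spike-rate readout above scales the mean spike count by num_time_steps/dt; the conventional conversion for dt in milliseconds is 1000/dt times the mean spike probability per step, shown here as a sketch:

import numpy as np

dt = 1.0                                     # ms per time step (assumed)
spike = np.random.rand(100, 32, 64) < 0.02   # (time, batch, neurons), ~20 Hz
rate_hz = (1000.0 / dt) * np.mean(spike)
print(round(rate_hz, 1))
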
Exemplo n.º 26
0
def main(gpu_id=None):

    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
    """
    Reset TensorFlow before running anything
    """
    tf.reset_default_graph()
    """
    Create the stimulus class to generate trial parameters and input activity
    """
    stim = stimulus.Stimulus()

    f = pickle.load(open('./savedir/var_pulses_8_cue_off.pkl', 'rb'))
    par['weights_trained'] = f['weights']
    update_parameters(f['parameters'])

    n_input, n_hidden, n_output = par['shape']
    # Trials per iteration; gradients are calculated after each batch of batch_train_size
    N = par['batch_train_size']
    """
    Define all placeholders
    """
    mask = tf.placeholder(
        tf.float32, shape=[par['num_time_steps'], par['batch_train_size']])
    x = tf.placeholder(
        tf.float32,
        shape=[n_input, par['num_time_steps'],
               par['batch_train_size']])  # input data
    y = tf.placeholder(
        tf.float32,
        shape=[n_output, par['num_time_steps'],
               par['batch_train_size']])  # target data

    config = tf.ConfigProto()
    #config.gpu_options.allow_growth=True

    # enter "config=tf.ConfigProto(log_device_placement=True)" inside Session to check whether CPU/GPU in use
    with tf.Session(config=config) as sess:

        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            model = Model(x, y, mask)

        init = tf.global_variables_initializer()
        sess.run(init)

        # keep track of the model performance across training
        model_performance = {
            'accuracy': [],
            'pulse_accuracy': [],
            'loss': [],
            'perf_loss': [],
            'spike_loss': [],
            'trial': []
        }

        for i in range(par['num_iterations']):

            # generate batch of batch_train_size
            trial_info = stim.generate_trial(
                analysis=False,
                num_fixed=0,
                var_delay=par['var_delay'],
                var_resp_delay=par['var_resp_delay'],
                var_num_pulses=par['var_num_pulses'])

            if not par['var_num_pulses']:
                onset = np.array([
                    np.unique(np.array(trial_info['timeline']))[-2 * p - 2]
                    for p in range(par['num_pulses'])
                ][::-1])
                pulse_masks = np.array([
                    np.zeros((par['num_time_steps'], par['batch_train_size']),
                             dtype=np.float32)
                ] * par['num_pulses'])
                for p in range(par['num_pulses']):
                    pulse_masks[p, onset[p] +
                                par['mask_duration'] // par['dt']:onset[p] +
                                par['sample_time'] // par['dt'], :] = 1
            """
            Run the model
            """
            _, loss, perf_loss, spike_loss, y_hat, state_hist, syn_x_hist, syn_u_hist = \
                sess.run([model.train_op, model.loss, model.perf_loss, model.spike_loss, model.y_hat, \
                model.hidden_state_hist, model.syn_x_hist, model.syn_u_hist], {x: trial_info['neural_input'], \
                y: trial_info['desired_output'], mask: trial_info['train_mask']})

            accuracy = analysis.get_perf(trial_info['desired_output'], y_hat,
                                         trial_info['train_mask'])

            pulse_accuracy = []
            if not par['var_num_pulses']:
                for p in range(par['num_pulses']):
                    pulse_accuracy.append(
                        analysis.get_perf(trial_info['desired_output'], y_hat,
                                          pulse_masks[p]))

            model_performance = append_model_performance(
                model_performance, accuracy, pulse_accuracy, loss, perf_loss,
                spike_loss, (i + 1) * N)
            """
            Save the network model and output model performance to screen
            """
            if i % par['iters_between_outputs'] == 0 and i > 0:
                print_results(i, N, perf_loss, spike_loss, state_hist,
                              accuracy)

            if i % 5000 == 0:
                weights = eval_weights()
                syn_x_stacked = np.stack(syn_x_hist, axis=1)
                syn_u_stacked = np.stack(syn_u_hist, axis=1)
                h_stacked = np.stack(state_hist, axis=1)
                trial_time = np.arange(0, h_stacked.shape[1] * par['dt'],
                                       par['dt'])
                mean_h = np.mean(np.mean(h_stacked, axis=2), axis=1)
                results = {
                    'model_performance': model_performance,
                    'parameters': par,
                    'weights': weights,
                    'trial_time': trial_time,
                    'mean_h': mean_h,
                    'timeline': trial_info['timeline']
                }
                pickle.dump(results,
                            open(par['save_dir'] + par['save_fn'], 'wb'))

            if accuracy > 0.995:
                weights = eval_weights()
                syn_x_stacked = np.stack(syn_x_hist, axis=1)
                syn_u_stacked = np.stack(syn_u_hist, axis=1)
                h_stacked = np.stack(state_hist, axis=1)
                trial_time = np.arange(0, h_stacked.shape[1] * par['dt'],
                                       par['dt'])
                mean_h = np.mean(np.mean(h_stacked, axis=2), axis=1)
                results = {
                    'model_performance': model_performance,
                    'parameters': par,
                    'weights': weights,
                    'trial_time': trial_time,
                    'mean_h': mean_h,
                    'timeline': trial_info['timeline']
                }
                pickle.dump(results,
                            open(par['save_dir'] + par['save_fn'], 'wb'))
                for b in range(10):
                    plot_list = [
                        trial_info['desired_output'][:, :, b],
                        softmax(
                            np.array(y_hat)[:, :, b].T -
                            np.max(np.array(y_hat)[:, :, b].T))
                    ]
                    fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(7, 7))
                    j = 0
                    for ax in axes.flat:
                        im = ax.imshow(plot_list[j], aspect='auto')
                        j += 1
                    cax, kw = mpl.colorbar.make_axes([ax for ax in axes.flat])
                    plt.colorbar(im, cax=cax, **kw)
                    plt.savefig("./savedir/output_" + str(par['num_pulses']) +
                                "pulses_iter_" + str(i) + "_" + str(b) +
                                ".png")
                    plt.close()
                    plt.imshow(trial_info['neural_input'][:, :, b])
                    plt.savefig("./savedir/input_" + str(par['num_pulses']) +
                                "pulses_iter_" + str(i) + "_" + str(b) +
                                ".png")
                    plt.close()
                break
        """
        Save model, analyze the network model and save the results
        """
        #save_path = saver.save(sess, par['save_dir'] + par['ckpt_save_fn'])
        if par['analyze_model']:
            weights = eval_weights()
            syn_x_stacked = np.stack(syn_x_hist, axis=1)
            syn_u_stacked = np.stack(syn_u_hist, axis=1)
            h_stacked = np.stack(state_hist, axis=1)
            trial_time = np.arange(0, h_stacked.shape[1] * par['dt'],
                                   par['dt'])
            mean_h = np.mean(np.mean(h_stacked, axis=2), axis=1)
            results = {
                'model_performance': model_performance,
                'parameters': par,
                'weights': weights,
                'trial_time': trial_time,
                'mean_h': mean_h,
                'timeline': trial_info['timeline']
            }
            pickle.dump(results, open(par['save_dir'] + par['save_fn'], 'wb'))
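
The per-pulse masks above let accuracy be scored pulse by pulse: each mask is 1 only inside a scoring window after that pulse's onset. A sketch with illustrative timing values:

import numpy as np

num_time_steps, batch, dt = 200, 8, 10     # dt in ms
onsets = np.array([40, 90, 140])           # pulse onsets, in time steps
mask_duration, sample_time = 50, 200       # ms

pulse_masks = np.zeros((len(onsets), num_time_steps, batch), dtype=np.float32)
for p, onset in enumerate(onsets):
    # Score only the window after each pulse's masked period.
    pulse_masks[p, onset + mask_duration // dt : onset + sample_time // dt, :] = 1
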
Exemplo n.º 27
0
def main(task, gpu_id):
    # If a gpu id has been provided, use it
    if gpu_id is not None:
        # Specify the gpu id to be used
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id

    # Reset TensorFlow before running anything
    tf.reset_default_graph()

    # Tensorflow finds the supported CPU and GPU devices you can use
    config = tf.ConfigProto()

    trial_length = par['num_time_steps']

    # Calculate shape of the stimulus for this task
    # Define a placeholder for the stimulus the agent sees
    stimulus = tf.placeholder(
        tf.float64,
        shape=[trial_length, task.num_inputs, par['batch_train_size']])
    # Define a placeholder for the truth or correct answer about each trial
    truth = tf.placeholder(tf.float64, shape=par['batch_train_size'])
    # A TEMPORARY placeholder for target
    target = tf.placeholder(tf.float64,
                            shape=[trial_length, 3, par['batch_train_size']])

    # Create a model for the given task object
    M = Model()
    # Build the tf structure that runs trials
    M.run_model(task, stimulus, target)
    M.optimize(task, target)

    par['num_receptive_fields'] = 1
    par['num_fix_tuned'] = 0
    par['num_rule_tuned'] = 0
    par['num_rules'] = 1
    par['variable_delay_max'] = 100
    par['mask_duration'] = 0
    par['n_output'] = 3
    par['input_mean'] = 0
    par['n_input'] = 36
    par['catch_trial_pct'] = 0
    par['rotation_match'] = 0
    # Create a stimulus generator from Nick's code
    stim = stm.Stimulus()
    with tf.Session(config=config) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        t_start = time.time()
        for it in range(par['num_iterations']):
            # Generate a batch of trials and their stimuli for this iteration
            trial_info = stim.generate_trial()
            """
            Run the model
            """
            this_truth = np.zeros(par['batch_train_size'])
            this_truth[trial_info['desired_output'][1, -1, :] == 1] = 1  # Match trials
            this_truth[trial_info['desired_output'][2, -1, :] == 1] = 2  # Non-match trials
            _, _, vloss, ploss, pol_out, reward, this_action_array, this_time_mask, baseline, advantage = \
                sess.run([M.pol_train_op, M.val_train_op, M.Loss_val, M.Loss_pol, task.pol_out_history, task.reward, task.action_array, \
                task.time_mask, task.val_out_history, M.advantage], \
                {stimulus: np.swapaxes(trial_info['neural_input'], 1, 0), truth: this_truth, target: np.swapaxes(trial_info['desired_output'], 1, 0)})
            reward = reward * this_time_mask
            baseline = np.array(baseline) * this_time_mask
            pol_out = np.array(pol_out)

            if it % 100 == 0:
                plt.subplot(2, 2, 1)
                plt.plot(pol_out[:, :, 0])
                plt.legend(['Fixate', 'Match', 'Non-match'])
                if this_truth[0] == 1:
                    plt.title('match' + '__' + str(reward.sum()))
                elif this_truth[0] == 2:
                    plt.title('non-match' + '__' + str(reward.sum()))
                plt.subplot(2, 2, 2)
                temp = np.tile(this_time_mask[:, 0].transpose(),
                               [3, 1]).transpose() * this_action_array[:, :, 0]
                plt.plot(temp)
                #plt.plot(advantage[:,0])
                #plt.plot(Pan_results['ploss'].transpose())
                #plt.plot(Pan_results['vloss'].transpose())
                #plt.legend(['-J', 'E'])
                plt.subplot(2, 2, 3)
                plt.plot(np.array(baseline)[:, 0])
                #plt.plot(my_reward_cum[:,0])
                #pdb.set_trace()
                plt.plot(reward[:, 0])
                plt.title('baseline')
                plt.plot(advantage[:, 0])
                plt.subplot(2, 2, 4)
                #plt.plot(np.squeeze(np.array(val_out))[:,0])
                #plt.title('his baseline')
                #plt.plot(trial_info['neural_input'][:,:,0].transpose())
                plt.plot(
                    np.swapaxes(trial_info['desired_output'], 1, 0)[:, :, 0])
                plt.title('Desired output')
                plt.savefig(par['save_path'] + 'Iteration_' + str(it) +
                            '.png')  # save the figure to file
                plt.close()
                print('%5d  |   Vloss: %6.6f   |   Reward: %4d' %
                      (it, vloss, reward.sum()))
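
The match/non-match labels above are read off the final time step of the one-hot desired output; a self-contained toy check of that reduction (array shapes and values here are illustrative only):

import numpy as np

# desired_output has shape (n_output=3, n_time, batch); channel 1 = match,
# channel 2 = non-match, one-hot at the final time step.
desired_output = np.zeros((3, 5, 4))
desired_output[1, -1, :2] = 1   # first two trials are match trials
desired_output[2, -1, 2:] = 1   # last two trials are non-match trials

truth = np.zeros(desired_output.shape[2])
truth[desired_output[1, -1, :] == 1] = 1   # match -> class 1
truth[desired_output[2, -1, :] == 1] = 2   # non-match -> class 2
print(truth)   # [1. 1. 2. 2.]
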
Exemplo n.º 28
0
def main(gpu_id=None):

    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
    """
    Print key parameters
    """
    print_important_params()
    """
    Reset TensorFlow before running anything
    """
    tf.reset_default_graph()
    """
    Create the stimulus class to generate trial parameters and input activity
    """
    stim = stimulus.Stimulus()

    n_input, n_hidden, n_output = par['shape']
    # trials per iteration; gradients are calculated after each batch of this size
    N = par['batch_train_size']
    """
    Define all placeholders
    """
    mask = tf.placeholder(
        tf.float32, shape=[par['num_time_steps'], par['batch_train_size']])
    x = tf.placeholder(
        tf.float32,
        shape=[n_input, par['num_time_steps'],
               par['batch_train_size']])  # input data
    y = tf.placeholder(
        tf.float32,
        shape=[n_output, par['num_time_steps'],
               par['batch_train_size']])  # target data

    config = tf.ConfigProto()

    # enter "config=tf.ConfigProto(log_device_placement=True)" inside Session to check whether CPU/GPU in use
    with tf.Session(config=config) as sess:

        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            model = Model(x, y, mask)

        sess.run(tf.global_variables_initializer())

        # keep track of the model performance across training
        model_performance = {
            'accuracy': [],
            'loss': [],
            'perf_loss': [],
            'spike_loss': [],
            'weight_loss': [],
            'trial': []
        }

        for i in range(par['num_iterations']):

            # generate a batch of batch_train_size trials
            trial_info = stim.generate_trial(set_rule=None)
            """
            Run the model
            """
            _, loss, perf_loss, spike_loss, weight_loss, y_hat, state_hist, syn_x_hist, syn_u_hist = \
                sess.run([model.train_op, model.loss, model.perf_loss, model.spike_loss, model.weight_loss, model.y_hat, \
                model.hidden_state_hist, model.syn_x_hist, model.syn_u_hist], {x: trial_info['neural_input'], \
                y: trial_info['desired_output'], mask: trial_info['train_mask']})

            accuracy, _, _ = analysis.get_perf(trial_info['desired_output'],
                                               y_hat, trial_info['train_mask'])

            model_performance = append_model_performance(
                model_performance, accuracy, loss, perf_loss, spike_loss,
                weight_loss, (i + 1) * N)
            """
            Save the network model and output model performance to screen
            """
            if i % par['iters_between_outputs'] == 0:
                print_results(i, N, perf_loss, spike_loss, weight_loss,
                              state_hist, accuracy)
        """
        Save model, analyze the network model and save the results
        """
        save_results(model_performance)
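
The training loop above delegates bookkeeping to append_model_performance, which is not shown in the listing; given the dictionary initialized at the top of the session, a plausible minimal implementation (an assumption about the missing helper) is:

def append_model_performance(model_performance, accuracy, loss, perf_loss,
                             spike_loss, weight_loss, trial_num):
    # Record one snapshot per training iteration; keys mirror the
    # model_performance dictionary created above.
    model_performance['accuracy'].append(accuracy)
    model_performance['loss'].append(loss)
    model_performance['perf_loss'].append(perf_loss)
    model_performance['spike_loss'].append(spike_loss)
    model_performance['weight_loss'].append(weight_loss)
    model_performance['trial'].append(trial_num)
    return model_performance
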
Exemplo n.º 29
0
def main(save_fn, gpu_id=None):
    """ Run supervised learning training """

    # Update all dependencies in parameters
    update_dependencies()

    # Isolate requested GPU
    if gpu_id is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id

    # If desired, train the convolutional layers with the CIFAR datasets
    # Otherwise, the network will load convolutional weights from the saved file
    if (par['task'] in ['cifar', 'imagenet', 'colored_mnist'
                        ]) and par['train_convolutional_layers']:
        convolutional_layers.ConvolutionalLayers()

    print('\nRunning model.\n')

    # Reset TensorFlow graph
    tf.reset_default_graph()

    # Create placeholders for the model
    if par['task'] == 'mnist':
        x = tf.placeholder(tf.float32,
                           [par['batch_size'], par['layer_dims'][0]], 'stim')
    elif par['task'] == 'colored_mnist':
        x = tf.placeholder(tf.float32, [par['batch_size'], 32, 32, 3], 'stim')
    elif par['task'] == 'cifar' or par['task'] == 'imagenet':
        x = tf.placeholder(tf.float32, [par['batch_size'], 32, 32, 3], 'stim')
    y = tf.placeholder(tf.float32, [par['batch_size'], par['layer_dims'][-1]],
                       'out')
    mask = tf.placeholder(tf.float32,
                          [par['batch_size'], par['layer_dims'][-1]], 'mask')
    rule = tf.placeholder(tf.float32, [par['batch_size'], par['n_tasks']],
                          'rulecue')
    gating = [
        tf.placeholder(tf.float32, [par['layer_dims'][n + 1]], 'gating')
        for n in range(par['n_layers'] - 1)
    ]
    dropout_keep_pct = tf.placeholder(tf.float32, [], 'dropout')
    input_dropout_keep_pct = tf.placeholder(tf.float32, [], 'input_dropout')

    # Set up stimulus
    if par['task'] == 'colored_mnist':
        stim = stimulus.Stimulus(labels_per_task=par['labels_per_task'],
                                 sep=par['separability'])
    else:
        stim = stimulus.Stimulus(labels_per_task=par['labels_per_task'])

    # Initialize accuracy records
    accuracy_full = []
    accuracy_grid = np.zeros((par['n_tasks'], par['n_tasks']))

    # Enter TensorFlow session
    with tf.Session() as sess:

        # Select CPU or GPU
        device = '/cpu:0' if gpu_id is None else '/gpu:0'
        with tf.device(device):
            model = Model(x, y, gating, mask, dropout_keep_pct,
                          input_dropout_keep_pct, rule)

        # Initialize variables
        sess.run(tf.global_variables_initializer())
        sess.run(model.reset_prev_vars)

        # Begin training loop, iterating over tasks
        for task in range(par['n_tasks']):

            # Create dictionary of gating signals applied to each hidden layer for this task
            gating_dict = {k: v for k, v in zip(gating, par['gating'][task])}

            # Create rule cue vector for this task
            rule_cue = np.zeros([par['batch_size'], par['n_tasks']])
            rule_cue[:, task] = 1

            # Iterate over batches
            for i in range(par['n_train_batches']):

                # Make batch of training data
                stim_in, y_hat, mk = stim.make_batch(task, test=False)

                # Run the model using one of the available stabilization methods
                if par['stabilization'] == 'pathint':
                    _, _, loss, AL = sess.run([model.train_op, model.update_small_omega, model.task_loss, model.aux_loss], \
                        feed_dict={x:stim_in, y:y_hat, **gating_dict, mask:mk, dropout_keep_pct:par['drop_keep_pct'], \
                        input_dropout_keep_pct:par['input_drop_keep_pct'], rule:rule_cue})
                elif par['stabilization'] == 'EWC':
                    _, loss, AL = sess.run([model.train_op, model.task_loss, model.aux_loss], \
                        feed_dict={x:stim_in, y:y_hat, **gating_dict, mask:mk, dropout_keep_pct:par['drop_keep_pct'], \
                        input_dropout_keep_pct:par['input_drop_keep_pct'], rule:rule_cue})

                # Display network performance
                if i % 500 == 0:
                    print('Iter: ', i, 'Loss: ', loss, 'Aux Loss: ', AL)

            # Update the big omegas and reset other values before starting a new task
            if par['stabilization'] == 'pathint':
                sess.run(model.update_big_omega)
            elif par['stabilization'] == 'EWC':
                for _ in range(par['EWC_batch_divisor'] *
                               par['EWC_fisher_num_batches']):
                    stim_in, _, mk = stim.make_batch(task, test=False)
                    sess.run([model.update_big_omega], feed_dict = \
                        {x:stim_in, **gating_dict, mask:mk, dropout_keep_pct:par['drop_keep_pct'], \
                        input_dropout_keep_pct:par['input_drop_keep_pct'], rule:rule_cue})

            # Reset the Adam Optimizer, and set the prev_weight values to their current values
            sess.run(model.reset_adam_op)
            sess.run(model.reset_prev_vars)
            if par['stabilization'] == 'pathint':
                sess.run(model.reset_small_omega)

            # Test the networks on all trained tasks
            num_test_reps = 10
            accuracy = np.zeros((task + 1))
            for test_task in range(task + 1):

                # Use appropriate gating and rule cues
                gating_dict = {
                    k: v
                    for k, v in zip(gating, par['gating'][test_task])
                }
                test_rule_cue = np.zeros([par['batch_size'], par['n_tasks']])
                test_rule_cue[:, test_task] = 1

                # Repeat the test as desired
                for r in range(num_test_reps):
                    stim_in, y_hat, mk = stim.make_batch(test_task, test=True)
                    acc = sess.run(model.accuracy, feed_dict={x:stim_in, y:y_hat, \
                        **gating_dict, mask:mk, dropout_keep_pct:1.0, input_dropout_keep_pct:1.0, rule:test_rule_cue})/num_test_reps
                    accuracy_grid[task, test_task] += acc
                    accuracy[test_task] += acc

            # Display network performance after testing is complete
            print('Task ', task, ' Mean ', np.mean(accuracy), ' First ',
                  accuracy[0], ' Last ', accuracy[-1])
            accuracy_full.append(np.mean(accuracy))

            # Reset weights between tasks if called upon
            if par['reset_weights']:
                sess.run(model.reset_weights)

        # Save model performance and parameters if desired
        if par['save_analysis']:
            save_results = {'task': task, 'accuracy': accuracy, 'accuracy_full': accuracy_full, \
                            'accuracy_grid': accuracy_grid, 'par': par}
            pickle.dump(save_results, open(par['save_dir'] + save_fn, 'wb'))

    print('\nModel execution complete.')

    # Append the mean accuracy after the first task to a text file
    with open('accs_200_squared.txt', 'a') as f:
        f.write(str(accuracy_full[0]) + "\n")
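
accuracy_grid[task, test_task] above accumulates mean test accuracy on every task seen so far after each training stage, so forgetting can be read off it directly; a small post-hoc sketch (a hypothetical helper, not part of the original) is:

import numpy as np

def mean_forgetting(accuracy_grid):
    # Compare each task's accuracy right after it was trained (the diagonal)
    # with its accuracy after the final task was trained (the last row).
    just_trained = np.diag(accuracy_grid)
    at_end = accuracy_grid[-1, :]
    return np.mean(just_trained - at_end)
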
Exemplo n.º 30
0
def analyze_model_from_file(filename, savefile=None, update_params=None):

    """ The first section loads the model weights and simulates the network on
        several different task conditions, saving the network activity and output """

    # Avoid a mutable default argument
    if update_params is None:
        update_params = {}

    results = pickle.load(open(filename, 'rb'))

    if savefile is None:
        results['parameters']['save_fn'] = 'test.pkl'
    else:
        results['parameters']['save_fn'] = savefile

    update_parameters(results['parameters'])
    update_parameters(update_params)

    stim = stimulus.Stimulus()

    # generate trials with match probability at 50%
    trial_info = stim.generate_trial(test_mode = True)
    input_data = np.squeeze(np.split(trial_info['neural_input'], par['num_time_steps'], axis=0))

    h_init = results['weights']['h']

    y, h, syn_x, syn_u = run_model(input_data, h_init, \
        results['parameters']['syn_x_init'], results['parameters']['syn_u_init'], results['weights'])

    # generate trials with random sample and test stimuli, used for decoding
    trial_info_decode = stim.generate_trial(test_mode = True)
    input_data = np.squeeze(np.split(trial_info_decode['neural_input'], par['num_time_steps'], axis=0))
    _, h_decode, syn_x_decode, syn_u_decode = run_model(input_data, h_init, \
        results['parameters']['syn_x_init'], results['parameters']['syn_u_init'], results['weights'])

    # generate trials using DMS task, only used for measuring how neuronal and synaptic representations evolve in
    # a standardized way, used for figure correlating persistent activity and manipulation
    update_parameters({'trial_type': 'DMS'})
    trial_info_dms = stim.generate_trial(test_mode = True)
    input_data = np.squeeze(np.split(trial_info_dms['neural_input'], trial_info_dms['neural_input'].shape[0], axis=0))
    _, h_dms, syn_x_dms, syn_u_dms = run_model(input_data, h_init, \
        results['parameters']['syn_x_init'], results['parameters']['syn_u_init'], results['weights'])
    update_parameters(results['parameters']) # reset trial type to original value
    update_parameters(update_params)

    """ The next section performs various analysis """

    # calculate task accuracy
    results['task_accuracy'], _, _ = get_perf(trial_info['desired_output'], y, trial_info['train_mask'])
    results['task_accuracy_per_rule'] = []
    for r in np.unique(trial_info['rule']):
        ind = np.where(trial_info['rule'] == r)[0]
        acc, _, _ = get_perf(trial_info['desired_output'][:,ind,:], y[:,ind,:], trial_info['train_mask'][:, ind])
        results['task_accuracy_per_rule'].append(acc)

    print('Task accuracy', results['task_accuracy'])

    if par['calculate_resp_matrix']:
        print('calculate response matrix...')
        resp_matrix_results = calculate_response_matrix(trial_info_decode, results['weights'])
        for key, val in resp_matrix_results.items():
            if np.var(val) > 0:
                results[key] = val


    # Decode the sample direction from neuronal activity and synaptic efficacies using support vector machines
    trial_time = np.arange(0,h.shape[0]*par['dt'], par['dt'])
    trial_time_dms = np.arange(0,h_dms.shape[0]*par['dt'], par['dt'])
    if par['decoding_reps'] > 0:
        print('decoding activity...')
        decoding_results = calculate_svms(h_decode, syn_x_decode, syn_u_decode, trial_info_decode, trial_time, \
            num_reps = par['decoding_reps'], num_reps_stability = 10, decode_test = par['decode_test'], \
            decode_rule = par['decode_rule'], decode_match = par['decode_match'])
        for key, val in decoding_results.items():
            if np.var(val) > 0:
                results[key] = val

        if par['trial_type'] in ['DMS', 'DMC', 'DMRS90', 'DMRS90ccw', 'DMRS45', 'DMRS180', 'location_DMS']:
            for key, val in decoding_results.items():
                if np.var(val) > 0:
                    results[key + '_dms'] = val

        else:
            # Calculate decoding for a DMS trial, used to correlate persistent activity and manipulation
            update_parameters({'trial_type': 'DMS'})
            decoding_results = calculate_svms(h_dms, syn_x_dms, syn_u_dms, trial_info_dms, trial_time_dms, \
                num_reps = par['decoding_reps'], num_reps_stability = 0, decode_test = par['decode_test'], decode_rule = par['decode_rule'])
            for key, val in decoding_results.items():
                if np.var(val) > 0:
                    results[key + '_dms'] = val
            update_parameters(results['parameters'])
            update_parameters(update_params)



    # Calculate neuronal and synaptic sample motion tuning
    if par['analyze_tuning']:
        print('calculate tuning...')

        tuning_results = calculate_tuning(h_decode, syn_x_decode, syn_u_decode, \
            trial_info_decode, trial_time, results['weights'], calculate_test = par['decode_test'])
        for key, val in tuning_results.items():
            if np.var(val) > 0:
                results[key] = val

        # Calculate tuning for a DMS trial, used to correlate persistent activity and manipulation
        if par['trial_type'] in ['DMS', 'DMC', 'DMRS90', 'DMRS90ccw','DMRS45', 'DMRS180', 'location_DMS']:
            for key, val in tuning_results.items():
                if np.var(val) > 0:
                    results[key + '_dms'] = val
        else:
            update_parameters({'trial_type': 'DMS'})
            tuning_results = calculate_tuning(h_dms, syn_x_dms, syn_u_dms, \
                trial_info_dms, trial_time_dms, results['weights'], calculate_test = False)
            for key, val in tuning_results.items():
                if np.var(val) > 0:
                    results[key + '_dms'] = val
            update_parameters(results['parameters'])
            update_parameters(update_params)


    # Calculate mean sample traces
    results['h_sample_mean'] = np.zeros((results['parameters']['num_time_steps'], results['parameters']['n_hidden'], \
        results['parameters']['num_motion_dirs']), dtype = np.float32)
    for i in range(results['parameters']['num_motion_dirs']):
        ind = np.where(trial_info_decode['sample'] == i)[0]
        results['h_sample_mean'][:,:,i] = np.mean(h_decode[:,ind,:], axis = 1)

    # Calculate the neuronal and synaptic contributions towards solving the task
    if par['simulation_reps'] > 0:
        print('simulating network...')
        simulation_results = simulate_network(trial_info, h, syn_x, \
            syn_u, results['weights'], num_reps = par['simulation_reps'])
        for key, val in simulation_results.items():
            if np.var(val) > 0:
                results[key] = val

    # Save the analysis results
    save_fn = savefile if savefile is not None else results['parameters']['save_fn']
    pickle.dump(results, open(save_fn, 'wb'))
    print('Analysis results saved in ', save_fn)
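
A typical invocation of the analysis routine above, with placeholder file names and an illustrative parameter override, might look like:

# Hypothetical usage; the .pkl paths are placeholders.
analyze_model_from_file('savedir/DMS_model_results.pkl',
                        savefile='savedir/DMS_model_analysis.pkl',
                        update_params={'decoding_reps': 10})
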