def start():
    """Run the learning-cleanup experiment (cached) and optionally plot it.

    Results are loaded from a cached .npz whose name encodes the config;
    on a cache miss the simulation is run and its data saved there.
    """
    seed = 81223

    training_time = 1 # in seconds
    testing_time = 0.5

    DperE = 32   # presumably dimensions per ensemble — confirm
    dim = 32     # dimensionality of the vectors
    NperD = 30   # presumably neurons per dimension — confirm

    neurons_per_vector = 20
    num_vectors = 5

    oja_scale = np.true_divide(2,1)
    oja_learning_rate = np.true_divide(1,50)
    pre_tau = 0.03
    post_tau = 0.03
    pes_learning_rate = np.true_divide(1,1)

    # Snapshot of every local defined so far: config is both the cache
    # filename key and the **kwargs for build_and_run_vectors, so the
    # names and placement of the assignments above matter.
    config = locals()

    cleanup_params = {'radius':1.0,
                       'max_rates':[400],
                       'intercepts':[0.13]}

    ensemble_params = {'radius':1.0,
                       'max_rates':[400],
                       'intercepts':[0.1]}

    # intercepts actually matter quite a bit, so add them to config
    # (and hence the cache filename) explicitly
    config['cint'] = cleanup_params['intercepts'][0]
    config['eint'] = ensemble_params['intercepts'][0]

    do_plots = True

    data_title = 'lcdata'
    directory = 'learning_cleanup_data'

    data_filename = fh.make_filename(data_title, directory=directory,
                                     config_dict=config, extension='.npz',
                                     use_time=False)

    # Cache lookup; a None result triggers a fresh simulation run.
    data = fh.npload(data_filename)

    if data is None:
        results = build_and_run_vectors(ensemble_params=ensemble_params,
                                        cleanup_params=cleanup_params,
                                        **config)
        data = extract_data(filename=data_filename, **results)

    if do_plots:
        plot_title = 'lcplot'
        directory='learning_cleanup_plots'
        plot_filename = fh.make_filename(plot_title, directory=directory,
                                    config_dict=config, extension='.png')
        plot(filename=plot_filename, **data)
        plt.show()
Exemple #2
0
def start():
    seed = 81223

    training_time = 1  #in seconds
    testing_time = 0.5

    DperE = 32
    dim = 32
    NperD = 30

    neurons_per_vector = 20
    num_vectors = 5

    oja_scale = np.true_divide(2, 1)
    oja_learning_rate = np.true_divide(1, 50)
    pre_tau = 0.03
    post_tau = 0.03
    pes_learning_rate = np.true_divide(1, 1)

    config = locals()

    cleanup_params = {'radius': 1.0, 'max_rates': [400], 'intercepts': [0.13]}

    ensemble_params = {'radius': 1.0, 'max_rates': [400], 'intercepts': [0.1]}

    #intercepts actually matter quite a bit
    config['cint'] = cleanup_params['intercepts'][0]
    config['eint'] = ensemble_params['intercepts'][0]

    do_plots = True

    data_title = 'lcdata'
    directory = 'learning_cleanup_data'

    data_filename = fh.make_filename(data_title,
                                     directory=directory,
                                     config_dict=config,
                                     extension='.npz',
                                     use_time=False)

    data = fh.npload(data_filename)

    if data is None:
        results = build_and_run_vectors(ensemble_params=ensemble_params,
                                        cleanup_params=cleanup_params,
                                        **config)
        data = extract_data(filename=data_filename, **results)

    if do_plots:
        plot_title = 'lcplot'
        directory = 'learning_cleanup_plots'
        plot_filename = fh.make_filename(plot_title,
                                         directory=directory,
                                         config_dict=config,
                                         extension='.png')
        plot(filename=plot_filename, **data)
        plt.show()
Exemple #3
0
    def objective(kwargs):
        """Objective for one hyperparameter sample.

        Copies the sampled values in ``kwargs`` onto the shared ``params``
        object, runs ``num_samples`` train/test cycles (bumping the seed
        each cycle), and scores the sample by how far output similarity
        falls below input similarity — presumably minimized by the
        surrounding hyperopt search.
        """
        # Scalar hyperparameters map straight onto attributes of params.
        for name in ('oja_scale', 'oja_learning_rate', 'tau_rc', 'tau_ref',
                     'ctau_rc', 'ctau_ref', 'seed'):
            setattr(params, name, kwargs[name])
        # These three live inside nested parameter dicts.
        params.ensemble_params['intercepts'] = kwargs['ens_intercept']
        params.cleanup_params['intercepts'] = kwargs['cleanup_intercept']
        params.cleanup_params['radius'] = kwargs['radius']

        dr = '/data/e2crawfo/cleanuplearning/opt'

        input_means = []
        output_means = []

        for _ in range(num_samples):
            # Timestamped filenames keep repeated samples from colliding.
            data_fname = fh.make_filename('training', directory=dr,
                                          config_dict=params.__dict__,
                                          use_time=True)
            model_fname = fh.make_filename('models', directory=dr,
                                           config_dict=params.__dict__,
                                           use_time=True)
            test_fname = fh.make_filename('tests', directory=dr,
                                          config_dict=params.__dict__,
                                          use_time=True)

            learn.learn(data_fname, model_fname, params, simple='simple2')
            test.test_edges(model_fname, test_fname, testing_time, num_tests)

            input_sims, output_sims, _data = \
                analyze.analyze_edge_test_similarity(test_fname)

            input_means.append(np.mean(input_sims))
            output_means.append(np.mean(output_sims))
            params.seed += 1

        # Per-sample loss: input similarity minus output similarity.
        losses = np.array(input_means) - np.array(output_means)

        return {
            'loss': np.mean(losses),
            'loss_variance': stats.sem(losses),  # NOTE: standard error, not variance
            'status': STATUS_OK,
            }
Exemple #4
0
file_config = {
                'seed':seed,
                'NperD':NperD,
                'dim':dim,
                'DperE': DperE,
                'int':intercepts[0],
                'maxrates':max_rates[0],
                'radius':radius,
                'ojascale':oja_scale,
                'lr':oja_learning_rate,
                'hrrnum':hrr_num,
                'learntime':learning_time,
                'testtime':testing_time,
              }

filename = fh.make_filename("oja_graph_data", directory="oja_graph_data",
                            use_time=False, config_dict=file_config, extension=".npz")

run_sim = False

try:
    print "Trying to load..."
    f = open(filename, 'r')
    with np.load(f) as npz:
        sims, sims_time = npz['sims'], npz['sims_time']
        before_spikes, before_time = npz['before_spikes'], npz['before_time']
        after_spikes, after_time = npz['after_spikes'], npz['after_time']
    print "Loaded"
except:
    print "Couldn't load."
    run_sim = True
Exemple #5
0
#dims = [(0,p[0], p[1]) for p in points]
dims = [(1, p[0], p[1]) for p in points]

line_styles = {0: '-', 1: '--'}

cleanup_n = [100, 200, 300, 400, 500]

# One list of runtimes per dim-tuple, appended in cleanup_n order.
dim_data = {key: [] for key in dims}

for d in dims:
    for cn in cleanup_n:
        # run() returns two timing values; only the first is plotted.
        # FIX: removed an unused loop counter and an unused
        # np.zeros((0, len(cleanup_n))) accumulator that was never read.
        t1, _ = run(d[0], d[1], d[2], cn)
        dim_data[d].append(t1)

# Line style is keyed on the first element of the dim tuple.
for key in dims:
    plt.plot(cleanup_n, dim_data[key], ls=line_styles[key[0]], label=str(key))

plt.legend()

file_config = {}

filename = fh.make_filename('runtimes', directory='runtimes',
                            config_dict=file_config, extension='.png')
plt.savefig(filename)

plt.show()

def start():
    """Run the learning semantic-network experiment (cached) and plot it.

    Builds a random semantic network, encodes it as HRR vectors, runs the
    associative network on randomly sampled edges, and caches results in
    an .npz keyed by the config.
    """
    seed = 81223

    training_time = 1 # in seconds
    testing_time = 0.5

    DperE = 32   # presumably dimensions per ensemble — confirm
    dim = 32
    NperD = 30   # presumably neurons per dimension — confirm

    N = 5               # size of the semantic network (nodes) — confirm
    cleanup_n = N * 20  # 20 cleanup neurons per network node

    num_tests = 5

    oja_scale = np.true_divide(2,1)
    oja_learning_rate = np.true_divide(1,50)
    pre_tau = 0.03
    post_tau = 0.03
    pes_learning_rate = np.true_divide(1,1)

    # Snapshot of every local defined so far: config is both the cache
    # filename key and the **kwargs for build_and_run, so the names and
    # placement of the assignments above matter.
    config = locals()

    # Don't put all parameters in config
    cleanup_params = {'radius':1.0,
                       'max_rates':[400],
                       'intercepts':[0.13]}

    ensemble_params = {'radius':1.0,
                       'max_rates':[400],
                       'intercepts':[0.1]}

    # intercepts actually matter quite a bit, so put them in the filename
    config['cint'] = cleanup_params['intercepts'][0]
    config['eint'] = ensemble_params['intercepts'][0]


    data_title = 'lsndata'
    directory = 'learning_sn_data'

    data_filename = fh.make_filename(data_title, directory=directory,
                                     config_dict=config, extension='.npz',
                                     use_time=False)

    # Cache lookup; a None result triggers a fresh simulation run.
    data = fh.npload(data_filename)

    if data is None:
        # build the graph and get the vectors encoding it
        hrr_vectors, id_vectors, edge_vectors, G = build_semantic_network(dim, N, seed=seed)

        # Sample num_tests edges. Each test input is the source node's
        # HRR convolved with the inverse edge vector; the target node's
        # HRR is the correct answer.
        edges = random.sample(list(G.edges_iter(data=True)), num_tests)
        correct_vectors = [hrr_vectors[v] for u,v,d in edges]
        testing_vectors = [hrr_vectors[u].convolve(~edge_vectors[d['index']]) for u,v,d in edges]
        testing_vectors = map(lambda x: x.v, testing_vectors)  # raw ndarrays (Py2: map -> list)

        hrr_vectors = map(lambda x: hrr_vectors[x].v, G.nodes_iter())
        id_vectors = map(lambda x: id_vectors[x].v, G.nodes_iter())

        results = build_and_run(address_vectors = id_vectors, stored_vectors=hrr_vectors,
                                testing_vectors=testing_vectors, cleanup_params=cleanup_params,
                                ensemble_params=ensemble_params, **config)

        data = extract_data(filename=data_filename, correct_vectors=correct_vectors, **results)

    do_plots = True
    if do_plots:
        plot_title = 'lsnplot'
        directory='learning_sn_plots'
        plot_filename = fh.make_filename(plot_title, directory=directory,
                                    config_dict=config, extension='.png')
        plot(filename=plot_filename, **data)
        plt.show()
Exemple #7
0
# Plot the evolution of the cleanup connection weights over the last
# `trunc` timesteps (at most one cleanup neuron's worth).
if plot_connection_weights:
    extremes_only = True
    for idx in range(min(1, cleanup_n)):
        plt.subplot(offset)
        weights = np.squeeze(sim.data(oja_weights)[-trunc:, idx, :])
        if extremes_only:
            # Only the envelope: per-timestep max and min across weights.
            plt.plot(t, np.amax(weights, 1), label='cleanup, filter=0.1')
            plt.plot(t, np.amin(weights, 1), label='cleanup, filter=0.1')
        else:
            plt.plot(t, weights, label='cleanup, filter=0.1')
        remove_xlabels()
        offset += 1

file_config = {'ensembleN': ensemble_n,
               'cleanupNperV': cleanup_nperv,
               'vectorN': vector_n,
               'dim': dim,
               'trainonhrr': train_on_hrr}

filename = fh.make_filename('cleanup', directory='plots',
                            config_dict=file_config, extension='.png')
plt.savefig(filename)
plt.show()
Exemple #8
0
                                  t2,
                                  sim2,
                                  weight_probes,
                                  label='Connection Weights')

file_config = {
    'NperE': NperE,
    'numEnsembles': num_ensembles,
    'dim': dim,
    'DperE': DperE,
    'cleanupN': cleanup_n,
    'int': intercepts[0],
    'ojascale': oja_scale,
    'lr': oja_learning_rate,
    'hrrnum': hrr_num,
}

filename = fh.make_filename('oja_network_array',
                            directory='oja_network_array',
                            config_dict=file_config,
                            extension='.png')
plt.savefig(filename)

end = time.time()
print "Time:", end - start

overall_end = time.time()
print "Total time: ", overall_end - overall_start

plt.show()
 def make_filename(title, dir, ext='', usetime=False):
     """Build a filename through fh.make_filename using this object's
     __dict__ as the config.

     FIX: the original referenced an undefined name ``directory`` and
     silently ignored its own ``dir``, ``ext`` and ``usetime`` arguments;
     they are now routed through. Defaults preserve the old behavior
     (use_time=False, empty extension).
     NOTE(review): ``self`` is read here although it is not a parameter —
     this fragment is presumably a method whose signature was mangled in
     extraction; confirm against the original source.
     """
     return fh.make_filename(title, directory=dir, config_dict=self.__dict__,
                             extension=ext, use_time=usetime)
Exemple #10
0
def start():
    """Run the learning semantic-network experiment (cached) and plot it.

    Formatted duplicate of the experiment driver: builds a random
    semantic network, encodes it as HRR vectors, runs the associative
    network on sampled edges, and caches results in an .npz keyed by
    the config.
    """
    seed = 81223

    training_time = 1  # in seconds
    testing_time = 0.5

    DperE = 32   # presumably dimensions per ensemble — confirm
    dim = 32
    NperD = 30   # presumably neurons per dimension — confirm

    N = 5               # size of the semantic network (nodes) — confirm
    cleanup_n = N * 20  # 20 cleanup neurons per network node

    num_tests = 5

    oja_scale = np.true_divide(2, 1)
    oja_learning_rate = np.true_divide(1, 50)
    pre_tau = 0.03
    post_tau = 0.03
    pes_learning_rate = np.true_divide(1, 1)

    # Snapshot of every local defined so far: config is both the cache
    # filename key and the **kwargs for build_and_run, so the names and
    # placement of the assignments above matter.
    config = locals()

    # Don't put all parameters in config
    cleanup_params = {'radius': 1.0, 'max_rates': [400], 'intercepts': [0.13]}

    ensemble_params = {'radius': 1.0, 'max_rates': [400], 'intercepts': [0.1]}

    # intercepts actually matter quite a bit, so put them in the filename
    config['cint'] = cleanup_params['intercepts'][0]
    config['eint'] = ensemble_params['intercepts'][0]

    data_title = 'lsndata'
    directory = 'learning_sn_data'

    data_filename = fh.make_filename(data_title,
                                     directory=directory,
                                     config_dict=config,
                                     extension='.npz',
                                     use_time=False)

    # Cache lookup; a None result triggers a fresh simulation run.
    data = fh.npload(data_filename)

    if data is None:
        # build the graph and get the vectors encoding it
        hrr_vectors, id_vectors, edge_vectors, G = build_semantic_network(
            dim, N, seed=seed)

        # Sample num_tests edges. Each test input is the source node's
        # HRR convolved with the inverse edge vector; the target node's
        # HRR is the correct answer.
        edges = random.sample(list(G.edges_iter(data=True)), num_tests)
        correct_vectors = [hrr_vectors[v] for u, v, d in edges]
        testing_vectors = [
            hrr_vectors[u].convolve(~edge_vectors[d['index']])
            for u, v, d in edges
        ]
        testing_vectors = map(lambda x: x.v, testing_vectors)  # Py2: map -> list

        hrr_vectors = map(lambda x: hrr_vectors[x].v, G.nodes_iter())
        id_vectors = map(lambda x: id_vectors[x].v, G.nodes_iter())

        results = build_and_run(address_vectors=id_vectors,
                                stored_vectors=hrr_vectors,
                                testing_vectors=testing_vectors,
                                cleanup_params=cleanup_params,
                                ensemble_params=ensemble_params,
                                **config)

        data = extract_data(filename=data_filename,
                            correct_vectors=correct_vectors,
                            **results)

    do_plots = True
    if do_plots:
        plot_title = 'lsnplot'
        directory = 'learning_sn_plots'
        plot_filename = fh.make_filename(plot_title,
                                         directory=directory,
                                         config_dict=config,
                                         extension='.png')
        plot(filename=plot_filename, **data)
        plt.show()
ax, offset = nengo_stack_plot(offset, t, sim, output_probes, func=sim_func, label='Similarity')
ax, offset = nengo_stack_plot(offset, t, sim, error_probes, label='Error', removex=False)

file_config = {}
#                'NperE':NperE,
#                'numEnsembles':num_ensembles,
#                'dim':dim,
#                'DperE': DperE,
#                'cleanupN': cleanup_n,
#                'premaxr':max_rates[0],
#                'preint':pre_intercepts[0],
#                'int':intercepts[0],
#                'ojascale':oja_scale,
#                'lr':oja_learning_rate,
#                'hrrnum':hrr_num,
#              }

filename = fh.make_filename('network_array_pes', directory='network_array_pes',
                            config_dict=file_config, extension='.png')
plt.savefig(filename)

end = time.time()
print "Plot Time: ", end - start

overall_end = time.time()
print "Total time: ", overall_end - overall_start

plt.show()


Exemple #12
0
                post_tau = 0.03,
                training_time=1.0,
                pes_learning_rate = np.true_divide(1,1),
                cleanup_params = {'radius':0.62345,
                                   'max_rates':[400],
                                   'intercepts':[0.32056287]},
                )

# Ensemble radius scales with sqrt(DperE/dim) — presumably so each
# sub-ensemble sees appropriately normalized input; confirm.
params.ensemble_params = {'radius':np.sqrt(np.true_divide(params.DperE, params.dim)),
                          'max_rates':[400],
                          'intercepts':[0.2278348]}

dim = params.dim

# One run config per (dimension, num_vectors, sample) combination, and a
# parallel filename list for each artifact kind.
run_configs = [(params.dim, N, s) for N in num_vectors for s in range(num_samples)]
learn_results = [fh.make_filename('learn_D_%g_N_%g_s_%g' % rc, dr+'learn', use_time=False) for rc in run_configs]
learned_networks = [fh.make_filename('network_D_%g_N_%g_s_%g' % rc, dr+'networks', use_time=False) for rc in run_configs]
edge_test_results = [fh.make_filename('edge_test_D_%g_N_%g_s_%g' % rc, dr+'tests', use_time=False) for rc in run_configs]

# FIX: edge_test_plots was assigned twice; the earlier
# fh.make_filename-based list was immediately overwritten and never used,
# so only the effective assignment is kept.
edge_test_plots = [dr + 'plots/edge_simulation_plot_D_%g_N_%g_s_%g.pdf' % rc for rc in run_configs]

def task_learn():
    for ln, lr, rc in zip(learned_networks, learn_results, run_configs):
        cur_params = copy.deepcopy(params)
        cur_params.num_vectors = rc[1]
        cur_params.seed = params.seed + rc[2]

        yield  {
                'name':ln,
                'actions':[(learn.learn, [lr, ln, cur_params, 'simple2'])],
Exemple #13
0
            }

    then = time.time()

    print "Calling fMin"
    best = fmin(objective,
        space=space,
        algo=tpe.suggest,
        max_evals=20,
        trials=trials)
    print "Done fMin"

    now = time.time()

    directory = '/data/e2crawfo/cleanuplearning/opt/logs'
    filename = fh.make_filename('optlog', directory=directory, use_time=True)
    aggregated_log = open(filename, 'w')

    aggregated_log.write("Time for fmin: " + str(now - then) + "\n")
    aggregated_log.write("Trials: " + str(trials.trials) + "\n")
    aggregated_log.write("Results: " + str(trials.results) + "\n")
    aggregated_log.write("Losses: " + str(trials.losses()) + "\n")
    aggregated_log.write("Statuses: " + str(trials.statuses()) + "\n")

    aggregated_log.close()


    for p in workers:
       p.terminate()

    for i in range(min(1, cleanup_n)):
        plt.subplot(offset)
        connection_weights = sim.data(oja_weights)[-trunc:,i,:]
        connection_weights = np.squeeze(connection_weights)
        if extremes_only:
            maxes = np.amax(connection_weights, 1)
            mins = np.amin(connection_weights, 1)
            plt.plot(t, maxes, label='cleanup, filter=0.1')
            plt.plot(t, mins, label='cleanup, filter=0.1')
        else:
            plt.plot(t, connection_weights, label='cleanup, filter=0.1')
        remove_xlabels()
        offset += 1



# Encode the run parameters in the saved figure's filename.
file_config = {'ensembleN': ensemble_n,
               'cleanupNperV': cleanup_nperv,
               'vectorN': vector_n,
               'dim': dim,
               'trainonhrr': train_on_hrr}

filename = fh.make_filename('cleanup',
                            directory='plots',
                            config_dict=file_config,
                            extension='.png')
plt.savefig(filename)
plt.show()


Exemple #15
0
# Learning phase: plot everything up to the last len(t1) samples.
plt.subplot(offset)
plt.plot(t2[:-len(t1)], inn2[:-len(t1)])
plt.ylabel('Input')
offset += 1

plt.subplot(offset)
rasterplot(t2[:-len(t1)], spikes2[:-len(t1)])
plt.ylabel('Cleanup: Spikes')
offset += 1

# Encode the run parameters in the saved figure's filename.
file_config = {'ensembleN': ensemble_n,
               'premaxr': max_rates[0],
               'preint': pre_intercepts[0],
               'int': intercepts[0],
               'dim': dim,
               'ojascale': oja_scale,
               'lr': oja_learning_rate,
               'hrrnum': hrr_num}

filename = fh.make_filename('oja_select', directory='oja_select',
                            config_dict=file_config, extension='.png')
plt.savefig(filename)

plt.show()
plt.ylabel('Cleanup: Spikes')
offset += 1

# Learning phase: plot everything up to the last len(t1) samples.
plt.subplot(offset)
plt.plot(t2[:-len(t1)], inn2[:-len(t1)])
plt.ylabel('Input')
offset += 1

plt.subplot(offset)
rasterplot(t2[:-len(t1)], spikes2[:-len(t1)])
plt.ylabel('Cleanup: Spikes')
offset += 1

# Encode the run parameters in the saved figure's filename.
file_config = {'ensembleN': ensemble_n,
               'premaxr': max_rates[0],
               'preint': pre_intercepts[0],
               'int': intercepts[0],
               'dim': dim,
               'ojascale': oja_scale,
               'lr': oja_learning_rate,
               'hrrnum': hrr_num}

filename = fh.make_filename('oja_select', directory='oja_select',
                            config_dict=file_config, extension='.png')
plt.savefig(filename)

plt.show()
Exemple #17
0
def task_example_simulation_plot():
    """Task generator: learn, test, then plot one example simulation.

    Yields three dependent task dicts (learn -> test -> plot) whose
    filenames are derived from the full parameter set.
    NOTE(review): the dict keys ('name', 'actions', 'file_dep',
    'targets', 'uptodate') and the run_once helper follow the doit build
    tool's conventions — confirm this module is a dodo file.
    """

    num_tests = 5
    testing_time = 0.5

    # Hand-tuned network/learning parameters for the example run; the
    # commented pair below is presumably an alternative larger config.
    params = association_network.Parameters(
                    seed=257938,
                    dim=8,
                    DperE = 8,
                    #dim=64,
                    #DperE = 32,
                    num_vectors = 5,
                    neurons_per_vector = 30,
                    NperD = 50,
                    pre_tau = 0.03,
                    post_tau = 0.03,
                    training_time = 1.0,
                    tau_ref=0.0048327,
                    tau_rc=0.09689,
                    ctau_ref=0.00257,
                    ctau_rc=0.27103,
                    oja_scale=20.221052987,
                    oja_learning_rate=0.045654,
                    pes_learning_rate = np.true_divide(1,1),
                    cleanup_params = {'radius':0.716534,
                                       'max_rates':[200],
                                       'intercepts':[0.133256]},
                    )

    # Ensemble radius scales with sqrt(DperE/dim) — presumably so each
    # sub-ensemble sees appropriately normalized input; confirm.
    params.ensemble_params = {'radius':np.sqrt(np.true_divide(params.DperE, params.dim)),
                              'max_rates':[200],
                              'intercepts':[0.098351]}

    dim = params.dim
    num_vectors = params.num_vectors

    # All filenames are keyed on the full parameter dict; use_time=False
    # so reruns with identical params map to the same files.
    learn_data_fname = fh.make_filename('example_learn', directory=dr+'learn', config_dict=params.__dict__, use_time=False)
    learn_model_fname = fh.make_filename('example_network', directory=dr+'networks', config_dict=params.__dict__, use_time=False)
    test_fname = fh.make_filename('example_tests', directory=dr+'tests', config_dict=params.__dict__, use_time=False)
    plot_fname = fh.make_filename('example_plots', directory=dr+'plots', config_dict=params.__dict__, use_time=False)
    plot_fname += ".pdf"

    # Task 1: train the network; produces both the data and model files.
    yield  {
            'name':'example_learn_D_%g_num_vectors_%g' % (dim, num_vectors),
            'actions':[(learn.learn, [learn_data_fname, learn_model_fname, params, 'simple2'])],
            'file_dep':[],
            'targets':[learn_data_fname, learn_model_fname],
            'uptodate':[run_once]
           }

    # Task 2: test edge recall on the trained model.
    yield  {
            'name':'example_test_D_%g_num_vectors_%g' % (dim, num_vectors),
            'actions':[(test.test_edges, [learn_model_fname, test_fname, testing_time, num_tests])],
            'file_dep':[learn_model_fname],
            'targets':[test_fname]
           }

    # Task 3: plot the combined learning + testing simulation data.
    yield  {
            'name':'example_simulation_plot_D_%g_num_vectors_%g' % (dim, num_vectors),
            'actions':[(plot.simulation_plot, [plot_fname, learn_data_fname, test_fname])],
            'file_dep':[test_fname, learn_data_fname],
            'targets':[plot_fname]
           }