def compute_bcpnn_traces(spike_train_0, spike_train_1, K_vec, syn_params, t_sim, plot=False):
    #######################
    # OFFLINE COMPUTATION
    #######################
    s_pre = BCPNN.convert_spiketrain_to_trace(spike_train_0, t_sim)
    s_post = BCPNN.convert_spiketrain_to_trace(spike_train_1, t_sim)
    wij, bias, pi, pj, pij, ei, ej, eij, zi, zj = BCPNN.get_spiking_weight_and_bias(s_pre, s_post, syn_params, K_vec=K_vec)
    w_end = wij[-1]
    bcpnn_traces = [wij, bias, pi, pj, pij, ei, ej, eij, zi, zj, s_pre, s_post]

    if plot:
        dt = 0.1
        info_txt = 'nspikes pre %d\nnspikes post %d\nw_final: %.2f' % (len(spike_train_0), len(spike_train_1), w_end)
        TP = TracePlotter.TracePlotter(plot_params=plot_params)

        # either plot the K_vec
        TP.plot_trace_with_spikes(bcpnn_traces, syn_params, dt, output_fn=None, fig=None, \
                color_pre='b', color_post='g', color_joint='r', style_joint='-', K_vec=K_vec, \
                extra_txt='Kappa value gates learning')
        # or add some extra info_text in one of the subplots:
#        TP.plot_trace(bcpnn_traces, syn_params, dt, output_fn=None, info_txt=info_txt, fig=None, \
#                color_pre='b', color_post='g', color_joint='r', style_joint='-')
    return w_end
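# Usage sketch (illustrative only): the spike times, the kappa vector and the
# syn_params keys below are assumed values, not taken from this script.
#
#   t_sim = 1000.                                  # ms
#   pre_spikes = [100., 200., 300., 400.]          # pre-synaptic spike times [ms]
#   post_spikes = [110., 210., 310., 410.]         # post-synaptic spike times [ms]
#   K_vec = np.ones(int(t_sim / 0.1))              # kappa = 1 everywhere -> learning always on
#   syn_params = {'tau_i': 10., 'tau_j': 10., 'tau_e': 100., 'tau_p': 1000.,
#                 'fmax': 20., 'gain': 1.}         # assumed parameter names
#   w_final = compute_bcpnn_traces(pre_spikes, post_spikes, K_vec, syn_params, t_sim, plot=True)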
def compute_many_bcpnn_traces(spike_train_0, spike_train_1, K_vec, syn_params, t_sim, extra_txt=''):
    """
    TP -- trace plotter, should be None for the first call of this function, then the old plotter is used
            for plotting additional traces
    """
    s_pre = BCPNN.convert_spiketrain_to_trace(spike_train_0, t_sim)
    s_post = BCPNN.convert_spiketrain_to_trace(spike_train_1, t_sim)
    wij, bias, pi, pj, pij, ei, ej, eij, zi, zj = BCPNN.get_spiking_weight_and_bias(s_pre, s_post, syn_params, K_vec=K_vec)
    w_end = wij[-1]
    bcpnn_traces = [wij, bias, pi, pj, pij, ei, ej, eij, zi, zj, s_pre, s_post]

    return bcpnn_traces
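# Usage sketch: the returned list can be unpacked in the same order it is built above
# (pre_spikes, post_spikes, K_vec, syn_params, t_sim as in the sketch after compute_bcpnn_traces).
#
#   traces = compute_many_bcpnn_traces(pre_spikes, post_spikes, K_vec, syn_params, t_sim)
#   wij, bias, pi, pj, pij, ei, ej, eij, zi, zj, s_pre, s_post = traces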
def bcpnn_oc_oc(params):
    print 'BCPNN OC -> OC'

    n_patterns = params['n_patterns']
    n_hc = params['n_hc']
    n_mc = params['n_mc']
    n_readout = params['n_readout']
    bcpnn = BCPNN.BCPNN(n_hc, n_mc, n_hc, n_mc, n_patterns, params)

    # train the recurrent OC -> OC connections with the OC output activity obtained after learning OB -> OC
    #oc_activity_fn = params['oc_abstract_activity_fn']
    #bcpnn.load_input_activity(oc_activity_fn)
    #bcpnn.load_output_activity(oc_activity_fn)
    #bcpnn.initialize()

    # train with binary oc activation derived from WTA after 2nd VQ
    oc_oc_training_fn = params['binary_oc_activation_fn']
    # train with the output activity when learning the ob-oc connections
#    oc_oc_training_fn = params['oc_abstract_activity_fn']

    print 'BCPNN OC <-> OC training with:', oc_oc_training_fn
    bcpnn.load_input_activity(oc_oc_training_fn)
    bcpnn.load_output_activity(oc_oc_training_fn)

#    bcpnn.initialize()

    activity_fn = params['oc_oc_abstract_activity_fn'] # for output 
    weights_fn = params['oc_oc_abstract_weights_fn']
    bias_fn = params['oc_oc_abstract_bias_fn']
    n_steps = params['n_bcpnn_steps']
    for i in xrange(n_steps):
    #    bcpnn.train_network()
        bcpnn.train_network(activity_fn, weights_fn, bias_fn)
    bcpnn.write_to_files(activity_fn, weights_fn, bias_fn)
    del bcpnn
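# The params dict passed to bcpnn_oc_oc must provide at least the keys read above;
# the values here are placeholders, not the values used in the project:
#
#   params = {'n_patterns': 50, 'n_hc': 12, 'n_mc': 30, 'n_readout': 50,
#             'binary_oc_activation_fn': 'binary_oc_activation.dat',
#             'oc_oc_abstract_activity_fn': 'oc_oc_activity.dat',
#             'oc_oc_abstract_weights_fn': 'oc_oc_weights.dat',
#             'oc_oc_abstract_bias_fn': 'oc_oc_bias.dat',
#             'n_bcpnn_steps': 1}
#   bcpnn_oc_oc(params)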
def bcpnn_oc_readout(params, readout_activation=None):
    print 'BCPNN OC -> READOUT'
    n_patterns = params['n_patterns']
    n_hc = params['n_hc']
    n_mc = params['n_mc']
    if readout_activation is None:
        # same number of patterns and readout cells
        n_readout = params['n_patterns']
        readout_activation = np.eye(n_patterns)
    else: # e.g. noisy patterns
        n_readout = params['n_readout']

    bcpnn = BCPNN.BCPNN(n_hc, n_mc, 1, n_readout, n_patterns, params)
    # take the output activity of OB-OC Bcpnn as activity
    oc_activity_fn = params['oc_abstract_activity_fn']
    # take the output activity of OC-OC Bcpnn as activity
#    oc_activity_fn = params['oc_oc_abstract_activity_fn']
    print 'Loading as input activity', oc_activity_fn
    bcpnn.load_input_activity(oc_activity_fn)

    bcpnn.load_output_activity(readout_activation)
#    bcpnn.initialize()

    activity_fn = params['readout_abstract_activity_fn']
    weights_fn = params['oc_readout_abstract_weights_fn']
    bias_fn = params['oc_readout_abstract_bias_fn']

    #n_steps = 1
    n_steps = params['n_bcpnn_steps']
    for i in xrange(n_steps):
    #    bcpnn.train_network()
        bcpnn.train_network(activity_fn, weights_fn, bias_fn)
    #bcpnn.train_network(activity_fn, weights_fn, bias_fn)
    bcpnn.write_to_files(activity_fn, weights_fn, bias_fn)

    #if params['multiple_concentrations_per_pattern']:
    #    n_patterns = 10
    # testing 
    #del bcpnn
    bcpnn = BCPNN.BCPNN(n_hc, n_mc, 1, n_readout, n_patterns, params)
    test_input = oc_activity_fn
    test_output = params['readout_abstract_activity_fn'].rsplit('.dat')[0] + '_test.dat'
#    print 'BCPNN.testing(input = %s \nweights = %s \nbias = %s \ntest_output = %s' % (test_input, weights_fn, bias_fn, test_output)
    bcpnn.testing(test_input, weights_fn, bias_fn, output_fn=test_output)
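# Note: with readout_activation=None, bcpnn_oc_readout maps every pattern onto its own
# readout unit (np.eye(n_patterns)), so n_readout == n_patterns; passing an explicit array
# (presumably of shape (n_patterns, n_readout)) overrides this, e.g. for noisy patterns:
#
#   bcpnn_oc_readout(params)                                  # identity targets
#   bcpnn_oc_readout(params, readout_activation=my_targets)   # my_targets is a hypothetical array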
def compute_bcpnn_traces(spike_train_0, spike_train_1, K_vec, syn_params, t_sim, plot=False, extra_txt='', wij_lim=None):
    #######################
    # OFFLINE COMPUTATION
    #######################
    s_pre = BCPNN.convert_spiketrain_to_trace(spike_train_0, t_sim)
    s_post = BCPNN.convert_spiketrain_to_trace(spike_train_1, t_sim)
    wij, bias, pi, pj, pij, ei, ej, eij, zi, zj = BCPNN.get_spiking_weight_and_bias(s_pre, s_post, syn_params, K_vec=K_vec)
    w_end = wij[-1]
    bcpnn_traces = [wij, bias, pi, pj, pij, ei, ej, eij, zi, zj, s_pre, s_post]

    if plot:
        dt = 0.1
        info_txt = 'nspikes pre %d\nnspikes post %d\nw_final: %.2f' % (len(spike_train_0), len(spike_train_1), w_end)
        TP = TracePlotter.TracePlotter(plot_params=plot_params)

        # either plot the K_vec
        TP.plot_zij_pij_weight_bias(bcpnn_traces, syn_params, dt, output_fn=None, fig=None, \
                color_pre='b', color_post='g', color_joint='r', style_joint='-', \
                extra_txt=extra_txt)
#                extra_txt=extra_txt, wij_lim=wij_lim)
    return w_end
def bcpnn_ob_oc(params):
    print 'BCPNN OB -> OC'

    n_mit = params['n_mit']

    n_patterns = params['n_patterns']
    n_hc = params['n_hc']
    n_mc = params['n_mc']
    bcpnn = BCPNN.BCPNN(1, n_mit, n_hc, n_mc, n_patterns, params) # 1 src HC, n_mit MC --> n_hc, n_mc

    ob_activity_fn = params['mit_mds_input_fn']
    activity_fn = params['oc_abstract_activity_fn']
    
    bias_fn = params['ob_oc_abstract_bias_fn']
    binary_oc_activation_fn = params['binary_oc_activation_fn']
    w_ij_mit_hc = params['vq_ob_oc_output_fn']
    weights_fn = params['ob_oc_abstract_weights_fn']
    print "BCPNN OB -> OC loading files:", ob_activity_fn


    bcpnn.load_input_activity(ob_activity_fn)
    bcpnn.load_output_activity(binary_oc_activation_fn)
    silent_fn = params['silent_mit_fn'] if os.path.exists(params['silent_mit_fn']) else None
    bcpnn.load_mc_hc_mask(w_ij_mit_hc, silent_units_fn=silent_fn)  # pass the silent-MT file only if it exists
#    bcpnn.initialize()

    n_steps = params['n_bcpnn_steps']
    for i in xrange(n_steps):
    #    bcpnn.train_network()
        bcpnn.train_network()  # alternatively: bcpnn.train_network(activity_fn, weights_fn, bias_fn)
    #bcpnn.train_network(activity_fn, weights_fn, bias_fn)

#    if os.path.exists(params['silent_mit_fn']):
#        bcpnn.silence_mit(params['silent_mit_fn'])
#    print 'bcpnn_ob_oc: input = %s\tweights = %s\tbias = %s\ttest_output = %s' % (test_input, weights_fn, bias_fn, test_output)
    bcpnn.write_to_files(activity_fn, weights_fn, bias_fn)
    del bcpnn
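# Summary of bcpnn_ob_oc: MT (mitral) cell activity from mit_mds_input_fn is the input,
# the binary OC activation derived from the 2nd VQ/WTA stage is the target, and the
# trained activity, weights and bias are written to the ob_oc_abstract_* files.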
# -*- coding: utf-8 -*-
import time
import numpy as np
import pylab as plt
import nest
import sys
sys.path.insert(0, '/home/nik/Documents/BCPNN_NEST_Module')  # make the local BCPNN NEST module importable
import BCPNN  # 'pt_module'

nest.ResetKernel()
nest.SetKernelStatus({'resolution': 0.001})
seed = int(time.time() * 1000.0)
nest.SetKernelStatus({'rng_seeds': [seed]})

BCPNN.InstallBCPNN()

syn_ports = {'AMPA': 1, 'NMDA': 2, 'GABA': 3}  #receptor types
f_desired = 1.
f_max = 20.

f_desiredDBC = 7.5
f_maxDBC = 55.

NRN = {
    'cell_model': 'aeif_cond_exp_multisynapse',
    'neuron_params': {
        'AMPA_NEG_E_rev': -75.0,  # pseudo-negative reversal potential used for negative BCPNN weights
        'AMPA_Tau_decay': 5.0,  #synaptic time constant
        'Delta_T': 1.0,
Example 8
    n_inputs = 28 * 28
    n_hypercolumns = 30
    n_minicolumns = 100
    n_hidden = n_hypercolumns * n_minicolumns
    n_outputs = 10
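    # Dimensions (presumably MNIST): 28 x 28 = 784 pixel inputs, 10 digit classes;
    # the hidden layer has 30 hypercolumns x 100 minicolumns = 3000 units (n_hidden).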

    taupdt = 0.002996755526968425
    l1_epochs = 15  #23
    l2_epochs = 25  #298

    l1_pmin = 0.3496214817513042
    l1_khalf = -435.08426155834593
    l1_taubdt = 0.27826430798917945

    #net = BCPNN.Network(np.float32)
    net = BCPNN.Network(np.float64)
    net.add_layer(
        BCPNN.StructuralPlasticityLayer(
            n_inputs, n_hypercolumns, n_minicolumns, taupdt, l1_khalf, l1_pmin,
            l1_taubdt, (1, 1 / n_minicolumns, 1 * 1 / n_minicolumns)))
    net.add_layer(
        BCPNN.DenseLayer(
            n_hidden, 1, n_outputs, taupdt,
            (1 / n_minicolumns, 1 / 10, 1 / n_minicolumns * 1 / 10)))
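    # Network structure: a StructuralPlasticityLayer maps the 784 inputs onto the
    # 30 x 100 = 3000-unit hidden layer, and a DenseLayer (one hypercolumn of 10
    # minicolumns) maps the hidden layer onto the 10 outputs.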

    train_start = time.time()
    net.fit(training_images, training_labels, batch_size, [(0, l1_epochs),
                                                           (1, l2_epochs)])
    train_stop = time.time()

    test_start = time.time()