Example #1
def make_brainstem_group(num):
    """Generate integrate-and-fire neuron group that can be connected to a
    auditory nerve fibers group.

    """
    eqs = '''
    dv/dt = (ge-(v+49*mV))/(2*ms) : volt
    dge/dt = -ge/(0.5*ms) : volt
    '''

    group = brian.NeuronGroup(num, eqs, threshold=-50 * mV, reset=-60 * mV)
    group.v = -60 * mV

    return group
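
A short usage sketch for the group above (hedged: it assumes Brian 1 with the unit names `mV`, `ms`, `Hz` importable from `brian`, and uses a PoissonGroup as a stand-in for the auditory nerve fiber group mentioned in the docstring):

import brian
from brian import mV, ms, Hz

anf = brian.PoissonGroup(100, rates=100 * Hz)   # stand-in for auditory nerve fibers
brainstem = make_brainstem_group(100)
# every presynaptic spike adds 5 mV to the synaptic variable ge
con = brian.Connection(anf, brainstem, 'ge', weight=5 * mV, sparseness=0.2)
spikes = brian.SpikeMonitor(brainstem)
brian.run(200 * ms)
print(spikes.nspikes)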
Example #2
    def _build(self):
        '''Create populations.'''

        eqs = brian.Equations('''
            dV/dt  = ge/ms : volt
            dge/dt = -ge/ms : volt
            dgi/dt = -gi/ms : volt
            ''')

        pop = brian.NeuronGroup(self._N_s + self._N_t,
                                model=eqs,
                                threshold=brian.mV,
                                reset=brian.mV)
        self._source_pop = pop.subgroup(self._N_s)
        self._target_pop = pop.subgroup(self._N_t)
def run_sim(number_neurons=default_number_neurons,
            connection_probability=default_connection_probability,
            synaptic_weights=default_synaptic_weights,
            synaptic_time_constant=default_synaptic_time_constant,
            tend=300):
    '''Run a simulation of a population of randomly connected excitatory
    leaky integrate-and-fire neurons. The population is injected with a
    transient current.'''

    import numpy as np
    from brian.units import mvolt, msecond, namp, Mohm
    import brian
    brian.clear()

    El = 0 * mvolt
    tau_m = 30 * msecond
    tau_syn = synaptic_time_constant * msecond
    R = 20 * Mohm
    v_threshold = 30 * mvolt
    v_reset = 0 * mvolt
    tau_refractory = 4 * msecond

    eqs = brian.Equations('''
    dv/dt = (-(v - El) + R*I)/tau_m : volt
    I = I_syn + I_stim : amp
    dI_syn/dt = -I_syn/tau_syn : amp
    I_stim : amp
    ''')

    external_current = np.zeros(tend)
    external_current[np.arange(0, 100)] = 5 * namp

    group = brian.NeuronGroup(
        model=eqs,
        N=number_neurons, threshold=v_threshold, reset=v_reset, refractory=tau_refractory)

    group.I_stim = brian.TimedArray(external_current, dt=1*msecond)

    connections = brian.Connection(group, group, 'I_syn')
    connections.connect_random(sparseness=connection_probability, weight=synaptic_weights*namp)

    spike_monitor = brian.SpikeMonitor(group)
    population_rate_monitor = brian.PopulationRateMonitor(group, bin=10*msecond)

    brian.reinit()
    brian.run(tend * msecond)

    return spike_monitor, population_rate_monitor
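
A possible invocation of run_sim (a sketch; it assumes the module-level default_* values referenced in the signature are defined, and passes explicit values anyway):

import brian

spike_monitor, rate_monitor = run_sim(number_neurons=100,
                                      connection_probability=0.1,
                                      synaptic_weights=0.5,
                                      synaptic_time_constant=5,
                                      tend=300)
print(spike_monitor.nspikes)        # total spike count
brian.raster_plot(spike_monitor)    # Brian 1 raster-plot helper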
Example #4
def create_cells(cellclass, cellparams=None, n=1, parent=None):
    """
    Create cells in Brian.
    
    `cellclass`  -- a PyNN standard cell or a native Brian cell class.
    `cellparams` -- a dictionary of cell parameters.
    `n`          -- the number of cells to create
    `parent`     -- the parent Population, or None if the cells don't belong to
                    a Population.
    
    This function is used by both `create()` and `Population.__init__()`
    
    Return:
        - a 1D array of all cell IDs
        - a 1D boolean array indicating which IDs are present on the local MPI
          node
        - the ID of the first cell created
        - the ID of the last cell created
    """
    # currently, we create a single NeuronGroup for create(), but
    # arguably we should use n NeuronGroups each containing a single cell
    # either that or use the subgroup() method in connect(), etc
    assert n > 0, 'n must be a positive integer'
    if isinstance(cellclass, basestring):  # celltype is not a standard cell
        try:
            eqs = brian.Equations(cellclass)
        except Exception, errmsg:
            raise common.InvalidModelError(errmsg)
        v_thresh = cellparams['v_thresh']
        v_reset = cellparams['v_reset']
        tau_refrac = cellparams['tau_refrac']
        brian_cells = brian.NeuronGroup(n,
                                        model=eqs,
                                        threshold=v_thresh,
                                        reset=v_reset,
                                        clock=state.simclock,
                                        compile=True,
                                        max_delay=state.max_delay)
        cell_parameters = cellparams or {}
Example #5
def simulation_example(brian_clock, brian):

    import numpy

    print "Simulation function!"

    brian.defaultclock = brian_clock

    Number_of_input_neurons = 20

    spiketimes = []
    Input_layer = brian.SpikeGeneratorGroup(20, spiketimes)

    Output_layer = brian.NeuronGroup(Number_of_input_neurons,
                                     model='v:1',
                                     reset=0,
                                     threshold=10)

    S = brian.Synapses(Input_layer, Output_layer, model='w:1', pre='v+=w')

    S[:, :] = 'i==j'
    S.w = 100

    return Input_layer, Output_layer, [Input_layer, Output_layer], [S], []
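
A hypothetical call for the function above (it expects a Brian clock and the brian module itself; nothing will spike here because spiketimes is empty):

import brian

clock = brian.Clock(dt=0.1 * brian.ms)
inp, out, groups, synapses, monitors = simulation_example(clock, brian)
net = brian.Network(groups + synapses)
net.run(100 * brian.ms)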
Example #6
def make_gbc_group(num, celsius=37):
    ''' Creates a GBC (globular bushy cell) neuron group

    Parameters:
    -----------
    num : int
        Number of neurons in the neuron group.
    celsius : float
        Temperature in degrees Celsius. Default: 37.

    Output
    ------
    A Brian NeuronGroup

    '''

    C = 12 * pF
    Eh = -43 * mV
    EK = -77 * mV  # -70mV in orig py file, but -77*mV in mod file
    El = -65 * mV
    ENa = 50 * mV

    nf = 0.85  # proportion of n vs p kinetics
    zss = 0.5  # steady state inactivation of glt

    q10 = 3.**((celsius - 22) / 10.)
    T10 = 10.**((celsius - 22) / 10.)

    q10_gbar = 1.5

    gnabar = calc_tf(q10_gbar, celsius) * 2500 * nS
    gkhtbar = calc_tf(q10_gbar, celsius) * 150 * nS
    gkltbar = calc_tf(q10_gbar, celsius) * 200 * nS
    ghbar = calc_tf(q10_gbar, celsius) * 20 * nS
    gl = calc_tf(q10_gbar, celsius) * 2 * nS

    eqs = """
    i_stim : amp
    dvm/dt = (ileak + ina + ikht + iklt + ih + i_syn +i_stim) / C : volt
    vu = vm/mV : 1 # unitless v
    """

    # Rothman 1993 Na channel
    eqs_na = """
    ina = gnabar*m**3*h*(ENa-vm) : amp

    dm/dt = malpha * (1. - m) - mbeta * m : 1
    dh/dt = halpha * (1. - h) - hbeta * h : 1

    malpha = (0.36 * q10 * (vu+49.)) / (1. - exp(-(vu+49.)/3.)) /ms : 1/ms
    mbeta = (-0.4 * q10 * (vu+58.)) / (1. - exp((vu+58)/20.)) /ms : 1/ms

    halpha = 2.4*q10 / (1. + exp((vu+68.)/3.)) /ms  +  0.8*T10 / (1. + exp(vu + 61.3)) /ms : 1/ms
    hbeta = 3.6*q10 / (1. + exp(-(vu+21.)/10.)) /ms : 1/ms
    """
    eqs += eqs_na

    # KHT channel (delayed-rectifier K+)
    eqs_kht = """
    ikht = gkhtbar*(nf*n**2 + (1-nf)*p)*(EK-vm) : amp
    dn/dt=q10*(ninf-n)/ntau : 1
    dp/dt=q10*(pinf-p)/ptau : 1
    ninf =   (1 + exp(-(vu + 15) / 5.))**-0.5 : 1
    pinf =  1. / (1 + exp(-(vu + 23) / 6.)) : 1
    ntau =  ((100. / (11*exp((vu+60) / 24.) + 21*exp(-(vu+60) / 23.))) + 0.7)*ms : ms
    ptau = ((100. / (4*exp((vu+60) / 32.) + 5*exp(-(vu+60) / 22.))) + 5)*ms : ms
    """
    eqs += eqs_kht

    # Ih channel (subthreshold adaptive, non-inactivating)
    eqs_ih = """
    ih = ghbar*r*(Eh-vm) : amp
    dr/dt=q10*(rinf-r)/rtau : 1
    rinf = 1. / (1+exp((vu + 76.) / 7.)) : 1
    rtau = ((100000. / (237.*exp((vu+60.) / 12.) + 17.*exp(-(vu+60.) / 14.))) + 25.)*ms : ms
    """
    eqs += eqs_ih

    # KLT channel (low threshold K+)
    eqs_klt = """
    iklt = gkltbar*w**4*z*(EK-vm) : amp
    dw/dt=q10*(winf-w)/wtau : 1
    dz/dt=q10*(zinf-z)/wtau : 1
    winf = (1. / (1 + exp(-(vu + 48.) / 6.)))**0.25 : 1
    zinf = zss + ((1.-zss) / (1 + exp((vu + 71.) / 10.))) : 1
    wtau = ((100. / (6.*exp((vu+60.) / 6.) + 16.*exp(-(vu+60.) / 45.))) + 1.5)*ms : ms
    ztau = ((1000. / (exp((vu+60.) / 20.) + exp(-(vu+60.) / 8.))) + 50)*ms : ms
    """
    eqs += eqs_klt

    # Leak
    eqs_leak = "ileak = gl*(El-vm) : amp"
    eqs += eqs_leak

    ### Excitatory synapse
    # Q10 for synaptic decay calculated from \cite{Postlethwaite2007}
    Tf = calc_tf(q10=0.75, celsius=celsius, ref_temp=37)
    taue_syn = 0.2 * Tf * ms
    taui_syn = 9.03 * calc_tf(q10=0.75, celsius=celsius,
                              ref_temp=34) * ms  # \cite{Xie2013}
    eqs_syn = """
    i_syn = ge_syn*(0*mV - vm) + gi_syn*(-77*mV - vm): amp
    dge_syn/dt = -ge_syn/taue_syn : siemens
    dgi_syn/dt = -gi_syn/taui_syn : siemens
    """
    eqs += eqs_syn

    if celsius < 37:
        refractory = 0.7 * ms
    else:
        refractory = 0.5 * ms

    group = brian.NeuronGroup(
        N=num,
        model=eqs,
        threshold=brian.EmpiricalThreshold(threshold=-20 * mV,
                                           refractory=refractory),
        implicit=True,
    )

    ### Set initial conditions
    group.vm = El
    group.r = 1. / (1 + np.exp((El / mV + 76.) / 7.))
    group.m = group.malpha / (group.malpha + group.mbeta)
    group.h = group.halpha / (group.halpha + group.hbeta)  # steady state = alpha / (alpha + beta)
    group.w = (1. / (1 + np.exp(-(El / mV + 48.) / 6.)))**0.25
    group.z = zss + ((1. - zss) / (1 + np.exp((El / mV + 71.) / 10.)))
    group.n = (1 + np.exp(-(El / mV + 15) / 5.))**-0.5
    group.p = 1. / (1 + np.exp(-(El / mV + 23) / 6.))

    return group
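
A minimal driving sketch for the group above (it assumes the unit names and the helper calc_tf() used inside make_gbc_group are importable from the surrounding module):

import brian
from brian import ms, namp

gbc = make_gbc_group(50)
gbc.i_stim = 0.3 * namp                  # constant depolarizing current
spikes = brian.SpikeMonitor(gbc)
vm_trace = brian.StateMonitor(gbc, 'vm', record=[0])
brian.run(50 * ms)
print(spikes.nspikes)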
Example #7
fig_num = 1
neuron_groups = {}
input_groups = {}
connections = {}
input_connections = {}
stdp_methods = {}
rate_monitors = {}
spike_monitors = {}
spike_counters = {}

result_monitor = np.zeros((update_interval, conv_features, n_e))

neuron_groups['e'] = b.NeuronGroup(n_e_total,
                                   neuron_eqs_e,
                                   threshold=v_thresh_e,
                                   refractory=refrac_e,
                                   reset=scr_e,
                                   compile=True,
                                   freeze=True)
neuron_groups['i'] = b.NeuronGroup(n_e_total,
                                   neuron_eqs_i,
                                   threshold=v_thresh_i,
                                   refractory=refrac_i,
                                   reset=v_reset_i,
                                   compile=True,
                                   freeze=True)

########################################################
# CREATE NETWORK POPULATIONS AND RECURRENT CONNECTIONS #
########################################################

# excitatory model equations (the opening lines of this string were lost in
# extraction; only the refractory-timer equation survives)
neuron_eqs_e = '''
        dtimer/dt = 1                                               : ms
        '''

# reset equations
theta_plus_e = 0.05 * b.mV
reset_eqs_e = 'v = v_reset_e; theta += theta_plus_e; timer = 0*ms'

# threshold equations
offset = 20 * b.mV
threshold_eqs_e = '(v>(theta - offset + v_threshold_e)) * (timer>time_refractory_e)'

# group instantiation
neuron_groups['spiking'] = b.NeuronGroup(N=spiking_neurons,
                                         model=neuron_eqs_e,
                                         threshold=threshold_eqs_e,
                                         refractory=time_refractory_e,
                                         reset=reset_eqs_e,
                                         compile=True,
                                         freeze=True)

neuron_groups['spiking'].v = v_rest_e - 40. * b.mV
neuron_groups['spiking'].theta = 20 * b.mV

# DEFINE INHIBITORY GROUP

# model equations
v_rest_i = -60. * b.mV
v_reset_i = -45. * b.mV
v_threshold_i = -40. * b.mV

tau_v_i = 10 * b.ms
def build_network():
    global fig_num

    neuron_groups['e'] = b.NeuronGroup(n_e_total, neuron_eqs_e, threshold=v_thresh_e, \
          refractory=refrac_e, reset=scr_e, compile=True, freeze=True)
    neuron_groups['i'] = b.NeuronGroup(n_e_total, neuron_eqs_i, threshold=v_thresh_i, \
         refractory=refrac_i, reset=v_reset_i, compile=True, freeze=True)

    for name in population_names:
        print '...Creating neuron group:', name

        # get a subgroup of size 'n_e' from all exc
        neuron_groups[name + 'e'] = neuron_groups['e'].subgroup(conv_features *
                                                                n_e)
        # get a subgroup of size 'n_i' from the inhibitory layer
        neuron_groups[name + 'i'] = neuron_groups['i'].subgroup(conv_features *
                                                                n_e)

        # start the membrane potentials of these groups 40mV below their resting potentials
        neuron_groups[name + 'e'].v = v_rest_e - 40. * b.mV
        neuron_groups[name + 'i'].v = v_rest_i - 40. * b.mV

    print '...Creating recurrent connections'

    for name in population_names:
        neuron_groups['e'].theta = np.load(
            os.path.join(best_weights_dir,
                         '_'.join(['theta_A', ending + '_best.npy'])))

        for conn_type in recurrent_conn_names:
            if conn_type == 'ei':
                # create connection name (composed of population and connection types)
                conn_name = name + conn_type[0] + name + conn_type[1]
                # create a connection from the first group in conn_name with the second group
                connections[conn_name] = b.Connection(
                    neuron_groups[conn_name[0:2]],
                    neuron_groups[conn_name[2:4]],
                    structure='sparse',
                    state='g' + conn_type[0])

                # instantiate the created connection
                for feature in xrange(conv_features):
                    for n in xrange(n_e):
                        connections[conn_name][feature * n_e + n,
                                               feature * n_e + n] = 10.4

            elif conn_type == 'ie':
                # create connection name (composed of population and connection types)
                conn_name = name + conn_type[0] + name + conn_type[1]

                # load weight matrix
                weight_matrix = np.load(
                    os.path.join(best_weights_dir,
                                 '_'.join([conn_name, ending, 'best.npy'])))

                # create a connection from the first group in conn_name with the second group
                connections[conn_name] = b.Connection(
                    neuron_groups[conn_name[0:2]],
                    neuron_groups[conn_name[2:4]],
                    structure='sparse',
                    state='g' + conn_type[0])

                # define the actual synaptic connections and strengths
                for feature in xrange(conv_features):
                    for other_feature in xrange(conv_features):
                        if feature != other_feature:
                            for n in xrange(n_e):
                                connections[conn_name][feature * n_e + n,
                                                       other_feature * n_e +
                                                       n] = inhibition_level

        print '...Creating monitors for:', name

        # spike rate monitors for excitatory and inhibitory neuron populations
        rate_monitors[name + 'e'] = b.PopulationRateMonitor(
            neuron_groups[name + 'e'],
            bin=(single_example_time + resting_time) / b.second)
        rate_monitors[name + 'i'] = b.PopulationRateMonitor(
            neuron_groups[name + 'i'],
            bin=(single_example_time + resting_time) / b.second)
        spike_counters[name + 'e'] = b.SpikeCounter(neuron_groups[name + 'e'])

        # record neuron population spikes if specified
        if record_spikes and do_plot:
            spike_monitors[name + 'e'] = b.SpikeMonitor(neuron_groups[name +
                                                                      'e'])
            spike_monitors[name + 'i'] = b.SpikeMonitor(neuron_groups[name +
                                                                      'i'])

    if record_spikes and do_plot:
        b.figure(fig_num, figsize=(8, 6))
        b.ion()
        b.subplot(211)
        b.raster_plot(spike_monitors['Ae'],
                      refresh=1000 * b.ms,
                      showlast=1000 * b.ms,
                      title='Excitatory spikes per neuron')
        b.subplot(212)
        b.raster_plot(spike_monitors['Ai'],
                      refresh=1000 * b.ms,
                      showlast=1000 * b.ms,
                      title='Inhibitory spikes per neuron')
        b.tight_layout()

        fig_num += 1

    # creating Poisson spike train from input image (784 vector, 28x28 image)
    for name in input_population_names:
        input_groups[name + 'e'] = b.PoissonGroup(n_input, 0)
        rate_monitors[name + 'e'] = b.PopulationRateMonitor(
            input_groups[name + 'e'],
            bin=(single_example_time + resting_time) / b.second)

    # creating connections from input Poisson spike train to excitatory neuron population(s)
    for name in input_connection_names:
        print '\n...Creating connections between', name[0], 'and', name[1]

        # for each of the input connection types (in this case, excitatory -> excitatory)
        for conn_type in input_conn_names:
            # saved connection name
            conn_name = name[0] + conn_type[0] + name[1] + conn_type[1]

            # get weight matrix depending on training or test phase
            weight_matrix = np.load(
                os.path.join(best_weights_dir,
                             '_'.join([conn_name, ending + '_best.npy'])))

            # create connections from the windows of the input group to the neuron population
            input_connections[conn_name] = b.Connection(input_groups['Xe'], neuron_groups[name[1] + conn_type[1]], \
                structure='sparse', state='g' + conn_type[0], delay=True, max_delay=delay[conn_type][1])

            for feature in xrange(conv_features):
                for n in xrange(n_e):
                    for idx in xrange(conv_size**2):
                        input_connections[conn_name][convolution_locations[n][idx], feature * n_e + n] = \
                                weight_matrix[convolution_locations[n][idx], feature * n_e + n]

            if do_plot:
                plot_2d_input_weights()
                fig_num += 1

    print '\n'
b.ion()
fig_num = 1
neuron_groups = {}
input_groups = {}
connections = {}
stdp_methods = {}
rate_monitors = {}
spike_monitors = {}
spike_counters = {}
result_monitor = np.zeros((update_interval, n_e))

neuron_groups['e'] = b.NeuronGroup(n_e * len(population_names),
                                   neuron_eqs_e,
                                   threshold=v_thresh_e,
                                   refractory=refrac_e,
                                   reset=scr_e,
                                   compile=True,
                                   freeze=True)

neuron_groups['i'] = b.NeuronGroup(n_i * len(population_names),
                                   neuron_eqs_i,
                                   threshold=v_thresh_i,
                                   refractory=refrac_i,
                                   reset=v_reset_i,
                                   compile=True,
                                   freeze=True)

#------------------------------------------------------------------------------
# create network population and recurrent connections
#------------------------------------------------------------------------------
Example #11
    //V[i] = (V[i]*!has_spiked)+(-0.06)*has_spiked; // i.e. V[i]=-0.06 if spiked[i] - but this operation requires two global mem accesses
    if(has_spiked)
      V[i] = -0.06;
}
""".replace('SCALAR', precision))
stateupdate_and_threshold = mod.get_function("stateupdate_and_threshold")
reset = mod.get_function("reset")

V = gpuarray.to_gpu(
    numpy.array(numpy.random.rand(N) * 0.01 - 0.06, dtype=mydtype))
ge = gpuarray.to_gpu(numpy.zeros(N, dtype=mydtype))
gi = gpuarray.to_gpu(numpy.zeros(N, dtype=mydtype))

we = 0.00162
wi = -0.009
P = brian.NeuronGroup(N, model='V:1')
Pe = P.subgroup(Ne)
Pi = P.subgroup(Ni)
Ce = brian.Connection(Pe, P, sparseness=sparseness,
                      weight=we).W.connection_matrix()
Ci = brian.Connection(Pi, P, sparseness=sparseness,
                      weight=wi).W.connection_matrix()
Ce.alldata = numpy.array(Ce.alldata, dtype=mydtype)
Ci.alldata = numpy.array(Ci.alldata, dtype=mydtype)
Ce_alldata = drv.mem_alloc(Ce.alldata.nbytes)
drv.memcpy_htod(Ce_alldata, Ce.alldata)
Ce_allj = drv.mem_alloc(Ce.allj.nbytes)
drv.memcpy_htod(Ce_allj, Ce.allj)
Ce_rowind = drv.mem_alloc(Ce.rowind.nbytes)
drv.memcpy_htod(Ce_rowind, Ce.rowind)
Ci_alldata = drv.mem_alloc(Ci.alldata.nbytes)
def fft_nostd(qee, run_num, new_connectivity, osc, rep):

    #bn.seed(int(time.time()))
    #  bn.seed(1412958308+2)
    bn.reinit_default_clock()
    bn.defaultclock.dt = 0.5 * bn.ms

    #==============================================================================
    # Define constants for the model.
    #==============================================================================
    fft_file = './nostd_fft_p20_'
    rate_file = './nostd_rate_p20_'

    if osc:
        T = 8.0 * bn.second
    else:
        T = 3.5 * bn.second
    n_tsteps = int(T / bn.defaultclock.dt)
    fft_start = int(3.0 * bn.second / bn.defaultclock.dt)  # start index of the FFT time window
    #run_num = 10
    ro = 1.2 * bn.Hz
    #==============================================================================
    #   Need to do all others besides 0.2 and 0.5
    #==============================================================================
    print qee
    print run_num
    print new_connectivity
    print rep
    qie = 0.3  # Fraction of NMDA receptors for e to i connections

    k = 0.65
    Jeo_const = 1.0  #*bn.mV # Base strength of o (external) to e connections

    Ne = 3200  # number of excitatory neurons
    Ni = 800  # number of inhibitory neurons
    No = 2000  # number of external neurons
    N = Ne + Ni

    pcon = 0.2  # probability of connection

    Jee = 5.0 / (Ne * pcon)
    Jie = 5.0 / (Ne * pcon)
    Jii = k * 5.0 / (Ni * pcon)
    Jei = k * 5.0 / (Ni * pcon)
    Jeo = 1.0

    El = -60.0 * bn.mV  # leak reversal potential
    Vreset = -52.0 * bn.mV  # reset potential
    Vthresh = -40.0 * bn.mV  # spiking threshold

    tref = 2.0 * bn.ms  # refractory period
    te = 20.0 * bn.ms  # membrane time constant of excitatory neurons
    ti = 10.0 * bn.ms  # membrane time constant of inhibitory neurons
    tee_ampa = 10.0 * bn.ms  # time const of ampa currents at excitatory neurons
    tee_nmda = 100.0 * bn.ms  # time const of nmda currents at excitatory neurons
    tie_ampa = 10.0 * bn.ms  # time const of ampa currents at inhibitory neurons
    tie_nmda = 100.0 * bn.ms  # time const of nmda currents at inhibitory neurons
    tii_gaba = 10.0 * bn.ms  # time const of GABA currents at inhibitory neurons
    tei_gaba = 10.0 * bn.ms  # time const of GABA currents at excitatory neurons
    teo_input = 100.0 * bn.ms

    #==============================================================================
    # Define model structure
    #==============================================================================

    model = '''
  dV/dt = (-(V-El)+J_ampa*I_ampa+J_nmda*I_nmda-J_gaba*I_gaba+J_input*I_input+eta+eta_corr)/tm : bn.volt
  dI_ampa/dt = -I_ampa/t_ampa : bn.volt
  dI_nmda/dt = -I_nmda/t_nmda : bn.volt
  dI_gaba/dt = -I_gaba/t_gaba : bn.volt
  dI_input/dt = (-I_input+mu)/t_input : bn.volt
  J_ampa : 1
  J_nmda : 1
  J_gaba : 1
  J_input : 1
  mu : bn.volt
  eta : bn.volt
  eta_corr : bn.volt
  tm : bn.second
  t_ampa : bn.second
  t_nmda : bn.second
  t_gaba : bn.second
  t_input : bn.second
  '''

    P_reset = "V=-52*bn.mV"

    Se_model = '''
  we_ampa : bn.volt
  we_nmda : bn.volt
  '''

    Se_pre = ('I_ampa += we_ampa', 'I_nmda += we_nmda')

    Si_model = '''
  wi_gaba : bn.volt
  '''

    Si_pre = 'I_gaba += wi_gaba'

    So_model = '''
  wo_input : bn.volt
  '''

    So_pre = 'I_input += wo_input'

    #==============================================================================
    # Define populations
    #==============================================================================

    P = bn.NeuronGroup(N,
                       model,
                       threshold=Vthresh,
                       reset=P_reset,
                       refractory=tref)

    Pe = P[0:Ne]
    Pe.tm = te
    Pe.t_ampa = tee_ampa
    Pe.t_nmda = tee_nmda
    Pe.t_gaba = tei_gaba
    Pe.t_input = teo_input
    Pe.I_ampa = 0 * bn.mV
    Pe.I_nmda = 0 * bn.mV
    Pe.I_gaba = 0 * bn.mV
    Pe.I_input = 0 * bn.mV
    Pe.V = (np.random.rand(Pe.V.size) * 12 - 52) * bn.mV

    Pi = P[Ne:(Ne + Ni)]
    Pi.tm = ti
    Pi.t_ampa = tie_ampa
    Pi.t_nmda = tie_nmda
    Pi.t_gaba = tii_gaba
    Pi.t_input = teo_input
    Pi.I_ampa = 0 * bn.mV
    Pi.I_nmda = 0 * bn.mV
    Pi.I_gaba = 0 * bn.mV
    Pi.I_input = 0 * bn.mV
    Pi.V = (np.random.rand(Pi.V.size) * 12 - 52) * bn.mV

    Pe.J_ampa = Jee * (1 - qee)  #*SEE1
    Pe.J_nmda = Jee * qee  #*SEE1

    Pi.J_ampa = Jie * (1 - qie)  #*SEE1
    Pi.J_nmda = Jie * qie  #*SEE1

    Pe.J_gaba = Jei
    Pi.J_gaba = Jii

    Pe.J_input = Jeo
    Pi.J_input = Jeo

    #==============================================================================
    # Define inputs
    #==============================================================================
    if osc:
        Pe.mu = 2.0 * bn.mV
        holder = np.zeros((n_tsteps, ))
        t_freq = np.linspace(0, 10, n_tsteps)

        fo = 0.2  # Smallest frequency in the signal
        fe = 10.0  # Largest frequency in the signal
        F = int(fe / 0.2)
        for m in range(1, F + 1):
            holder = holder + np.cos(2 * np.pi * m * fo * t_freq - m *
                                     (m - 1) * np.pi / F)
        holder = holder / np.max(holder)
        Pe.eta = bn.TimedArray(0.0 * bn.mV * holder)  #, dt=0.5*bn.ms)
        Pe.eta_corr = 0 * bn.mV

        Background_eo = bn.PoissonInput(Pe,
                                        N=1000,
                                        rate=1.0 * bn.Hz,
                                        weight=0.2 * bn.mV,
                                        state='I_input')
        Background_io = bn.PoissonInput(Pi,
                                        N=1000,
                                        rate=1.05 * bn.Hz,
                                        weight=0.2 * bn.mV,
                                        state='I_input')

        Pi.mu = 0 * bn.mV
        Pi.eta = 0 * bn.mV  #, dt=0.5*bn.ms)
        Pi.eta_corr = 0 * bn.mV

        Po = bn.PoissonGroup(No, rates=0 * bn.Hz)

    else:
        Background_eo = bn.PoissonInput(Pe,
                                        N=1000,
                                        rate=1.0 * bn.Hz,
                                        weight=0.2 * bn.mV,
                                        state='I_input')
        Background_io = bn.PoissonInput(Pi,
                                        N=1000,
                                        rate=1.05 * bn.Hz,
                                        weight=0.2 * bn.mV,
                                        state='I_input')

        holder_pe = np.zeros((n_tsteps, ))
        time_steps = np.linspace(0, T / bn.second, n_tsteps)
        holder_pe[time_steps < 0.5] = 0.0 * bn.mV
        holder_pe[time_steps >= 0.5] = 3.0 * bn.mV  # 35.0/Jeo *bn.mV #25
        Pe.mu = bn.TimedArray(holder_pe)

        def firing_function(t, ro):
            if t > 0.5 * bn.second and t < 3.5 * bn.second:
                return 0.0 * bn.Hz
            else:
                return 0.0 * bn.Hz

#    Pe.mu = 0*bn.mV

        Pe.eta = 0 * bn.mV  #, dt=0.5*bn.ms)
        Pi.mu = 0.0 * bn.mV
        Pi.eta = 0 * bn.mV  #, dt=0.5*bn.ms)

        Po = bn.PoissonGroup(No, rates=lambda t: firing_function(t, ro))

#==============================================================================
# Define synapses
#==============================================================================

    See = bn.Synapses(Pe, Pe, model=Se_model, pre=Se_pre)
    Sie = bn.Synapses(Pe, Pi, model=Se_model, pre=Se_pre)

    Sei = bn.Synapses(Pi, Pe, model=Si_model, pre=Si_pre)
    Sii = bn.Synapses(Pi, Pi, model=Si_model, pre=Si_pre)

    Seo = bn.Synapses(Po, Pe, model=So_model, pre=So_pre)

    #==============================================================================
    #  Define monitors
    #==============================================================================

    Pe_mon_V = bn.StateMonitor(Pe, 'V', timestep=1, record=True)
    Pe_mon_eta = bn.StateMonitor(Pe, 'eta', timestep=1, record=True)
    Pe_mon_ampa = bn.StateMonitor(Pe, 'I_ampa', timestep=1, record=True)
    Pe_mon_nmda = bn.StateMonitor(Pe, 'I_nmda', timestep=1, record=True)
    Pe_mon_gaba = bn.StateMonitor(Pe, 'I_gaba', timestep=1, record=True)
    Pe_ratemon = bn.PopulationRateMonitor(Pe, bin=10.0 * bn.ms)

    #==============================================================================
    # Define random connections
    #==============================================================================

    if new_connectivity:
        See.connect_random(Pe, Pe, sparseness=pcon)
        Sie.connect_random(Pe, Pi, sparseness=pcon)
        Sii.connect_random(Pi, Pi, sparseness=pcon)
        Sei.connect_random(Pi, Pe, sparseness=pcon)
        Seo.connect_random(Po, Pe, sparseness=pcon)

        print 'Saving'
        See.save_connectivity('./See_connections_nostd_saver_p20' +
                              str(run_num))
        Sie.save_connectivity('./Sie_connections_nostd_saver_p20' +
                              str(run_num))
        Sii.save_connectivity('./Sii_connections_nostd_saver_p20' +
                              str(run_num))
        Sei.save_connectivity('./Sei_connections_nostd_saver_p20' +
                              str(run_num))
        Seo.save_connectivity('./Seo_connections_nostd_saver_p20' +
                              str(run_num))

    else:
        print 'Loading'
        See.load_connectivity('./See_connections_nostd_saver_p20' +
                              str(run_num))
        Sie.load_connectivity('./Sie_connections_nostd_saver_p20' +
                              str(run_num))
        Sii.load_connectivity('./Sii_connections_nostd_saver_p20' +
                              str(run_num))
        Sei.load_connectivity('./Sei_connections_nostd_saver_p20' +
                              str(run_num))
        Seo.load_connectivity('./Seo_connections_nostd_saver_p20' +
                              str(run_num))

    See.we_ampa = 1.0 * bn.mV / tee_ampa
    See.we_nmda = 1.0 * bn.mV / tee_nmda

    Sie.we_ampa = 1.0 * bn.mV / tie_ampa
    Sie.we_nmda = 1.0 * bn.mV / tie_nmda

    Sei.wi_gaba = 1.0 * bn.mV / tei_gaba
    Sii.wi_gaba = 1.0 * bn.mV / tii_gaba

    Seo.wo_input = 1.0 * bn.mV / teo_input

    #==============================================================================
    # Run model
    #==============================================================================

    timer = 0 * bn.second
    t_start = time.time()
    bn.run(T, report='graphical')
    timer = timer + T
    print '-------------------------------------------------------'
    print 'Time is ' + str(timer) + ' seconds'
    t_end = time.time()
    print 'Time to compute last ' +str(T)+' seconds is: ' + \
          str(t_end - t_start) + ' seconds'
    print '-------------------------------------------------------\n'

    #==============================================================================
    # Save into a Matlab file
    #==============================================================================

    if osc:
        Pe_output = Pe.J_ampa[0] * Pe_mon_ampa.values + Pe.J_nmda[
            0] * Pe_mon_nmda.values - Pe.J_gaba[0] * Pe_mon_gaba.values
        Pe_output = Pe_output[:, fft_start:, ]
        Pe_glut = Pe.J_ampa[0] * Pe_mon_ampa.values + Pe.J_nmda[
            0] * Pe_mon_nmda.values
        Pe_glut = Pe_glut[:, fft_start:, ]
        Pe_gaba = Pe.J_gaba[0] * Pe_mon_gaba.values[:, fft_start:, ]

        Pe_V = Pe_mon_V.values[:, fft_start:, ]
        Pe_input = Pe_mon_eta.values[:, fft_start:, ]
        T_step = bn.defaultclock.dt

        holder = {
            'Pe_output': Pe_output,
            'Pe_input': Pe_input,
            'Pe_V': Pe_V,
            'Pe_glut': Pe_glut,
            'Pe_gaba': Pe_gaba,
            'T_step': T_step
        }
        scipy.io.savemat(fft_file + 'qee' + str(qee) + '_' + str(rep),
                         mdict=holder)

    else:
        holder = {'Pe_rate': Pe_ratemon.rate, 'Pe_time': Pe_ratemon.times}
        scipy.io.savemat(rate_file + 'qee_' + str(qee) + '_' + str(run_num) +
                         'rep' + str(rep),
                         mdict=holder)
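
A hypothetical driver for fft_nostd (assuming the module-level imports it relies on, i.e. brian as bn, numpy as np, scipy.io and time, are present); the first repetition builds and saves the connectivity caches, later ones reload them:

for rep in range(3):
    fft_nostd(qee=0.2, run_num=1,
              new_connectivity=(rep == 0),
              osc=True, rep=rep)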
def build_network():
    global fig_num

    neuron_groups['e'] = b.NeuronGroup(n_e_total,
                                       neuron_eqs_e,
                                       threshold=v_thresh_e,
                                       refractory=refrac_e,
                                       reset=scr_e,
                                       compile=True,
                                       freeze=True)
    neuron_groups['i'] = b.NeuronGroup(n_e_total,
                                       neuron_eqs_i,
                                       threshold=v_thresh_i,
                                       refractory=refrac_i,
                                       reset=v_reset_i,
                                       compile=True,
                                       freeze=True)

    for name in ['A']:
        print '...Creating neuron group:', name

        # get a subgroup of size 'n_e' from all exc
        neuron_groups[name + 'e'] = neuron_groups['e'].subgroup(conv_features *
                                                                n_e)
        # get a subgroup of size 'n_i' from the inhibitory layer
        neuron_groups[name + 'i'] = neuron_groups['i'].subgroup(conv_features *
                                                                n_e)

        # start the membrane potentials of these groups 40mV below their resting potentials
        neuron_groups[name + 'e'].v = v_rest_e - 40. * b.mV
        neuron_groups[name + 'i'].v = v_rest_i - 40. * b.mV

    print '...Creating recurrent connections'

    for name in ['A']:
        neuron_groups['e'].theta = np.load(
            os.path.join(best_weights_dir,
                         '_'.join(['theta_A', ending + '_best.npy'])))

        for conn_type in ['ei', 'ie']:
            if conn_type == 'ei':
                # create connection name (composed of population and connection types)
                conn_name = name + conn_type[0] + name + conn_type[1]
                # create a connection from the first group in conn_name with the second group
                connections[conn_name] = b.Connection(neuron_groups[conn_name[0:2]], \
                    neuron_groups[conn_name[2:4]], structure='sparse', state='g' + conn_type[0])
                # instantiate the created connection
                for feature in xrange(conv_features):
                    for n in xrange(n_e):
                        connections[conn_name][feature * n_e + n,
                                               feature * n_e + n] = 10.4

            elif conn_type == 'ie' and not remove_inhibition:
                # create connection name (composed of population and connection types)
                conn_name = name + conn_type[0] + name + conn_type[1]
                # create a connection from the first group in conn_name with the second group
                connections[conn_name] = b.Connection(neuron_groups[conn_name[0:2]], \
                    neuron_groups[conn_name[2:4]], structure='sparse', state='g' + conn_type[0])
                # define the actual synaptic connections and strengths
                for feature in xrange(conv_features):
                    if inhib_scheme in ['far', 'strengthen']:
                        for other_feature in set(range(conv_features)) - set(
                                neighbor_mapping[feature]):
                            if inhib_scheme == 'far':
                                for n in xrange(n_e):
                                    connections[conn_name][feature * n_e + n,
                                                           other_feature *
                                                           n_e + n] = 17.4

                            elif inhib_scheme == 'strengthen':
                                if n_e == 1:
                                    x, y = feature // np.sqrt(
                                        n_e_total), feature % np.sqrt(
                                            n_e_total)
                                    x_, y_ = other_feature // np.sqrt(
                                        n_e_total), other_feature % np.sqrt(
                                            n_e_total)
                                else:
                                    x, y = feature // np.sqrt(
                                        conv_features), feature % np.sqrt(
                                            conv_features)
                                    x_, y_ = other_feature // np.sqrt(
                                        conv_features
                                    ), other_feature % np.sqrt(conv_features)

                                for n in xrange(n_e):
                                    connections[conn_name][feature * n_e + n, other_feature * n_e + n] = \
                                        min(17.4, inhib_const * np.sqrt(euclidean([x, y], [x_, y_])))

                    elif inhib_scheme == 'increasing':
                        for other_feature in xrange(conv_features):
                            if n_e == 1:
                                x, y = feature // np.sqrt(
                                    n_e_total), feature % np.sqrt(n_e_total)
                                x_, y_ = other_feature // np.sqrt(
                                    n_e_total), other_feature % np.sqrt(
                                        n_e_total)
                            else:
                                x, y = feature // np.sqrt(
                                    conv_features), feature % np.sqrt(
                                        conv_features)
                                x_, y_ = other_feature // np.sqrt(
                                    conv_features), other_feature % np.sqrt(
                                        conv_features)

                            if feature != other_feature:
                                for n in xrange(n_e):
                                    connections[conn_name][feature * n_e + n, other_feature * n_e + n] = \
                                        min(17.4, inhib_const * np.sqrt(euclidean([x, y], [x_, y_])))

                    else:
                        raise Exception(
                            'Expecting one of "far", "increasing", or "strengthen" for argument "inhib_scheme".'
                        )

        # spike rate monitors for excitatory and inhibitory neuron populations
        rate_monitors[name + 'e'] = b.PopulationRateMonitor(
            neuron_groups[name + 'e'],
            bin=(single_example_time + resting_time) / b.second)
        rate_monitors[name + 'i'] = b.PopulationRateMonitor(
            neuron_groups[name + 'i'],
            bin=(single_example_time + resting_time) / b.second)
        spike_counters[name + 'e'] = b.SpikeCounter(neuron_groups[name + 'e'])

        # record neuron population spikes if specified
        if record_spikes:
            spike_monitors[name + 'e'] = b.SpikeMonitor(neuron_groups[name +
                                                                      'e'])
            spike_monitors[name + 'i'] = b.SpikeMonitor(neuron_groups[name +
                                                                      'i'])

    if record_spikes and do_plot:
        if reset_state_vars:
            time_window = single_example_time * 1000
        else:
            time_window = (single_example_time + resting_time) * 1000

        b.figure(fig_num, figsize=(8, 6))
        b.ion()
        b.subplot(211)
        b.raster_plot(spike_monitors['Ae'],
                      refresh=time_window * b.ms,
                      showlast=time_window * b.ms,
                      title='Excitatory spikes per neuron')
        b.subplot(212)
        b.raster_plot(spike_monitors['Ai'],
                      refresh=time_window * b.ms,
                      showlast=time_window * b.ms,
                      title='Inhibitory spikes per neuron')
        b.tight_layout()

        fig_num += 1

    # creating Poisson spike train from input image (784 vector, 28x28 image)
    for name in ['X']:
        input_groups[name + 'e'] = b.PoissonGroup(n_input, 0)
        rate_monitors[name + 'e'] = b.PopulationRateMonitor(
            input_groups[name + 'e'],
            bin=(single_example_time + resting_time) / b.second)

    # creating connections from input Poisson spike train to convolution patch populations
    for name in ['XA']:
        print '\n...Creating connections between', name[0], 'and', name[1]

        # for each of the input connection types (in this case, excitatory -> excitatory)
        for conn_type in ['ee_input']:
            # saved connection name
            conn_name = name[0] + conn_type[0] + name[1] + conn_type[1]

            # get weight matrix depending on training or test phase
            weight_matrix = np.load(
                os.path.join(best_weights_dir,
                             '_'.join([conn_name, ending + '_best.npy'])))

            # create connections from the windows of the input group to the neuron population
            input_connections[conn_name] = b.Connection(input_groups['Xe'], neuron_groups[name[1] + \
              conn_type[1]], structure='sparse', state='g' + conn_type[0], delay=True, max_delay=delay[conn_type][1])

            for feature in xrange(conv_features):
                for n in xrange(n_e):
                    for idx in xrange(conv_size**2):
                        input_connections[conn_name][convolution_locations[n][idx], feature * n_e + n] = \
                             weight_matrix[convolution_locations[n][idx], feature * n_e + n]

            if do_plot:
                plot_2d_input_weights()
                fig_num += 1
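
The build_network() variants above only assemble the (magic) Brian 1 network; below is a sketch of a driving loop, assuming the module-level globals they rely on (num_examples, n_input, input_intensity, single_example_time, resting_time, numpy as np) are defined as in the surrounding script, and using random rates as a placeholder for the real input images:

build_network()

for j in xrange(num_examples):
    # hypothetical input: replace with the rescaled image for example j
    rates = np.random.rand(n_input) * 32.
    input_groups['Xe'].rate = rates / 8. * input_intensity
    b.run(single_example_time)
    # rest period with the input switched off
    input_groups['Xe'].rate = 0
    b.run(resting_time)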
Example #14
def make_mso_group(num,
                   gbar_na=3900,
                   gbar_klt=650,
                   gbar_h=520,
                   gbar_leak=13,
                   e_rest=-55.8,
                   voltage_clamp=False):
    ''' Creates an MSO neuron group

    Parameters:
    -----------
    num : int
        Number of neurons in the neuron group.
    gbar_na, gbar_klt, gbar_h, gbar_leak : float
        Maximal channel conductances in nS.
    e_rest : float
        Resting potential in mV (between -60 and -55).
    voltage_clamp : bool
        If True, the membrane potential is a plain parameter (clamped)
        rather than being integrated.

    Output
    ------
    A Brian NeuronGroup

    '''
    C = 70 * pF  # [Couchman2010]

    e_h = -35 * mV  # [Baumann2013]
    e_k = -90 * mV

    e_e = -0 * mV  #[Colbourne2005]
    e_i = -70 * mV  #[Coulboure2005]

    e_na = 56.2 * mV
    e_rest = e_rest * mV  # Between -60 and -55

    nf = 1  # proportion of n vs p kinetics
    zss = 0.4  # steady state inactivation of glt

    gbar_na = gbar_na * nS
    gbar_klt = gbar_klt * nS
    gbar_h = gbar_h * nS
    gbar_leak = gbar_leak * nS

    if not voltage_clamp:
        eqs = """
        i_stim : amp
        dvm/dt = - (-i_stim + i_leak + i_na  + i_klt + i_h + i_syn) / C : volt
        vu = vm/mV : 1 # unitless v
        """

    else:
        eqs = """
        i_stim : amp
        vm : volt
        vu = vm/mV : 1 # unitless v
        """

    eqs_na = """
        g_na = gbar_na * m**3 * h : nsiemens
        i_na = g_na * (vm - e_na) : amp

        m_inf = 1 / (1 + exp((vu + 38) / -7)) :1
        h_inf = 1 / (1 + exp((vu + 65) / 6)) :1

        tau_m = (0.48 / (5 * exp((vu + 60) / 18) + 36 * exp((vu + 60) / -25)) ) * ms : ms
        tau_h = (19.23 / (7 * exp((vu + 60) / 11) + 10 * exp((vu + 60) / -25)) + 0.12) * ms : ms

        dm/dt = (m_inf - m) / tau_m :1
        dh/dt = (h_inf - h) / tau_h :1
        """
    eqs += eqs_na

    # [Baumann2013] (Dorsal Cells)
    eqs_h = """
        g_h = gbar_h * a  :nsiemens
        i_h =  g_h * (vm - e_h) : amp

        a_inf = 1 / (1 + exp(0.1 * (vu + 80.4))) :1
        tau_a = (79 + 417 * exp(-(vu + 61.5)**2 / 800)) *ms :ms

        da/dt = (a_inf - a) / tau_a :1
        """
    eqs += eqs_h

    # Potassium low threshold [Khurana2011]
    eqs_klt = """
        g_klt = gbar_klt * w**4 * z : nsiemens
        i_klt =  g_klt * (vm - e_k) : amp

        z_inf = zss + ((1-zss) / (1 + exp((vu + 57) / 5.44))) : 1
        w_inf = 1 / (1 + exp(-(vu + 57.3) / 11.7)) :1


        tau_w = 0.46 * (100. / (6. * exp((vu + 75.) / 12.15) + 24. * exp(-(vu + 75.) / 25) + 0.55)) * ms : ms
        tau_z = 0.24 * ((1000 / (exp((vu + 60) / 20) + exp(-(vu + 60) / 8))) + 50) * ms : ms

        dw/dt =( w_inf - w) / tau_w :1
        dz/dt = (z_inf - z) / tau_z :1
        """
    eqs += eqs_klt

    # leak
    eqs_leak = """
    g_leak = gbar_leak * 1 : nsiemens
    i_leak = g_leak * (vm - e_rest) : amp
    """
    eqs += eqs_leak

    #Synaptic Current
    eqs_syn = """
        ex_syn_i :1
        ex_syn_c :1
        in_syn_i :1
        in_syn_c :1

        i_syn = i_syn_ii + i_syn_ic + i_syn_ei + i_syn_ec : amp

        #inhibitory currents
        i_syn_ii = in_syn_i * siemens * (vm - e_i) : amp
        i_syn_ic = in_syn_c * siemens * (vm - e_i) : amp

        #exitatory currents
        i_syn_ei = ex_syn_i * siemens * (vm - e_e) : amp
        i_syn_ec = ex_syn_c * siemens * (vm - e_e) : amp
        """
    eqs += eqs_syn

    group = brian.NeuronGroup(
        N=num,
        model=eqs,
        threshold=brian.EmpiricalThreshold(threshold=-30 * mV,
                                           refractory=0.5 * ms),
        implicit=True,
    )

    ### Set initial conditions
    group.vm = e_rest
    group.m = group.m_inf
    group.h = group.h_inf
    group.w = group.w_inf
    group.z = group.z_inf
    group.a = group.a_inf

    return group
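
Usage sketch for the MSO group, mirroring the GBC example above (the unit names used inside the function are assumed to be imported):

import brian
from brian import ms, namp

mso = make_mso_group(10)
mso.i_stim = 0.5 * namp
spikes = brian.SpikeMonitor(mso)
brian.run(50 * ms)
print(spikes.nspikes)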
Example #15
def build_network():
    global fig_num, assignments

    neuron_groups['e'] = b.NeuronGroup(n_e_total,
                                       neuron_eqs_e,
                                       threshold=v_thresh_e,
                                       refractory=refrac_e,
                                       reset=scr_e,
                                       compile=True,
                                       freeze=True)
    neuron_groups['i'] = b.NeuronGroup(n_e_total,
                                       neuron_eqs_i,
                                       threshold=v_thresh_i,
                                       refractory=refrac_i,
                                       reset=v_reset_i,
                                       compile=True,
                                       freeze=True)

    for name in population_names:
        print '...Creating neuron group:', name

        # get a subgroup of size 'n_e' from all exc
        neuron_groups[name + 'e'] = neuron_groups['e'].subgroup(conv_features *
                                                                n_e)
        # get a subgroup of size 'n_i' from the inhibitory layer
        neuron_groups[name + 'i'] = neuron_groups['i'].subgroup(conv_features *
                                                                n_e)

        # start the membrane potentials of these groups 40mV below their resting potentials
        neuron_groups[name + 'e'].v = v_rest_e - 40. * b.mV
        neuron_groups[name + 'i'].v = v_rest_i - 40. * b.mV

    print '...Creating recurrent connections'

    for name in population_names:
        # if we're in test mode / using some stored weights
        if test_mode:
            # load up adaptive threshold parameters
            if save_best_model:
                neuron_groups['e'].theta = np.load(
                    os.path.join(best_weights_dir,
                                 '_'.join(['theta_A', ending + '_best.npy'])))
            else:
                neuron_groups['e'].theta = np.load(
                    os.path.join(end_weights_dir,
                                 '_'.join(['theta_A', ending + '_end.npy'])))
        else:
            # otherwise, set the adaptive additive threshold parameter at 20mV
            neuron_groups['e'].theta = np.ones((n_e_total)) * 20.0 * b.mV

        for conn_type in recurrent_conn_names:
            if conn_type == 'ei':
                # create connection name (composed of population and connection types)
                conn_name = name + conn_type[0] + name + conn_type[1]
                # create a connection from the first group in conn_name with the second group
                connections[conn_name] = b.Connection(
                    neuron_groups[conn_name[0:2]],
                    neuron_groups[conn_name[2:4]],
                    structure='sparse',
                    state='g' + conn_type[0])

                # instantiate the created connection
                for feature in xrange(conv_features):
                    for n in xrange(n_e):
                        connections[conn_name][feature * n_e + n,
                                               feature * n_e + n] = 10.4

            elif conn_type == 'ie':
                # create connection name (composed of population and connection types)
                conn_name = name + conn_type[0] + name + conn_type[
                    1] + '_' + ending
                # create a connection from the first group in conn_name with the second group
                connections[conn_name] = b.Connection(
                    neuron_groups[conn_name[0:2]],
                    neuron_groups[conn_name[2:4]],
                    structure='sparse',
                    state='g' + conn_type[0])
                # instantiate the created connection with the 'weightMatrix' loaded from file
                for feature in xrange(conv_features):
                    for other_feature in xrange(conv_features):
                        if feature != other_feature:
                            for n in xrange(n_e):
                                connections[conn_name][feature * n_e + n,
                                                       other_feature * n_e +
                                                       n] = 17.4

        print '...Creating monitors for:', name

        # spike rate monitors for excitatory and inhibitory neuron populations
        rate_monitors[name + 'e'] = b.PopulationRateMonitor(
            neuron_groups[name + 'e'],
            bin=(single_example_time + resting_time) / b.second)
        rate_monitors[name + 'i'] = b.PopulationRateMonitor(
            neuron_groups[name + 'i'],
            bin=(single_example_time + resting_time) / b.second)
        spike_counters[name + 'e'] = b.SpikeCounter(neuron_groups[name + 'e'])

        # record neuron population spikes if specified
        if record_spikes or plot:
            spike_monitors[name + 'e'] = b.SpikeMonitor(neuron_groups[name +
                                                                      'e'])
            spike_monitors[name + 'i'] = b.SpikeMonitor(neuron_groups[name +
                                                                      'i'])

    if record_spikes and plot:
        b.figure(fig_num, figsize=(8, 6))

        fig_num += 1

        b.ion()
        b.subplot(211)
        b.raster_plot(spike_monitors['Ae'],
                      refresh=1000 * b.ms,
                      showlast=1000 * b.ms,
                      title='Excitatory spikes per neuron')
        b.subplot(212)
        b.raster_plot(spike_monitors['Ai'],
                      refresh=1000 * b.ms,
                      showlast=1000 * b.ms,
                      title='Inhibitory spikes per neuron')
        b.tight_layout()

    # creating Poisson spike train from input image (784 vector, 28x28 image)
    for name in input_population_names:
        input_groups[name + 'e'] = b.PoissonGroup(n_input, 0)
        rate_monitors[name + 'e'] = b.PopulationRateMonitor(
            input_groups[name + 'e'],
            bin=(single_example_time + resting_time) / b.second)

    # creating connections from input Poisson spike train to excitatory neuron population(s)
    for name in input_connection_names:
        print '\n...Creating connections between', name[0], 'and', name[1]

        # for each of the input connection types (in this case, excitatory -> excitatory)
        for conn_type in input_conn_names:
            # saved connection name
            conn_name = name[0] + conn_type[0] + name[1] + conn_type[1]

            # get weight matrix depending on training or test phase
            if test_mode:
                if save_best_model:
                    weight_matrix = np.load(
                        os.path.join(
                            best_weights_dir,
                            '_'.join([conn_name, ending + '_best.npy'])))
                else:
                    weight_matrix = np.load(
                        os.path.join(
                            end_weights_dir,
                            '_'.join([conn_name, ending + '_end.npy'])))

            # create connections from the windows of the input group to the neuron population
            input_connections[conn_name] = b.Connection(input_groups['Xe'], neuron_groups[name[1] + conn_type[1]], \
                structure='sparse', state='g' + conn_type[0], delay=True, max_delay=delay[conn_type][1])

            if test_mode:
                for feature in xrange(conv_features):
                    for n in xrange(n_e):
                        for idx in xrange(conv_size**2):
                            input_connections[conn_name][convolution_locations[n][idx], feature * n_e + n] = \
                                    weight_matrix[convolution_locations[n][idx], feature * n_e + n]
            else:
                for feature in xrange(conv_features):
                    for n in xrange(n_e):
                        for idx in xrange(conv_size**2):
                            input_connections[conn_name][
                                convolution_locations[n][idx],
                                feature * n_e + n] = (b.random() + 0.01) * 0.3

            if test_mode:
                if plot:
                    plot_weights_and_assignments(assignments)
                    fig_num += 1

        # if excitatory -> excitatory STDP is specified, add it here (input to excitatory populations)
        if not test_mode:
            print '...Creating STDP for connection', name

            # STDP connection name
            conn_name = name[0] + conn_type[0] + name[1] + conn_type[1]
            # create the STDP object
            stdp_methods[conn_name] = b.STDP(input_connections[conn_name], eqs=eqs_stdp_ee, \
                pre=eqs_stdp_pre_ee, post=eqs_stdp_post_ee, wmin=0., wmax=wmax_ee)

    print '\n'
	def __init__(self, mode, connectivity, weight_dependence, post_pre, conv_size, conv_stride, conv_features, weight_sharing, lattice_structure, random_inhibition_prob, top_percent):
		'''
		Network initialization.
		'''

		# setting input parameters
		self.mode = mode
		self.connectivity = connectivity
		self.weight_dependence = weight_dependence
		self.post_pre = post_pre
		self.conv_size = conv_size
		self.conv_features = conv_features
		self.weight_sharing = weight_sharing
		self.lattice_structure = lattice_structure
		self.random_inhibition_prob = random_inhibition_prob

		# load training or testing data
		if mode == 'train':
		    start = time.time()
		    self.data = get_labeled_data(MNIST_data_path + 'training')
		    end = time.time()
		    print 'time needed to load training set:', end - start
		else:
		    start = time.time()
		    self.data = get_labeled_data(MNIST_data_path + 'testing', bTrain = False)
		    end = time.time()
		    print 'time needed to load test set:', end - start

		# set parameters for simulation based on train / test mode
		if test_mode:
			weight_path = top_level_path + 'weights/conv_patch_connectivity_weights/'
			self.num_examples = 10000 * 1
			self.do_plot_performance = False
			ee_STDP_on = False
		else:
			weight_path = top_level_path + 'random/conv_patch_connectivity_random/'
			self.num_examples = 60000 * 1
			self.do_plot_performance = True
			ee_STDP_on = True

		# plotting or not
		do_plot = True

		# number of inputs to the network
		self.n_input = 784
		self.n_input_sqrt = int(math.sqrt(n_input))

		# number of neurons parameters
		self.n_e = ((n_input_sqrt - conv_size) / conv_stride + 1) ** 2
		self.n_e_total = n_e * conv_features
		self.n_e_sqrt = int(math.sqrt(n_e))
		self.n_i = n_e
		self.conv_features_sqrt = int(math.sqrt(conv_features))

		# time (in seconds) per data example presentation and rest period in between, used to calculate total runtime
		self.single_example_time = 0.35 * b.second
		self.resting_time = 0.15 * b.second
		runtime = num_examples * (single_example_time + resting_time)

		# set the update interval
		if test_mode:
			self.update_interval = num_examples
		else:
			self.update_interval = 100

		# rest potential parameters, reset potential parameters, threshold potential parameters, and refractory periods
		v_rest_e, v_rest_i = -65. * b.mV, -60. * b.mV
		v_reset_e, v_reset_i = -65. * b.mV, -45. * b.mV
		v_thresh_e, v_thresh_i = -52. * b.mV, -40. * b.mV
		refrac_e, refrac_i = 5. * b.ms, 2. * b.ms

		# dictionaries for weights and delays
		weight, delay = {}, {}

		# populations, connections, saved connections, etc.
		input_population_names = [ 'X' ]
		population_names = [ 'A' ]
		input_connection_names = [ 'XA' ]
		save_conns = [ 'XeAe', 'AeAe' ]

		# weird and bad names for variables, I think
		input_conn_names = [ 'ee_input' ]
		recurrent_conn_names = [ 'ei', 'ie', 'ee' ]
		
		# setting weight, delay, and intensity parameters
		weight['ee_input'] = (conv_size ** 2) * 0.175
		delay['ee_input'] = (0 * b.ms, 10 * b.ms)
		delay['ei_input'] = (0 * b.ms, 5 * b.ms)
		input_intensity = start_input_intensity = 2.0

		# time constants, learning rates, max weights, weight dependence, etc.
		tc_pre_ee, tc_post_ee = 20 * b.ms, 20 * b.ms
		nu_ee_pre, nu_ee_post = 0.0001, 0.01
		wmax_ee = 1.0
		exp_ee_post = exp_ee_pre = 0.2
		w_mu_pre, w_mu_post = 0.2, 0.2

		# setting up differential equations (depending on train / test mode)
		if test_mode:
			scr_e = 'v = v_reset_e; timer = 0*ms'
		else:
			tc_theta = 1e7 * b.ms
			theta_plus_e = 0.05 * b.mV
			scr_e = 'v = v_reset_e; theta += theta_plus_e; timer = 0*ms'

		offset = 20.0 * b.mV
		v_thresh_e = '(v>(theta - offset + ' + str(v_thresh_e) + ')) * (timer>refrac_e)'

		# equations for neurons
		neuron_eqs_e = '''
				dv/dt = ((v_rest_e - v) + (I_synE + I_synI) / nS) / (100 * ms)  : volt
				I_synE = ge * nS *         -v                           : amp
				I_synI = gi * nS * (-100.*mV-v)                          : amp
				dge/dt = -ge/(1.0*ms)                                   : 1
				dgi/dt = -gi/(2.0*ms)                                  : 1
				'''
		if test_mode:
			neuron_eqs_e += '\n  theta      :volt'
		else:
			neuron_eqs_e += '\n  dtheta/dt = -theta / (tc_theta)  : volt'

		neuron_eqs_e += '\n  dtimer/dt = 100.0 : ms'

		neuron_eqs_i = '''
				dv/dt = ((v_rest_i - v) + (I_synE + I_synI) / nS) / (10*ms)  : volt
				I_synE = ge * nS *         -v                           : amp
				I_synI = gi * nS * (-85.*mV-v)                          : amp
				dge/dt = -ge/(1.0*ms)                                   : 1
				dgi/dt = -gi/(2.0*ms)                                  : 1
				'''

		# creating dictionaries for various objects
		self.neuron_groups = neuron_groups = {}
		self.input_groups = input_groups = {}
		self.connections = connections = {}
		self.input_connections = input_connections = {}
		self.stdp_methods = stdp_methods = {}
		self.rate_monitors = rate_monitors = {}
		self.spike_monitors = spike_monitors = {}
		self.spike_counters = spike_counters = {}

		# creating excitatory, inhibitory populations
		self.neuron_groups['e'] = b.NeuronGroup(n_e_total, neuron_eqs_e, threshold=v_thresh_e, refractory=refrac_e, reset=scr_e, compile=True, freeze=True)
		self.neuron_groups['i'] = b.NeuronGroup(n_e_total, neuron_eqs_i, threshold=v_thresh_i, refractory=refrac_i, reset=v_reset_i, compile=True, freeze=True)

		# creating subpopulations of excitatory, inhibitory neurons
		for name in population_names:
			print '...creating neuron group:', name

			# get a subgroup of size 'n_e' from all exc
			neuron_groups[name + 'e'] = neuron_groups['e'].subgroup(conv_features * n_e)
			# get a subgroup of size 'n_i' from the inhibitory layer
			neuron_groups[name + 'i'] = neuron_groups['i'].subgroup(conv_features * n_e)

			# start the membrane potentials of these groups 40mV below their resting potentials
			neuron_groups[name + 'e'].v = v_rest_e - 40. * b.mV
			neuron_groups[name + 'i'].v = v_rest_i - 40. * b.mV

		print '...creating recurrent connections'

		for name in population_names:
			# if we're in test mode / using some stored weights
			if mode == 'test' or weight_path.endswith('weights/conv_patch_connectivity_weights/'):
				# load up adaptive threshold parameters
				neuron_groups['e'].theta = np.load(weight_path + 'theta_A' + '_' + ending +'.npy')
			else:
				# otherwise, set the adaptive additive threshold parameter at 20mV
				neuron_groups['e'].theta = np.ones((n_e_total)) * 20.0 * b.mV

			for conn_type in recurrent_conn_names:
				if conn_type == 'ei':
					# create connection name (composed of population and connection types)
					conn_name = name + conn_type[0] + name + conn_type[1]
					# create a connection from the first group in conn_name with the second group
					connections[conn_name] = b.Connection(neuron_groups[conn_name[0:2]], neuron_groups[conn_name[2:4]], structure='sparse', state='g' + conn_type[0])
					# instantiate the created connection
					for feature in xrange(conv_features):
						for n in xrange(n_e):
							connections[conn_name][feature * n_e + n, feature * n_e + n] = 10.4

				elif conn_type == 'ie':
					# create connection name (composed of population and connection types)
					conn_name = name + conn_type[0] + name + conn_type[1]
					# create a connection from the first group in conn_name with the second group
					connections[conn_name] = b.Connection(neuron_groups[conn_name[0:2]], neuron_groups[conn_name[2:4]], structure='sparse', state='g' + conn_type[0])
					# instantiate the created connection
					for feature in xrange(conv_features):
						for other_feature in xrange(conv_features):
							if feature != other_feature:
								for n in xrange(n_e):
									connections[conn_name][feature * n_e + n, other_feature * n_e + n] = 17.4

					if random_inhibition_prob != 0.0:
						for feature in xrange(conv_features):
							for other_feature in xrange(conv_features):
								for n_this in xrange(n_e):
									for n_other in xrange(n_e):
										if n_this != n_other:
											if b.random() < random_inhibition_prob:
												connections[conn_name][feature * n_e + n_this, other_feature * n_e + n_other] = 17.4

				elif conn_type == 'ee':
					# create connection name (composed of population and connection types)
					conn_name = name + conn_type[0] + name + conn_type[1]
					# get weights from file if we are in test mode
					if mode == 'test':
						weight_matrix = get_matrix_from_file(weight_path + conn_name + '_' + ending + '.npy', conv_features * n_e, conv_features * n_e)
					# create a connection from the first group in conn_name with the second group
					connections[conn_name] = b.Connection(neuron_groups[conn_name[0:2]], neuron_groups[conn_name[2:4]], structure='sparse', state='g' + conn_type[0])
					# instantiate the created connection
					if connectivity == 'all':
						for feature in xrange(conv_features):
							for other_feature in xrange(conv_features):
								if feature != other_feature:
									for this_n in xrange(n_e):
										for other_n in xrange(n_e):
											if is_lattice_connection(n_e_sqrt, this_n, other_n):
												if mode == 'test':
													connections[conn_name][feature * n_e + this_n, other_feature * n_e + other_n] = weight_matrix[feature * n_e + this_n, other_feature * n_e + other_n]
												else:
													connections[conn_name][feature * n_e + this_n, other_feature * n_e + other_n] = (b.random() + 0.01) * 0.3

					elif connectivity == 'pairs':
						for feature in xrange(conv_features):
							if feature % 2 == 0:
								for this_n in xrange(n_e):
									for other_n in xrange(n_e):
										if is_lattice_connection(n_e_sqrt, this_n, other_n):
											if mode == 'test':
												connections[conn_name][feature * n_e + this_n, (feature + 1) * n_e + other_n] = weight_matrix[feature * n_e + this_n, (feature + 1) * n_e + other_n]
											else:
												connections[conn_name][feature * n_e + this_n, (feature + 1) * n_e + other_n] = (b.random() + 0.01) * 0.3
							elif feature % 2 == 1:
								for this_n in xrange(n_e):
									for other_n in xrange(n_e):
										if is_lattice_connection(n_e_sqrt, this_n, other_n):
											if mode == 'test':
												connections[conn_name][feature * n_e + this_n, (feature - 1) * n_e + other_n] = weight_matrix[feature * n_e + this_n, (feature - 1) * n_e + other_n]
											else:
												connections[conn_name][feature * n_e + this_n, (feature - 1) * n_e + other_n] = (b.random() + 0.01) * 0.3

					elif connectivity == 'none':
						pass

			# if STDP from excitatory -> excitatory is on and this connection is excitatory -> excitatory
			if ee_STDP_on and 'ee' in recurrent_conn_names:
				stdp_methods[name + 'e' + name + 'e'] = b.STDP(connections[name + 'e' + name + 'e'], eqs=eqs_stdp_ee, pre=eqs_stdp_pre_ee, post=eqs_stdp_post_ee, wmin=0., wmax=wmax_ee)

			print '...creating monitors for:', name

			# spike rate monitors for excitatory and inhibitory neuron populations
			rate_monitors[name + 'e'] = b.PopulationRateMonitor(neuron_groups[name + 'e'], bin=(single_example_time + resting_time) / b.second)
			rate_monitors[name + 'i'] = b.PopulationRateMonitor(neuron_groups[name + 'i'], bin=(single_example_time + resting_time) / b.second)
			spike_counters[name + 'e'] = b.SpikeCounter(neuron_groups[name + 'e'])

			# record neuron population spikes if specified
			spike_monitors[name + 'e'] = b.SpikeMonitor(neuron_groups[name + 'e'])
			spike_monitors[name + 'i'] = b.SpikeMonitor(neuron_groups[name + 'i'])
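
The 'ei' / 'ie' wiring above is easier to see at toy sizes. Below is a minimal, self-contained sketch (toy sizes of my own; the weight values 10.4 and 17.4 are taken from the code above) of the same pattern: each excitatory neuron drives its one-to-one inhibitory partner, and each inhibitory neuron suppresses the matching location in every other feature map.

import brian as b
from brian import mV, ms

conv_features, n_e = 3, 4  # toy sizes; the real network uses many more

eqs = '''
dv/dt = (-(v + 65*mV) + (ge - gi)*mV) / (20*ms) : volt
dge/dt = -ge / (1*ms) : 1
dgi/dt = -gi / (2*ms) : 1
'''
exc = b.NeuronGroup(conv_features * n_e, eqs, threshold=-52 * mV, reset=-65 * mV)
inh = b.NeuronGroup(conv_features * n_e, eqs, threshold=-40 * mV, reset=-45 * mV)
exc.v, inh.v = -65 * mV, -60 * mV

# 'ei': each excitatory neuron excites its own inhibitory partner one-to-one
conn_ei = b.Connection(exc, inh, structure='sparse', state='ge')
for feature in xrange(conv_features):
    for n in xrange(n_e):
        conn_ei[feature * n_e + n, feature * n_e + n] = 10.4

# 'ie': each inhibitory neuron inhibits the same location in every *other* feature map
conn_ie = b.Connection(inh, exc, structure='sparse', state='gi')
for feature in xrange(conv_features):
    for other_feature in xrange(conv_features):
        if feature != other_feature:
            for n in xrange(n_e):
                conn_ie[feature * n_e + n, other_feature * n_e + n] = 17.4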
Exemple #17
0
    def __init__(self,
                 input_dim,
                 output_dim,
                 dtype,
                 input_scaling=100,
                 input_conn_frac=.5,
                 dt=1,
                 we_scaling=2,
                 wi_scaling=.5,
                 we_sparseness=.1,
                 wi_sparseness=.1):
        super(BrianIFReservoirNode, self).__init__(input_dim=input_dim,
                                                   output_dim=output_dim,
                                                   dtype=dtype)
        self.taum = 20 * brian.ms
        self.taue = 5 * brian.ms
        self.taui = 10 * brian.ms
        self.Vt = 15 * brian.mV
        self.Vr = 0 * brian.mV
        self.frac_e = .75
        self.input_scaling = input_scaling
        self.input_conn_frac = input_conn_frac
        self.dt = dt
        self.we_scaling = we_scaling
        self.wi_scaling = wi_scaling
        self.we_sparseness = we_sparseness
        self.wi_sparseness = wi_sparseness

        self.eqs = brian.Equations('''
              dV/dt  = (I-V+ge-gi)/self.taum : volt
              dge/dt = -ge/self.taue    : volt 
              dgi/dt = -gi/self.taui    : volt
              I: volt
              ''')
        self.G = brian.NeuronGroup(N=output_dim,
                                   model=self.eqs,
                                   threshold=self.Vt,
                                   reset=self.Vr)
        self.Ge = self.G.subgroup(int(scipy.floor(
            output_dim * self.frac_e)))  # Excitatory neurons
        self.Gi = self.G.subgroup(
            int(scipy.floor(output_dim * (1 - self.frac_e))))

        self.internal_conn = brian.Connection(self.G, self.G)
        self.we = self.we_scaling * scipy.random.rand(len(self.Ge), len(
            self.G)) * brian.nS
        self.wi = self.wi_scaling * scipy.random.rand(len(self.Ge), len(
            self.G)) * brian.nS

        self.Ce = brian.Connection(self.Ge,
                                   self.G,
                                   'ge',
                                   sparseness=self.we_sparseness,
                                   weight=self.we)
        self.Ci = brian.Connection(self.Gi,
                                   self.G,
                                   'gi',
                                   sparseness=self.wi_sparseness,
                                   weight=self.wi)

        #self.internal_conn.connect(self.G, self.G, self.w_res)

        self.Mv = brian.StateMonitor(self.G, 'V', record=True, timestep=10)
        self.Ms = brian.SpikeMonitor(self.G, record=True)
        self.w_in = self.input_scaling * (scipy.random.rand(
            self.output_dim, self.input_dim)) * (scipy.random.rand(
                self.output_dim, self.input_dim) < self.input_conn_frac)
        self.network = brian.Network(self.G, self.Ce, self.Ci, self.Ge,
                                     self.Gi, self.Mv, self.Ms)
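
As an aside, the input weight matrix w_in above is built from two independent random draws: one supplies the weight values, the other decides which input connections exist at all (roughly a fraction input_conn_frac of them). A tiny hedged sketch of that construction with toy dimensions:

import scipy

output_dim, input_dim = 6, 3               # toy sizes for illustration only
input_scaling, input_conn_frac = 100, 0.5

# random values, masked by an independent draw so only ~input_conn_frac survive
w_in = input_scaling * scipy.random.rand(output_dim, input_dim) * \
       (scipy.random.rand(output_dim, input_dim) < input_conn_frac)
print(w_in)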
def run_sim(ffExcInputMult=None, ffInhInputMult=None):
    """Run the cond-based LIF neuron simulation.  Takes a few minutes to construct network and run


    Parameters
    ----------
    ffExcInputMult: scalar: FF input magnitude to E cells.  multiply ffInputV by this value and connect to E cells
    ffInhInputMult: scalar: FF input magnitude to I cells.

    Returns
    -------
    outDict - spike times, records of continuous values from simulation

    """

    # use helper to get input timecourses
    (ffInputV, condAddV) = create_input_vectors(
        doDebugPlot=False)  # multiplied by scalars below

    # setup initial state
    stT = time.time()
    brian.set_global_preferences(usecodegen=True)
    brian.set_global_preferences(useweave=True)
    brian.set_global_preferences(usecodegenweave=True)
    brian.clear(erase=True, all=True)
    brian.reinit_default_clock()
    clk = brian.Clock(dt=0.05 * ms)

    ################

    # create neurons, define connections
    neurNetwork = brian.NeuronGroup(nNet,
                                    model=eqs,
                                    threshold=vthresh,
                                    reset=vrest,
                                    refractory=absRefractoryMs * msecond,
                                    order=1,
                                    compile=True,
                                    freeze=False,
                                    clock=clk)

    # create neuron pools
    neurCE = neurNetwork.subgroup(nExc)
    neurCI = neurNetwork.subgroup(nInh)
    connCE = brian.Connection(neurCE, neurNetwork, 'ge')
    connCI = brian.Connection(neurCI, neurNetwork, 'gi')
    print('n cells: %d, nE,I %d,%d, %s, absRefractoryMs: %d' %
          (nNet, nExc, nInh, repr(clk), absRefractoryMs))

    # connect the network to itself
    connCE.connect_random(neurCE,
                          neurNetwork,
                          internalSparseness,
                          weight=connENetWeight)
    connCI.connect_random(neurCI,
                          neurNetwork,
                          internalSparseness,
                          weight=connINetWeight)

    # connect inputs that change spont rate
    assert (
        spontAddRate <= 0
    ), 'Spont add rate should be negative - convention: neg, excite inhibitory cells'
    spontAddNInpSyn = 100
    nTotalSpontNeurons = (spontAddNInpSyn * nInh * 0.02)
    neurSpont = brian.PoissonGroup(nTotalSpontNeurons,
                                   -1.0 * spontAddRate * Hz)
    connCSpont = brian.Connection(neurSpont, neurCI, 'ge')
    connCSpont.connect_random(
        p=spontAddNInpSyn * 1.0 / nTotalSpontNeurons,
        weight=connENetWeight,  # match internal excitatory strengths
        fixed=True)

    # connect the feedforward visual (poisson) inputs to excitatory cells (ff E)
    ffExcInputNInpSyn = 100
    nTotalFfNeurons = (ffExcInputNInpSyn * ffExcInputNTargs * 0.02
                       )  # one pop of input cells for both E and I FF
    _ffExcInputV = ffExcInputMult * np.abs(a_(ffInputV).copy())
    assert (np.all(
        _ffExcInputV >= 0)), 'Negative FF rates are rectified to zero'
    neurFfExcInput = brian.PoissonGroup(
        nTotalFfNeurons, lambda t: _ffExcInputV[int(t * 1000)] * Hz)
    connCFfExcInput = brian.Connection(neurFfExcInput, neurNetwork, 'ge')
    connCFfExcInput.connect_random(neurFfExcInput,
                                   neurCE[0:ffExcInputNTargs],
                                   ffExcInputNInpSyn * 1.0 / nTotalFfNeurons,
                                   weight=connENetWeight,
                                   fixed=True)

    # connect the feedforward visual (poisson) inputs to inhibitory cells (ff I)
    ffInhInputNInpSyn = 100
    _ffInhInputV = ffInhInputMult * np.abs(ffInputV.copy())
    assert (np.all(
        _ffInhInputV >= 0)), 'Negative FF rates are rectified to zero'
    neurFfInhInput = brian.PoissonGroup(
        nTotalFfNeurons, lambda t: _ffInhInputV[int(t * 1000)] * Hz)
    connCFfInhInput = brian.Connection(neurFfInhInput, neurNetwork, 'ge')
    connCFfInhInput.connect_random(
        neurFfInhInput,
        neurCI[0:ffInhInputNTargs],
        ffInhInputNInpSyn * 1.0 / nTotalFfNeurons,  # sparseness
        weight=connENetWeight,
        fixed=True)

    # connect added step (ChR2) conductance to excitatory cells
    condAddAmp = 4.0
    gAdd = brian.TimedArray(condAddAmp * condAddV, dt=1 * ms)
    print('Adding conductance for %d cells (can be slow): ' %
          len(condAddNeurNs),
          end=' ')
    for (iN, tN) in enumerate(condAddNeurNs):
        neurCE[tN].gAdd = gAdd
    print('done')

    # Initialize using some randomness so all neurons don't start in same state.
    # Alternative: initialize with constant values, give net extra 100-300ms to evolve from initial state.
    neurNetwork.v = (brian.randn(1) * 5.0 - 65) * mvolt
    neurNetwork.ge = brian.randn(nNet) * 1.5 + 4
    neurNetwork.gi = brian.randn(nNet) * 12 + 20

    # Record continuous variables and spikes
    monSTarg = brian.SpikeMonitor(neurNetwork)
    if contRecNs is not None:
        contRecClock = brian.Clock(dt=contRecStepMs * ms)
        monVTarg = brian.StateMonitor(neurNetwork,
                                      'v',
                                      record=contRecNs,
                                      clock=contRecClock)
        monGETarg = brian.StateMonitor(neurNetwork,
                                       'ge',
                                       record=contRecNs,
                                       clock=contRecClock)
        monGAddTarg = brian.StateMonitor(neurNetwork,
                                         'gAdd',
                                         record=contRecNs,
                                         clock=contRecClock)
        monGITarg = brian.StateMonitor(neurNetwork,
                                       'gi',
                                       record=contRecNs,
                                       clock=contRecClock)

    # construct brian.Network before running (so brian explicitly knows what to update during run)
    netL = [
        neurNetwork, connCE, connCI, monSTarg, neurFfExcInput, connCFfExcInput,
        neurFfInhInput, connCFfInhInput, neurSpont, connCSpont
    ]
    if contRecNs is not None:
        # noinspection PyUnboundLocalVariable
        netL.append([monVTarg, monGETarg, monGAddTarg,
                     monGITarg])  # cont monitors
    net = brian.Network(netL)
    print("Network construction time: %3.1f seconds" % (time.time() - stT))

    # run
    print("Simulation running...")
    sys.stdout.flush()
    start_time = time.time()
    net.run(simRunTimeS * second, report='text', report_period=30.0 * second)
    durationS = time.time() - start_time
    print("Simulation time: %3.1f seconds" % durationS)

    outNTC = collections.namedtuple(
        'outNTC',
        'vm ge gadd gi clockDtS clockStartS clockEndS spiketimes contRecNs')
    outNTC.__new__.__defaults__ = (None, ) * len(
        outNTC._fields)  # default to None
    outNT = outNTC(clockDtS=float(monSTarg.clock.dt),
                   clockStartS=float(monSTarg.clock.start),
                   clockEndS=float(monSTarg.clock.end),
                   spiketimes=a_(monSTarg.spiketimes.values(), dtype='O'),
                   contRecNs=contRecNs)
    if contRecNs is not None:
        outNT = outNT._replace(vm=monVTarg.values,
                               ge=monGETarg.values,
                               gadd=monGAddTarg.values,
                               gi=monGITarg.values)
    return outNT
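
The feed-forward drive in run_sim follows a common Brian 1 pattern: a PoissonGroup whose rate is a function of time, connected onto a conductance state of the target population. A minimal, self-contained sketch of that pattern (toy constants of my own, independent of the module-level globals run_sim relies on):

import numpy as np
import brian
from brian import ms, mV, second, Hz

rateV = np.zeros(300)       # desired firing rate (Hz) in 1-ms bins
rateV[50:150] = 40.0        # a 100-ms pulse of input

eqs = '''
dv/dt = (-(v + 65*mV) + ge*mV) / (20*ms) : volt
dge/dt = -ge / (5*ms) : 1
'''
cells = brian.NeuronGroup(50, eqs, threshold=-50 * mV, reset=-65 * mV, refractory=2 * ms)
cells.v = -65 * mV

# rate is looked up per millisecond, as in run_sim above
ffInput = brian.PoissonGroup(200, lambda t: rateV[min(int(t / ms), len(rateV) - 1)] * Hz)
connFf = brian.Connection(ffInput, cells, 'ge')
connFf.connect_random(ffInput, cells, 0.1, weight=0.3)

monS = brian.SpikeMonitor(cells)
net = brian.Network(cells, ffInput, connFf, monS)
net.run(0.3 * second)
print('total spikes: %d' % monS.nspikes)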
Exemple #19
0
#u0 = -13960*br.mV#olt#b*v0
u0 = (25 * (-5 * A * B + A**2 * B**2)) * br.mV
v0 = (25 * (-5 + A**2 * B**2)) * br.mV

reset = '''
    v = c
    u += d
'''

img = np.empty(img_dims)

count = 0
g = 2

liquid_neurons = br.NeuronGroup(N_liquid[0],
                                model=eqs_hidden_neurons,
                                refractory=2 * br.ms,
                                reset=reset)
liquid_inputs = liquid_neurons.subgroup(N_liquid[1])
liquid_hidden = liquid_neurons.subgroup(N_liquid[0] - N_liquid[1] -
                                        N_liquid[2])
liquid_output = liquid_neurons.subgroup(N_liquid[2])

spikes = []
hidden_neurons = []  # * len(N_hidden)
input_neurons = br.SpikeGeneratorGroup(N_in + 1, spikes)

Sin = br.Synapses(input_neurons, liquid_inputs, model='w:1', pre='ge+=w')
Sliq = br.Synapses(liquid_neurons, liquid_neurons, model='w:1', pre='ge+=w')

for i in range(len(N_hidden)):
    hidden_neurons.append(
import brian_no_units
import brian
from pylab import *

eq1=brian.Equations('g=gmax*c0 :volt')
#eq1+=brian.Equations('g_ch : 1') #uncomment this to make it work
eq1+=brian.Equations('dc0/dt=-1 :volt')
eq1+=brian.Equations('gmax :1')
g1=brian.NeuronGroup(1, model=eq1)

eq2=brian.Equations('dv/dt=-v+gch*(erev-v): volt')
eq2+=brian.Equations('erev :1')
eq2+=brian.Equations('gch :1')
g2=brian.NeuronGroup(1, model=eq2)

#g1.g_ch = brian.linked_var(g1, 'g') #uncomment this to make it work

g2.gch = brian.linked_var(g1, 'g') #comment this to make it work
#g2.gch = brian.linked_var(g1, 'g_ch') #uncomment this to make it work

s1=brian.StateMonitor(g1, 'g', record=0)
s2=brian.StateMonitor(g1, 'c0', record=0)

s3=brian.StateMonitor(g2, 'v', record=0)
s4=brian.StateMonitor(g2, 'gch', record=0)

s5=brian.StateMonitor(g1, 'gmax', record=0)

#initialize
g1.g=0.0 #commenting this ALONE will make it work
#g1.g_ch=0.0 #uncomment this to make it work
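
Per the comments in the snippet above, linked_var fails when it points at an equation-defined (computed) variable but works when it points at a real state variable or parameter. A minimal sketch of the working case, with toy equations of my own:

import brian
from brian import ms

# driver group: c0 is a genuine state variable, so it can be linked to
eqs1 = brian.Equations('dc0/dt = -c0 / (10*ms) : 1')
g1 = brian.NeuronGroup(1, model=eqs1)
g1.c0 = 1.0

# follower group reads g1's c0 through linked_var
eqs2 = brian.Equations('dv/dt = (gch - v) / (20*ms) : 1')
eqs2 += brian.Equations('gch : 1')
g2 = brian.NeuronGroup(1, model=eqs2)
g2.gch = brian.linked_var(g1, 'c0')

mon = brian.StateMonitor(g2, 'v', record=0)
net = brian.Network(g1, g2, mon)
net.run(100 * ms)
print(mon[0][-1])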
Exemple #21
0
def fft_std(delta_u, run_num, new_connectivity, osc, rep):
    #bn.seed(int(time.time()))
    bn.reinit_default_clock()
    #bn.seed(1412958308+2)
    bn.defaultclock.dt = 0.5 * bn.ms

    #==============================================================================
    # Define constants for the model.
    #==============================================================================
    fft_file = './std_fft_p20_'
    rate_file = './std_rate_p20_'
    print delta_u
    print run_num
    print new_connectivity
    print rep

    if osc:
        T = 5.5 * bn.second
    else:
        T = 2.5 * bn.second
    n_tsteps = T / bn.defaultclock.dt
    fft_start = 0.5 * bn.second / bn.defaultclock.dt  # Time window for the FFT computation
    ro = 1.2 * bn.Hz

    SEE1 = 1.0
    SEE2 = 1.0
    qee1 = 1.00  # Fraction of NMDA receptors for e to e connections
    qee2 = 0.00
    qie1 = 1.00  # Fraction of NMDA receptors for e to i connections
    qie2 = 0.00

    uee1 = 0.2 - delta_u
    uee2 = 0.2 + delta_u
    uie1 = 0.2
    uie2 = 0.2
    trec1 = 1000.0 * bn.ms
    trec2 = 1000.0 * bn.ms

    k = 0.65
    #Jeo_const = 1.0#*bn.mV # Base strength of o (external) to e connections

    Ne = 3200  # number of excitatory neurons
    Ni = 800  # number of inhibitory neurons
    No = 20000  # number of external neurons
    N = Ne + Ni

    pcon = 0.2  # probability of connection

    Jee = 10.0 / (Ne * pcon)
    Jie = 10.0 / (Ne * pcon)
    Jii = k * 10.0 / (Ni * pcon)
    Jei = k * 10.0 / (Ni * pcon)
    Jeo = 1.0

    El = -60.0 * bn.mV  # leak reversal potential
    Vreset = -52.0 * bn.mV  # reversal potential
    Vthresh = -40.0 * bn.mV  # spiking threshold

    tref = 2.0 * bn.ms  # refractory period
    te = 20.0 * bn.ms  # membrane time constant of excitatory neurons
    ti = 10.0 * bn.ms  # membrane time constant of inhibitory neruons
    tee_ampa = 10.0 * bn.ms  # time const of ampa currents at excitatory neurons
    tee_nmda = 100.0 * bn.ms  # time const of nmda currents at excitatory neurons
    tie_ampa = 10.0 * bn.ms  # time const of ampa currents at inhibitory neurons
    tie_nmda = 100.0 * bn.ms  # time const of nmda currents at inhibitory neurons
    tii_gaba = 10.0 * bn.ms  # time const of GABA currents at inhibitory neurons
    tei_gaba = 10.0 * bn.ms  # time const of GABA currents at excitatory neurons
    teo_input = 100.0 * bn.ms

    #==============================================================================
    # Define model structure
    #==============================================================================

    model = '''
  dV/dt = (-(V-El)+J_ampa1*I_ampa1+J_nmda1*I_nmda1+J_ampa2*I_ampa2+J_nmda2*I_nmda2-J_gaba*I_gaba+J_input*I_input+eta)/tm : bn.volt
  dI_ampa1/dt = -I_ampa1/t_ampa : bn.volt
  dI_nmda1/dt = -I_nmda1/t_nmda : bn.volt
  dI_ampa2/dt = -I_ampa2/t_ampa : bn.volt
  dI_nmda2/dt = -I_nmda2/t_nmda : bn.volt
  dI_gaba/dt = -I_gaba/t_gaba : bn.volt
  dI_input/dt = (-I_input+mu)/t_input : bn.volt
  dx1/dt = (1-x1)/t1_rec : 1
  dx2/dt = (1-x2)/t2_rec : 1
  u1 : 1
  t1_rec : bn.second
  u2 : 1
  t2_rec : bn.second
  mu : bn.volt
  eta : bn.volt
  J_ampa1 : 1
  J_nmda1 : 1
  J_ampa2 : 1
  J_nmda2 : 1
  J_gaba : 1
  J_input : 1
  tm : bn.second
  t_ampa : bn.second
  t_nmda : bn.second
  t_gaba : bn.second
  t_input : bn.second
  '''

    P_reset = "V=-52*bn.mV;x1+=-u1*x1;x2+=-u2*x2"

    Se_model = '''
  we_ampa1 : bn.volt
  we_nmda1 : bn.volt
  we_ampa2 : bn.volt
  we_nmda2 : bn.volt
  '''

    Se_pre = ('I_ampa1 += x1_pre*we_ampa1', 'I_nmda1 += x1_pre*we_nmda1',
              'I_ampa2 += x2_pre*we_ampa2', 'I_nmda2 += x2_pre*we_nmda2')

    Si_model = '''
  wi_gaba : bn.volt
  '''

    Si_pre = 'I_gaba += wi_gaba'

    So_model = '''
  wo_input : bn.volt
  '''

    So_pre = 'I_input += wo_input'

    #==============================================================================
    # Define populations
    #==============================================================================

    P = bn.NeuronGroup(N,
                       model,
                       threshold=Vthresh,
                       reset=P_reset,
                       refractory=tref)

    Pe = P[0:Ne]
    Pe.tm = te
    Pe.t_ampa = tee_ampa
    Pe.t_nmda = tee_nmda
    Pe.t_gaba = tei_gaba
    Pe.t_input = teo_input
    Pe.I_ampa1 = 0 * bn.mV
    Pe.I_nmda1 = 0 * bn.mV
    Pe.I_ampa2 = 0 * bn.mV
    Pe.I_nmda2 = 0 * bn.mV
    Pe.I_gaba = 0 * bn.mV
    Pe.I_input = 0 * bn.mV
    Pe.V = (np.random.rand(Pe.V.size) * 12 - 52) * bn.mV

    Pe.x1 = 1.0
    Pe.x2 = 1.0
    Pe.u1 = uee1
    Pe.u2 = uee2
    Pe.t1_rec = trec1
    Pe.t2_rec = trec2

    Pi = P[Ne:(Ne + Ni)]
    Pi.tm = ti
    Pi.t_ampa = tie_ampa
    Pi.t_nmda = tie_nmda
    Pi.t_gaba = tii_gaba
    Pi.t_input = teo_input
    Pi.I_ampa1 = 0 * bn.mV
    Pi.I_nmda1 = 0 * bn.mV
    Pi.I_ampa2 = 0 * bn.mV
    Pi.I_nmda2 = 0 * bn.mV
    Pi.I_gaba = 0 * bn.mV
    Pi.I_input = 0 * bn.mV
    Pi.V = (np.random.rand(Pi.V.size) * 12 - 52) * bn.mV

    Pi.x1 = 1.0
    Pi.x2 = 1.0
    Pi.u1 = 0.0
    Pi.u2 = 0.0
    Pi.t1_rec = 1.0
    Pi.t2_rec = 1.0

    Pe.J_ampa1 = Jee * (1 - qee1)  #*SEE1
    Pe.J_nmda1 = Jee * qee1  #*SEE1
    Pe.J_ampa2 = Jee * (1 - qee2)  #*SEE2
    Pe.J_nmda2 = Jee * qee2  #*SEE2

    Pi.J_ampa1 = Jie * (1 - qie2)  #*SEE2
    Pi.J_nmda1 = Jie * qie2  #*SEE2
    Pi.J_ampa2 = Jie * (1 - qie1)  #*SEE1
    Pi.J_nmda2 = Jie * qie1  #*SEE1

    Pe.J_gaba = Jei
    Pi.J_gaba = Jii

    Pe.J_input = Jeo
    Pi.J_input = Jeo

    #==============================================================================
    # Define inputs
    #==============================================================================

    if osc:
        Pe.mu = 12.0 * bn.mV
        holder = np.zeros((n_tsteps, ))
        t_freq = np.linspace(0, 10, n_tsteps)

        fo = 0.2  # Smallest frequency in the signal
        fe = 10.0  # Largest frequency in the signal
        F = int(fe / 0.2)
        for m in range(1, F + 1):
            holder = holder + np.cos(2 * np.pi * m * fo * t_freq - m *
                                     (m - 1) * np.pi / F)
        holder = holder / np.max(holder)
        Pe.eta = bn.TimedArray(0.0 * bn.mV * holder)  #, dt=0.5*bn.ms)

        Background_eo = bn.PoissonInput(Pe,
                                        N=1000,
                                        rate=1.05 * bn.Hz,
                                        weight=0.2 * bn.mV,
                                        state='I_input')
        Background_io = bn.PoissonInput(Pi,
                                        N=1000,
                                        rate=1.0 * bn.Hz,
                                        weight=0.2 * bn.mV,
                                        state='I_input')

        Pi.mu = 0 * bn.mV
        Pi.eta = 0 * bn.mV  #, dt=0.5*bn.ms)

        Po = bn.PoissonGroup(No, rates=0 * bn.Hz)
    else:

        Background_eo = bn.PoissonInput(Pe,
                                        N=1000,
                                        rate=1.05 * bn.Hz,
                                        weight=0.2 * bn.mV,
                                        state='I_input')
        Background_io = bn.PoissonInput(Pi,
                                        N=1000,
                                        rate=1.0 * bn.Hz,
                                        weight=0.2 * bn.mV,
                                        state='I_input')
        holder_pe = np.zeros((n_tsteps, ))
        time_steps = np.linspace(0, T / bn.second, n_tsteps)
        holder_pe[time_steps < 0.5] = 0.0 * bn.mV
        holder_pe[time_steps >= 0.5] = 6.0 * bn.mV  #25
        holder_pe[time_steps > 1.5] = 0.0 * bn.mV  #25
        Pe.mu = bn.TimedArray(holder_pe)

        def firing_function(t, ro):
            if t > 0.5 * bn.second and t < 3.5 * bn.second:
                return 0.0 * bn.Hz
            else:
                return 0.0 * bn.Hz

        Pe.eta = 0 * bn.mV  #, dt=0.5*bn.ms)
        Pi.mu = 0.0 * bn.mV
        Pi.eta = 0 * bn.mV  #, dt=0.5*bn.ms)

        Po = bn.PoissonGroup(No, rates=lambda t: firing_function(t, ro))

    #==============================================================================
    # Define synapses
    #==============================================================================

    See1 = bn.Synapses(Pe, Pe, model=Se_model, pre=Se_pre)
    See2 = bn.Synapses(Pe, Pe, model=Se_model, pre=Se_pre)
    Sie1 = bn.Synapses(Pe, Pi, model=Se_model, pre=Se_pre)
    Sie2 = bn.Synapses(Pe, Pi, model=Se_model, pre=Se_pre)

    Sei = bn.Synapses(Pi, Pe, model=Si_model, pre=Si_pre)
    Sii = bn.Synapses(Pi, Pi, model=Si_model, pre=Si_pre)

    Seo = bn.Synapses(Po, Pe, model=So_model, pre=So_pre)

    #==============================================================================
    # Define random connections
    #==============================================================================

    if new_connectivity:
        See1.connect_random(Pe, Pe, sparseness=pcon / 2.0)
        See2.connect_random(Pe, Pe, sparseness=pcon / 2.0)
        Sie1.connect_random(Pe, Pi, sparseness=pcon / 2.0)
        Sie2.connect_random(Pe, Pi, sparseness=pcon / 2.0)
        Sii.connect_random(Pi, Pi, sparseness=pcon)
        Sei.connect_random(Pi, Pe, sparseness=pcon)
        Seo.connect_random(Po, Pe, sparseness=pcon)

        print 'Saving'
        See1.save_connectivity('./See1_connections_std_saver_p20_' +
                               str(run_num))
        See2.save_connectivity('./See2_connections_std_saver_p20_' +
                               str(run_num))
        Sie1.save_connectivity('./Sie1_connections_std_saver_p20_' +
                               str(run_num))
        Sie2.save_connectivity('./Sie2_connections_std_saver_p20_' +
                               str(run_num))
        Sii.save_connectivity('./Sii_connections_std_saver_p20_' +
                              str(run_num))
        Sei.save_connectivity('./Sei_connections_std_saver_p20_' +
                              str(run_num))
        Seo.save_connectivity('./Seo_connections_std_saver_p20_' +
                              str(run_num))
    else:
        print 'Loading'
        See1.load_connectivity('./See1_connections_std_saver_p20_' +
                               str(run_num))
        See2.load_connectivity('./See2_connections_std_saver_p20_' +
                               str(run_num))
        Sie1.load_connectivity('./Sie1_connections_std_saver_p20_' +
                               str(run_num))
        Sie2.load_connectivity('./Sie2_connections_std_saver_p20_' +
                               str(run_num))
        Sii.load_connectivity('./Sii_connections_std_saver_p20_' +
                              str(run_num))
        Sei.load_connectivity('./Sei_connections_std_saver_p20_' +
                              str(run_num))
        Seo.load_connectivity('./Seo_connections_std_saver_p20_' +
                              str(run_num))

    See1.we_ampa1 = SEE1 * 1.0 * bn.mV / tee_ampa
    See1.we_nmda1 = SEE1 * 1.0 * bn.mV / tee_nmda
    See1.we_ampa2 = 0.0 * bn.mV / tee_ampa
    See1.we_nmda2 = 0.0 * bn.mV / tee_nmda

    See2.we_ampa1 = 0.0 * bn.mV / tee_ampa
    See2.we_nmda1 = 0.0 * bn.mV / tee_nmda
    See2.we_ampa2 = SEE2 * 1.0 * bn.mV / tee_ampa
    See2.we_nmda2 = SEE2 * 1.0 * bn.mV / tee_nmda

    Sie1.we_ampa1 = 0.0 * bn.mV / tie_ampa
    Sie1.we_nmda1 = 0.0 * bn.mV / tie_nmda
    Sie1.we_ampa2 = SEE1 * 1.0 * bn.mV / tie_ampa
    Sie1.we_nmda2 = SEE1 * 1.0 * bn.mV / tie_nmda

    Sie2.we_ampa1 = SEE2 * 1.0 * bn.mV / tie_ampa
    Sie2.we_nmda1 = SEE2 * 1.0 * bn.mV / tie_nmda
    Sie2.we_ampa2 = 0.0 * bn.mV / tie_ampa
    Sie2.we_nmda2 = 0.0 * bn.mV / tie_nmda

    Sei.wi_gaba = 1.0 * bn.mV / tei_gaba
    Sii.wi_gaba = 1.0 * bn.mV / tii_gaba

    Seo.wo_input = 1.0 * bn.mV / teo_input

    #==============================================================================
    #  Define monitors
    #==============================================================================

    Pe_mon_V = bn.StateMonitor(Pe, 'V', timestep=10, record=True)
    Pe_mon_eta = bn.StateMonitor(Pe, 'eta', timestep=1, record=True)
    Pe_mon_ampa1 = bn.StateMonitor(Pe, 'I_ampa1', timestep=1, record=True)
    Pe_mon_nmda1 = bn.StateMonitor(Pe, 'I_nmda1', timestep=1, record=True)
    Pe_mon_ampa2 = bn.StateMonitor(Pe, 'I_ampa2', timestep=1, record=True)
    Pe_mon_nmda2 = bn.StateMonitor(Pe, 'I_nmda2', timestep=1, record=True)
    Pe_mon_gaba = bn.StateMonitor(Pe, 'I_gaba', timestep=1, record=True)
    Pe_mon_input = bn.StateMonitor(Pe, 'I_input', timestep=10, record=True)
    See1_mon_x = bn.StateMonitor(Pe, 'x1', timestep=10, record=True)
    See2_mon_x = bn.StateMonitor(Pe, 'x2', timestep=10, record=True)

    Pe_ratemon = bn.PopulationRateMonitor(Pe, bin=10.0 * bn.ms)
    Pi_ratemon = bn.PopulationRateMonitor(Pi, bin=10.0 * bn.ms)

    #==============================================================================
    # Run model
    #==============================================================================
    timer = 0 * bn.second
    t_start = time.time()
    bn.run(T, report='graphical')
    timer = timer + T
    print '-------------------------------------------------------'
    print 'Time is ' + str(timer) + ' seconds'
    t_end = time.time()
    print 'Time to compute last ' +str(T)+' seconds is: ' + \
          str(t_end - t_start) + ' seconds'
    print '-------------------------------------------------------\n'

    Pe_mon_ampa1_vals = Pe.J_ampa1[0] * np.mean(Pe_mon_ampa1.values.T, axis=1)
    Pe_mon_nmda1_vals = Pe.J_nmda1[0] * np.mean(Pe_mon_nmda1.values.T, axis=1)
    Pe_mon_ampa2_vals = Pe.J_ampa2[0] * np.mean(Pe_mon_ampa2.values.T, axis=1)
    Pe_mon_nmda2_vals = Pe.J_nmda2[0] * np.mean(Pe_mon_nmda2.values.T, axis=1)
    Pe_mon_ampa_vals = Pe_mon_ampa1_vals + Pe_mon_ampa2_vals
    Pe_mon_nmda_vals = Pe_mon_nmda1_vals + Pe_mon_nmda2_vals

    Pe_mon_gaba_vals = Pe.J_gaba[0] * np.mean(Pe_mon_gaba.values.T, axis=1)
    Pe_mon_input_vals = Pe.J_input[0] * np.mean(Pe_mon_input.values.T, axis=1)
    Pe_mon_V_vals = np.mean(Pe_mon_V.values.T, axis=1)

    Pe_mon_all_vals = Pe_mon_ampa_vals + Pe_mon_nmda_vals - Pe_mon_gaba_vals

    See1_mon_x_vals = np.mean(See1_mon_x.values.T, axis=1)
    See2_mon_x_vals = np.mean(See2_mon_x.values.T, axis=1)

    #==============================================================================
    # Save into a Matlab file
    #==============================================================================

    if osc:

        Pe_output = Pe.J_ampa1[0]*Pe_mon_ampa1.values+Pe.J_nmda1[0]*Pe_mon_nmda1.values + \
        Pe.J_ampa2[0]*Pe_mon_ampa2.values+Pe.J_nmda2[0]*Pe_mon_nmda2.values-Pe.J_gaba[0]*Pe_mon_gaba.values
        Pe_output = Pe_output[:, fft_start:, ]
        Pe_V = Pe_mon_V.values[:, fft_start:, ]
        Pe_glut = Pe.J_ampa1[0]*Pe_mon_ampa1.values+Pe.J_nmda1[0]*Pe_mon_nmda1.values + \
        Pe.J_ampa2[0]*Pe_mon_ampa2.values+Pe.J_nmda2[0]*Pe_mon_nmda2.values
        Pe_glut = Pe_glut[:, fft_start:, ]
        Pe_gaba = Pe.J_gaba[0] * Pe_mon_gaba.values
        Pe_gaba = Pe_gaba[:, fft_start:, ]

        Pe_input = Pe_mon_eta[:, fft_start:, ]
        T_step = bn.defaultclock.dt

        holder = {
            'Pe_output': Pe_output,
            'Pe_input': Pe_input,
            'Pe_V': Pe_V,
            'Pe_glut': Pe_glut,
            'Pe_gaba': Pe_gaba,
            'T_step': T_step
        }
        scipy.io.savemat(fft_file + 'delta_u' + str(delta_u) + '_' + str(rep),
                         mdict=holder)
    else:
        holder = {
            'Pe_rate': Pe_ratemon.rate,
            'Pe_time': Pe_ratemon.times,
            'uee1': uee1,
            'uee2': uee2,
            'uie1': uie1,
            'uie2': uie2
        }
        scipy.io.savemat(rate_file + 'delta_q_' + str(delta_u) + '_' +
                         str(run_num) + 'rep' + str(rep),
                         mdict=holder)
    bn.clear(erase=True, all=True)
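
The x / u pair above implements short-term synaptic depression: the resource x recovers toward 1 between spikes, is consumed at each presynaptic spike through the reset rule, and scales the synaptic drive via x_pre. A minimal, self-contained sketch of that mechanism with a single synapse type and toy constants of my own:

import numpy as np
import brian as bn

El = -60.0 * bn.mV

# resource x recovers toward 1 and is consumed at each spike (see the reset string below)
model = '''
dV/dt = (-(V - El) + I) / (20*bn.ms) : bn.volt
dI/dt = -I / (10*bn.ms) : bn.volt
dx/dt = (1 - x) / (1000*bn.ms) : 1
u : 1
'''

P = bn.NeuronGroup(100, model, threshold=-40.0 * bn.mV,
                   reset="V=-52*bn.mV;x+=-u*x", refractory=2.0 * bn.ms)
P.V = (np.random.rand(100) * 12 - 52) * bn.mV
P.x = 1.0
P.u = 0.2

# synaptic drive is proportional to the presynaptic resource x_pre, so it depresses
S = bn.Synapses(P, P, model='w : bn.volt', pre='I += x_pre*w')
S.connect_random(P, P, sparseness=0.2)
S.w = 0.5 * bn.mV

rate_mon = bn.PopulationRateMonitor(P, bin=10.0 * bn.ms)
net = bn.Network(P, S, rate_mon)
net.run(0.5 * bn.second)
print 'mean rate (Hz):', rate_mon.rate.mean()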

eqs_stdp = '''
	dpre/dt = -pre / tc_pre_ee : 1.0
	dpost/dt = -post / tc_post_ee : 1.0
	'''

eqs_stdp_pre = 'pre = 1.; w -= nu_ee_pre * post'
eqs_stdp_post = 'w += nu_ee_post * pre; post = 1.'

print('Creating neuron groups.')
groups = {}
groups['e'] = b.NeuronGroup(6400,
                            eqs_e,
                            threshold=v_thresh_e,
                            refractory=refrac_e,
                            reset=scr_e,
                            compile=True,
                            freeze=True)
groups['i'] = b.NeuronGroup(6400,
                            eqs_i,
                            threshold=v_thresh_i,
                            refractory=refrac_i,
                            reset=v_reset_i,
                            compile=True,
                            freeze=True)

groups['Ae'] = groups['e'].subgroup(6400)
groups['Ai'] = groups['i'].subgroup(6400)

groups['Ae'].v = v_rest_e - 40. * b.mV
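
A minimal, self-contained sketch (toy sizes, rates, and constants of my own) of how the trace equations and pre/post update rules above attach to a connection through b.STDP:

import brian as b
from brian import mV, ms, second, Hz

# placeholder values for the constants referenced by eqs_stdp and the update rules
tc_pre_ee = tc_post_ee = 20 * ms
nu_ee_pre, nu_ee_post = 0.0001, 0.01
wmax_ee = 1.0

toy_eqs = '''
dv/dt = (-(v + 60*mV) + ge*mV) / (20*ms) : volt
dge/dt = -ge / (5*ms) : 1
'''
inputs = b.PoissonGroup(20, rates=15 * Hz)
exc = b.NeuronGroup(10, toy_eqs, threshold=-50 * mV, reset=-60 * mV)
exc.v = -60 * mV

conn = b.Connection(inputs, exc, 'ge', structure='sparse')
conn.connect_random(inputs, exc, 0.5, weight=0.3)

# reuse eqs_stdp / eqs_stdp_pre / eqs_stdp_post defined above
stdp = b.STDP(conn, eqs=eqs_stdp, pre=eqs_stdp_pre, post=eqs_stdp_post,
              wmin=0., wmax=wmax_ee)

net = b.Network(inputs, exc, conn, stdp)
net.run(1 * second)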
Exemple #23
0
    def __init__(self,
                 n_input=784,
                 conv_size=16,
                 conv_stride=4,
                 conv_features=50,
                 connectivity='all',
                 weight_dependence=False,
                 post_pre=True,
                 weight_sharing=False,
                 lattice_structure='4',
                 random_lattice_prob=0.0,
                 random_inhibition_prob=0.0):
        '''
		Constructor for the spiking convolutional neural network model.

		n_input: (flattened) dimensionality of the input data
		conv_size: side length of convolution windows used
		conv_stride: stride (horizontal and vertical) of convolution windows used
		conv_features: number of convolution features (or patches) used
		connectivity: connection style between patches; one of 'none', 'pairs', 'all'; more to be added
		weight_dependence: whether to use STDP with weight dependence
		post_pre: whether to use STDP with both post- and pre-synaptic traces
		weight_sharing: whether to impose that all neurons within a convolution patch share a common set of weights
		lattice_structure: lattice connectivity pattern between patches; one of 'none', '4', '8', and 'all'
		random_lattice_prob: probability of adding random additional lattice connections between patches
		random_inhibition_prob: probability of adding random additional inhibition edges from the inhibitory to excitatory population
		'''
        self.n_input, self.conv_size, self.conv_stride, self.conv_features, self.connectivity, self.weight_dependence, \
         self.post_pre, self.weight_sharing, self.lattice_structure, self.random_lattice_prob, self.random_inhibition_prob = \
         n_input, conv_size, conv_stride, conv_features, connectivity, weight_dependence, post_pre, weight_sharing, lattice_structure, \
         random_lattice_prob, random_inhibition_prob

        # number of inputs to the network
        self.n_input_sqrt = int(math.sqrt(self.n_input))
        self.n_excitatory_patch = (
            (self.n_input_sqrt - self.conv_size) / self.conv_stride + 1)**2
        self.n_excitatory = self.n_excitatory_patch * self.conv_features
        self.n_excitatory_patch_sqrt = int(math.sqrt(self.n_excitatory_patch))
        self.n_inhibitory_patch = self.n_excitatory_patch
        self.n_inhibitory = self.n_excitatory
        self.conv_features_sqrt = int(math.ceil(math.sqrt(self.conv_features)))

        # time (in seconds) per data example presentation and rest period in between
        self.single_example_time = 0.35 * b.second
        self.resting_time = 0.15 * b.second

        # set update intervals
        self.update_interval = 100
        self.weight_update_interval = 10
        self.print_progress_interval = 10

        # rest potential parameters, reset potential parameters, threshold potential parameters, and refractory periods
        v_rest_e, v_rest_i = -65. * b.mV, -60. * b.mV
        v_reset_e, v_reset_i = -65. * b.mV, -45. * b.mV
        v_thresh_e, v_thresh_i = -52. * b.mV, -40. * b.mV
        refrac_e, refrac_i = 5. * b.ms, 2. * b.ms

        # time constants, learning rates, max weights, weight dependence, etc.
        tc_pre_ee, tc_post_ee = 20 * b.ms, 20 * b.ms
        nu_ee_pre, nu_ee_post = 0.0001, 0.01
        exp_ee_post = exp_ee_pre = 0.2
        w_mu_pre, w_mu_post = 0.2, 0.2

        # parameters for neuron equations
        tc_theta = 1e7 * b.ms
        theta_plus = 0.05 * b.mV
        scr_e = 'v = v_reset_e; theta += theta_plus; timer = 0*ms'
        offset = 20.0 * b.mV
        v_thresh_e = '(v>(theta - offset + ' + str(
            v_thresh_e) + ')) * (timer>refrac_e)'

        # equations for neurons
        neuron_eqs_e = '''
				dv / dt = ((v_rest_e - v) + (I_synE + I_synI) / nS) / (100 * ms)  : volt
				I_synE = ge * nS * - v  : amp
				I_synI = gi * nS * (-100. * mV - v)  : amp
				dge / dt = -ge / (1.0*ms)  : 1
				dgi / dt = -gi / (2.0*ms)  : 1
				dtheta / dt = -theta / (tc_theta)  : volt
				dtimer / dt = 100.0  : ms
			'''

        neuron_eqs_i = '''
				dv/dt = ((v_rest_i - v) + (I_synE + I_synI) / nS) / (10*ms)  : volt
				I_synE = ge * nS *         -v                           : amp
				I_synI = gi * nS * (-85.*mV-v)                          : amp
				dge/dt = -ge/(1.0*ms)                                   : 1
				dgi/dt = -gi/(2.0*ms)                                  : 1
			'''

        # STDP synaptic traces
        eqs_stdp_ee = '''
				dpre / dt = -pre / tc_pre_ee : 1.0
				dpost / dt = -post / tc_post_ee : 1.0
			'''

        # dictionaries for weights and delays
        self.weight, self.delay = {}, {}

        # setting weight, delay, and intensity parameters
        self.weight['ee_input'] = (conv_size**2) * 0.175
        self.delay['ee_input'] = (0 * b.ms, 10 * b.ms)
        self.delay['ei_input'] = (0 * b.ms, 5 * b.ms)
        self.input_intensity = self.start_input_intensity = 2.0
        self.wmax_ee = 1.0

        # populations, connections, saved connections, etc.
        self.input_population_names = ['X']
        self.population_names = ['A']
        self.input_connection_names = ['XA']
        self.save_connections = ['XeAe', 'AeAe']
        self.input_conn_names = ['ee_input']
        self.recurrent_connection_names = ['ei', 'ie', 'ee']

        # setting STDP update rule
        if weight_dependence:
            if post_pre:
                eqs_stdp_pre_ee = 'pre = 1.; w -= nu_ee_pre * post * w ** exp_ee_pre'
                eqs_stdp_post_ee = 'w += nu_ee_post * pre * (wmax_ee - w) ** exp_ee_post; post = 1.'

            else:
                eqs_stdp_pre_ee = 'pre = 1.'
                eqs_stdp_post_ee = 'w += nu_ee_post * pre * (wmax_ee - w) ** exp_ee_post; post = 1.'

        else:
            if post_pre:
                eqs_stdp_pre_ee = 'pre = 1.; w -= nu_ee_pre * post'
                eqs_stdp_post_ee = 'w += nu_ee_post * pre; post = 1.'

            else:
                eqs_stdp_pre_ee = 'pre = 1.'
                eqs_stdp_post_ee = 'w += nu_ee_post * pre; post = 1.'

        print '\n'

        # for filesaving purposes
        stdp_input = ''
        if self.weight_dependence:
            stdp_input += 'weight_dependence_'
        else:
            stdp_input += 'no_weight_dependence_'
        if self.post_pre:
            stdp_input += 'post_pre'
        else:
            stdp_input += 'no_post_pre'
        if self.weight_sharing:
            use_weight_sharing = 'weight_sharing'
        else:
            use_weight_sharing = 'no_weight_sharing'

        # set ending of filename saves
        self.ending = self.connectivity + '_' + str(self.conv_size) + '_' + str(self.conv_stride) + '_' + str(self.conv_features) + \
             '_' + str(self.n_excitatory_patch) + '_' + stdp_input + '_' + \
             use_weight_sharing + '_' + str(self.lattice_structure) + '_' + str(self.random_lattice_prob) + \
             '_' + str(self.random_inhibition_prob)

        self.fig_num = 1

        # creating dictionaries for various objects
        self.neuron_groups, self.input_groups, self.connections, self.input_connections, self.stdp_methods, self.rate_monitors, \
         self.spike_monitors, self.spike_counters, self.output_numbers = {}, {}, {}, {}, {}, {}, {}, {}, {}

        # creating convolution locations inside the input image
        self.convolution_locations = {}
        for n in xrange(self.n_excitatory_patch):
            self.convolution_locations[n] = [ ((n % self.n_excitatory_patch_sqrt) * self.conv_stride + (n // self.n_excitatory_patch_sqrt) \
                      * self.n_input_sqrt * self.conv_stride) + (x * self.n_input_sqrt) + y \
                      for y in xrange(self.conv_size) for x in xrange(self.conv_size) ]

        # instantiating neuron spike / votes monitor
        self.result_monitor = np.zeros(
            (self.update_interval, self.conv_features,
             self.n_excitatory_patch))

        # creating overarching neuron populations
        self.neuron_groups['e'] = b.NeuronGroup(self.n_excitatory, neuron_eqs_e, threshold=v_thresh_e, \
                     refractory=refrac_e, reset=scr_e, compile=True, freeze=True)
        self.neuron_groups['i'] = b.NeuronGroup(self.n_inhibitory, neuron_eqs_i, threshold=v_thresh_i, \
                     refractory=refrac_i, reset=v_reset_i, compile=True, freeze=True)

        # create neuron subpopulations
        for name in self.population_names:
            print '...creating neuron group:', name

            # get a subgroup of size 'n_e' from all exc
            self.neuron_groups[name + 'e'] = self.neuron_groups['e'].subgroup(
                self.conv_features * self.n_excitatory_patch)
            # get a subgroup of size 'n_i' from the inhibitory layer
            self.neuron_groups[name + 'i'] = self.neuron_groups['i'].subgroup(
                self.conv_features * self.n_excitatory_patch)

            # start the membrane potentials of these groups 40mV below their resting potentials
            self.neuron_groups[name + 'e'].v = v_rest_e - 40. * b.mV
            self.neuron_groups[name + 'i'].v = v_rest_i - 40. * b.mV

        print '...creating recurrent connections'

        for name in self.population_names:
            # set the adaptive additive threshold parameter at 20mV
            self.neuron_groups['e'].theta = np.ones(
                (self.n_excitatory)) * 20.0 * b.mV

            for connection_type in self.recurrent_connection_names:
                if connection_type == 'ei':
                    # create connection name (composed of population and connection types)
                    connection_name = name + connection_type[
                        0] + name + connection_type[1]
                    # create a connection from the first group in conn_name with the second group
                    self.connections[connection_name] = b.Connection(self.neuron_groups[connection_name[0:2]], \
                            self.neuron_groups[connection_name[2:4]], structure='sparse', state='g' + connection_type[0])
                    # instantiate the created connection
                    for feature in xrange(self.conv_features):
                        for n in xrange(self.n_excitatory_patch):
                            self.connections[connection_name][feature * self.n_excitatory_patch + n, \
                                    feature * self.n_excitatory_patch + n] = 10.4

                elif connection_type == 'ie':
                    # create connection name (composed of population and connection types)
                    connection_name = name + connection_type[
                        0] + name + connection_type[1]
                    # create a connection from the first group in conn_name with the second group
                    self.connections[connection_name] = b.Connection(self.neuron_groups[connection_name[0:2]], \
                            self.neuron_groups[connection_name[2:4]], structure='sparse', state='g' + connection_type[0])
                    # instantiate the created connection
                    for feature in xrange(self.conv_features):
                        for other_feature in xrange(self.conv_features):
                            if feature != other_feature:
                                for n in xrange(self.n_excitatory_patch):
                                    self.connections[connection_name][feature * self.n_excitatory_patch + n, \
                                          other_feature * self.n_excitatory_patch + n] = 17.4

                    # adding random inhibitory connections as specified
                    if self.random_inhibition_prob != 0.0:
                        for feature in xrange(self.conv_features):
                            for other_feature in xrange(self.conv_features):
                                for n_this in xrange(self.n_excitatory_patch):
                                    for n_other in xrange(
                                            self.n_excitatory_patch):
                                        if n_this != n_other:
                                            if b.random(
                                            ) < self.random_inhibition_prob:
                                                self.connections[connection_name][feature * self.n_excitatory_patch + n_this, \
                                                  other_feature * self.n_excitatory_patch + n_other] = 17.4

                elif connection_type == 'ee':
                    # create connection name (composed of population and connection types)
                    connection_name = name + connection_type[
                        0] + name + connection_type[1]
                    # create a connection from the first group in conn_name with the second group
                    self.connections[connection_name] = b.Connection(self.neuron_groups[connection_name[0:2]], \
                       self.neuron_groups[connection_name[2:4]], structure='sparse', state='g' + connection_type[0])
                    # instantiate the created connection
                    if self.connectivity == 'all':
                        for feature in xrange(self.conv_features):
                            for other_feature in xrange(self.conv_features):
                                if feature != other_feature:
                                    for this_n in xrange(
                                            self.n_excitatory_patch):
                                        for other_n in xrange(
                                                self.n_excitatory_patch):
                                            if is_lattice_connection(
                                                    self.
                                                    n_excitatory_patch_sqrt,
                                                    this_n, other_n):
                                                self.connections[connection_name][feature * self.n_excitatory_patch + this_n, \
                                                  other_feature * self.n_excitatory_patch + other_n] = \
                                                    (b.random() + 0.01) * 0.3

                    elif self.connectivity == 'pairs':
                        for feature in xrange(self.conv_features):
                            if feature % 2 == 0:
                                for this_n in xrange(self.n_excitatory_patch):
                                    for other_n in xrange(
                                            self.n_excitatory_patch):
                                        if is_lattice_connection(
                                                self.n_excitatory_patch_sqrt,
                                                this_n, other_n):
                                            self.connections[connection_name][feature * self.n_excitatory_patch + this_n, \
                                              (feature + 1) * self.n_excitatory_patch + other_n] = (b.random() + 0.01) * 0.3
                            elif feature % 2 == 1:
                                for this_n in xrange(self.n_excitatory_patch):
                                    for other_n in xrange(
                                            self.n_excitatory_patch):
                                        if is_lattice_connection(
                                                self.n_excitatory_patch_sqrt,
                                                this_n, other_n):
                                            self.connections[connection_name][feature * self.n_excitatory_patch + this_n, \
                                              (feature - 1) * self.n_excitatory_patch + other_n] = (b.random() + 0.01) * 0.3

                    elif self.connectivity == 'linear':
                        for feature in xrange(self.conv_features):
                            if feature != self.conv_features - 1:
                                for this_n in xrange(self.n_excitatory_patch):
                                    for other_n in xrange(
                                            self.n_excitatory_patch):
                                        if is_lattice_connection(
                                                self.n_excitatory_patch_sqrt,
                                                this_n, other_n):
                                            self.connections[connection_name][feature * self.n_excitatory_patch + this_n, \
                                              (feature + 1) * self.n_excitatory_patch + other_n] = \
                                                 (b.random() + 0.01) * 0.3
                            if feature != 0:
                                for this_n in xrange(self.n_excitatory_patch):
                                    for other_n in xrange(
                                            self.n_excitatory_patch):
                                        if is_lattice_connection(
                                                self.n_excitatory_patch_sqrt,
                                                this_n, other_n):
                                            self.connections[connection_name][feature * self.n_excitatory_patch + this_n, \
                                              (feature - 1) * self.n_excitatory_patch + other_n] = \
                                                 (b.random() + 0.01) * 0.3

                    elif self.connectivity == 'none':
                        pass

            # if STDP from excitatory -> excitatory is on and this connection is excitatory -> excitatory
            if 'ee' in self.recurrent_connection_names:
                self.stdp_methods[name + 'e' + name + 'e'] = b.STDP(self.connections[name + 'e' + name + 'e'], \
                            eqs=eqs_stdp_ee, pre=eqs_stdp_pre_ee, \
                            post=eqs_stdp_post_ee, wmin=0., wmax=self.wmax_ee)

            print '...creating monitors for:', name

            # spike rate monitors for excitatory and inhibitory neuron populations
            self.rate_monitors[name + 'e'] = b.PopulationRateMonitor(self.neuron_groups[name + 'e'], \
                      bin=(self.single_example_time + self.resting_time) / b.second)
            self.rate_monitors[name + 'i'] = b.PopulationRateMonitor(self.neuron_groups[name + 'i'], \
                      bin=(self.single_example_time + self.resting_time) / b.second)
            self.spike_counters[name + 'e'] = b.SpikeCounter(
                self.neuron_groups[name + 'e'])

            # record neuron population spikes
            self.spike_monitors[name + 'e'] = b.SpikeMonitor(
                self.neuron_groups[name + 'e'])
            self.spike_monitors[name + 'i'] = b.SpikeMonitor(
                self.neuron_groups[name + 'i'])

        if do_plot:
            b.figure(self.fig_num)
            self.fig_num += 1
            b.ion()
            b.subplot(211)
            b.raster_plot(self.spike_monitors['Ae'],
                          refresh=1000 * b.ms,
                          showlast=1000 * b.ms)
            b.subplot(212)
            b.raster_plot(self.spike_monitors['Ai'],
                          refresh=1000 * b.ms,
                          showlast=1000 * b.ms)
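            # In Brian 1.x, raster_plot with 'refresh' and 'showlast' redraws the raster
            # periodically during the run, displaying only the most recent second of spikes.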

        # specifying locations of lattice connections
        self.lattice_locations = {}
        if self.connectivity == 'all':
            for this_n in xrange(self.conv_features * self.n_excitatory_patch):
                self.lattice_locations[this_n] = [ other_n for other_n in xrange(self.conv_features * self.n_excitatory_patch) \
                        if is_lattice_connection(self.n_excitatory_patch_sqrt, \
                        this_n % self.n_excitatory_patch, other_n % self.n_excitatory_patch) ]
        elif self.connectivity == 'pairs':
            for this_n in xrange(self.conv_features * self.n_excitatory_patch):
                self.lattice_locations[this_n] = []
                for other_n in xrange(self.conv_features *
                                      self.n_excitatory_patch):
                    if this_n // self.n_excitatory_patch % 2 == 0:
                        if is_lattice_connection(self.n_excitatory_patch_sqrt, this_n % self.n_excitatory_patch, \
                               other_n % self.n_excitatory_patch) and \
                               other_n // self.n_excitatory_patch == this_n // self.n_excitatory_patch + 1:
                            self.lattice_locations[this_n].append(other_n)
                    elif this_n // self.n_excitatory_patch % 2 == 1:
                        if is_lattice_connection(self.n_excitatory_patch_sqrt, this_n % self.n_excitatory_patch, \
                               other_n % self.n_excitatory_patch) and \
                               other_n // self.n_excitatory_patch == this_n // self.n_excitatory_patch - 1:
                            self.lattice_locations[this_n].append(other_n)
        elif self.connectivity == 'linear':
            for this_n in xrange(self.conv_features * self.n_excitatory_patch):
                self.lattice_locations[this_n] = []
                for other_n in xrange(self.conv_features * self.n_excitatory_patch):
                    if this_n // self.n_excitatory_patch != self.conv_features - 1:
                        if is_lattice_connection(self.n_excitatory_patch_sqrt, this_n % self.n_excitatory_patch, \
                               other_n % self.n_excitatory_patch) and \
                               other_n // self.n_excitatory_patch == this_n // self.n_excitatory_patch + 1:
                            self.lattice_locations[this_n].append(other_n)
                    if this_n // self.n_excitatory_patch != 0:
                        if is_lattice_connection(self.n_excitatory_patch_sqrt, this_n % self.n_excitatory_patch, \
                               other_n % self.n_excitatory_patch) and \
                               other_n // self.n_excitatory_patch == this_n // self.n_excitatory_patch - 1:
                            self.lattice_locations[this_n].append(other_n)
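        # lattice_locations maps each excitatory neuron index to its lattice neighbors
        # under the chosen connectivity scheme ('all', 'pairs' or 'linear'); the total
        # number of such connections is used below to normalize the recurrent e -> e weights.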

        # setting up parameters for weight normalization between patches
        num_lattice_connections = sum(
            [len(value) for value in self.lattice_locations.values()])
        self.weight['ee_recurr'] = (num_lattice_connections /
                                    self.conv_features) * 0.15
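        # i.e. the target total recurrent e -> e weight per feature is 0.15 times the
        # average number of lattice connections per feature (the constant 0.15 is assumed
        # to be a normalization choice carried over from the rest of the script).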

        # creating Poisson spike trains from the input image (a 784-element vector: the flattened 28x28 image)
        for name in self.input_population_names:
            self.input_groups[name + 'e'] = b.PoissonGroup(self.n_input, 0)
            self.rate_monitors[name + 'e'] = b.PopulationRateMonitor(self.input_groups[name + 'e'], \
                       bin=(self.single_example_time + self.resting_time) / b.second)
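        # b.PoissonGroup(self.n_input, 0) creates n_input Poisson spike sources with an
        # initial rate of 0 Hz; the rates are presumably set from the pixel intensities
        # of each input image later in the simulation loop.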

        # creating connections from input Poisson spike train to convolution patch populations
        for name in self.input_connection_names:
            print '\n...creating connections between', name[0], 'and', name[1]

            # for each of the input connection types (in this case, excitatory -> excitatory)
            for connection_type in self.input_conn_names:
                # saved connection name
                connection_name = name[0] + connection_type[0] + name[1] + connection_type[1]

                # create connections from the windows of the input group to the neuron population
                self.input_connections[connection_name] = b.Connection(self.input_groups['Xe'], \
                    self.neuron_groups[name[1] + connection_type[1]], structure='sparse', \
                    state='g' + connection_type[0], delay=True, max_delay=self.delay[connection_type][1])
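                # structure='sparse' stores the weights sparsely, and delay=True together
                # with max_delay enables per-synapse delays (Brian 1.x Connection keywords);
                # self.convolution_locations[n] is assumed to list the conv_size**2 input
                # indices that form neuron n's receptive field.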

                for feature in xrange(self.conv_features):
                    for n in xrange(self.n_excitatory_patch):
                        for idx in xrange(self.conv_size**2):
                            self.input_connections[connection_name][self.convolution_locations[n][idx], \
                                 feature * self.n_excitatory_patch + n] = (b.random() + 0.01) * 0.3

            # add excitatory -> excitatory STDP to the input connection created above
            # (reuses the last connection_type from the loop over input connection types)
            print '...creating STDP for connection', name

            # STDP connection name
            connection_name = name[0] + connection_type[0] + name[1] + connection_type[1]
            # create the STDP object
            self.stdp_methods[connection_name] = b.STDP(self.input_connections[connection_name], \
              eqs=eqs_stdp_ee, pre=eqs_stdp_pre_ee, post=eqs_stdp_post_ee, wmin=0., wmax=self.wmax_ee)

        print '\n'