Example No. 1
 def _execute(self, x):
     #self.G.I = brian.TimedArray(10000 * x * brian.mV, dt=1 * brian.ms)
     self.G.I = brian.TimedArray(100 * scipy.dot(x, self.w_in.T) * brian.mV,
                                 dt=1 * brian.ms)
     self.network = brian.Network(self.G, self.Mv, self.Ms)
     self.network.reinit()
     self.network.run((x.shape[0] + 1) * brian.ms)
     retval = self.Mv.values[:, 0:x.shape[0]].T
     return retval
Example No. 2
def test_yang2009():
    tmax = 0.1

    ts = np.arange(0, tmax, 10e-3)

    ws = _double_exp_recovery_synapse(ts=ts,
                                      k=0.6,
                                      U=0.47,
                                      tau_fast=26e-3,
                                      tau_slow=1)

    anf_trains = pd.DataFrame([
        {
            'cf': 100,
            'duration': tmax,
            'spikes': ts,
            'type': 'hsr'
        },
        {
            'cf': 100,
            'duration': tmax,
            'spikes': ts + 2e-3,
            'type': 'hsr'
        },
    ])
    anfs = cn.ANFs(anf_trains)

    gbcs = cn.GBCs_RothmanManis2003(cfs=[100],
                                    convergences=(2, 0, 0),
                                    endbulb_class='yang2009')

    gbcs.connect_anfs(anfs, weights=(1, 0, 0))

    for obj in gbcs.brian_objects:
        if isinstance(obj, brian.Synapses):
            synapses = obj

    g_syn = brian.StateMonitor(
        synapses,
        'g_syn',
        record=[1]  # index 1: the 2nd synapse (index=1)
        # is the one that got (randomly) connected to
        # the first ANF
    )
    g_syn_tot = brian.StateMonitor(gbcs.group, 'g_syn_tot', record=True)

    net = brian.Network(gbcs.brian_objects, anfs.brian_objects, g_syn,
                        g_syn_tot)
    net.run(tmax * second, report='text', report_period=1)

    g_syn_interp = interpolate.interp1d(g_syn.times, g_syn.values[0])
    g_syn_tot_interp = interpolate.interp1d(g_syn_tot.times,
                                            g_syn_tot.values[0])

    npt.assert_array_almost_equal(g_syn_interp(ts), ws)
    npt.assert_array_almost_equal(g_syn_tot_interp(ts), ws)
Example No. 3
 def __init__(self, timestep, min_delay, max_delay):
     """Initialize the simulator."""
     self.network = brian.Network()
     self._set_dt(timestep)
     self.initialized = True
     self.num_processes = 1
     self.mpi_rank = 0
     self.min_delay = min_delay
     self.max_delay = max_delay
     self.gid = 0
 def clear(self):
     self.recorders = set([])
     self.id_counter = 0
     self.segment_counter = -1
     if self.network:
         for item in self.network.groups + self.network._all_operations:
             del item
     self.network = brian.Network()
     self.network.clock = brian.Clock()
     self.reset()
Example No. 5
 def __init__(self, timestep, min_delay, max_delay):
     """Initialize the simulator."""
     self.network       = brian.Network()
     self.network.clock = brian.Clock(t=0*ms, dt=timestep*ms)
     self.initialized   = True
     self.num_processes = 1
     self.mpi_rank      = 0
     self.min_delay     = min_delay
     self.max_delay     = max_delay
     self.gid           = 0
Example No. 6
def run(duration, objects, **kwargs):
    """Run Brian simulation

    Parameters
    ----------
    duration : float
        Duration of the simulation in seconds.
    objects : list
        A collection of Brian objects to be simulated.

    """

    brian.defaultclock.t = 0 * second

    net = brian.Network(objects)

    kwargs.setdefault('report', 'text')
    kwargs.setdefault('report_period', 1)
    net.run(duration * second, **kwargs)
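
A minimal usage sketch for this run() helper (the neuron model, group size and duration below are illustrative placeholders, not taken from the original project):

# Illustrative only: a tiny leaky group plus a spike monitor, handed to run().
tau = 10 * brian.ms
group = brian.NeuronGroup(50, model='dv/dt = -v / tau : volt',
                          threshold=10 * brian.mV, reset=0 * brian.mV)
monitor = brian.SpikeMonitor(group)

run(0.2, [group, monitor])  # 200 ms, progress reported as text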
Example No. 7
def main():

    fs = 100e3
    cf = 1e3
    tmax = 50e-3

    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=tmax,
        pad=30e-3,
        dbspl=50,
    )

    anf_trains = cochlea.run_zilany2014(
        sound=sound,
        fs=fs,
        cf=cf,
        anf_num=(300, 0, 0),
        seed=0,
        species='cat',
    )

    anfs = cochlea.make_brian_group(anf_trains)

    print(anfs)

    brainstem = make_brainstem_group(100)

    print(brainstem)

    monitor = brian.SpikeMonitor(brainstem)

    net = brian.Network([brainstem, monitor])
    net.run(tmax * second)

    brian.raster_plot(monitor)
    brian.show()
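
make_brainstem_group() is defined elsewhere in the original script; purely as an illustration, such a helper could look roughly like this (all parameters here are invented placeholders):

def make_brainstem_group(num):
    # Hypothetical stand-in: a plain LIF population with made-up parameters.
    tau = 10 * brian.ms
    return brian.NeuronGroup(num,
                             model='dv/dt = -v / tau : volt',
                             threshold=10 * brian.mV,
                             reset=0 * brian.mV)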
Example No. 8
eq2 += brian.Equations('erev : 1')
eq2 += brian.Equations('gch : 1')
g2 = brian.NeuronGroup(1, model=eq2)

#g1.g_ch = brian.linked_var(g1, 'g')  # uncomment this to make it work

g2.gch = brian.linked_var(g1, 'g')  # comment this to make it work
#g2.gch = brian.linked_var(g1, 'g_ch')  # uncomment this to make it work

s1 = brian.StateMonitor(g1, 'g', record=0)
s2 = brian.StateMonitor(g1, 'c0', record=0)

s3 = brian.StateMonitor(g2, 'v', record=0)
s4 = brian.StateMonitor(g2, 'gch', record=0)

s5 = brian.StateMonitor(g1, 'gmax', record=0)

# initialize
g1.g = 0.0  # commenting this ALONE will make it work
#g1.g_ch = 0.0  # uncomment this to make it work
g1.gmax = 2.0
g2.erev = 2.0

n = brian.Network()
n.add(g1)
n.add(g2)
n.add((s1, s2, s3, s4, s5))
n.run(1)

plot(s4.times, s4[0])
show()
def run_sim(ffExcInputMult=None, ffInhInputMult=None):
    """Run the cond-based LIF neuron simulation.  Takes a few minutes to construct network and run


    Parameters
    ----------
    ffExcInputMult: scalar: FF input magnitude to E cells.  multiply ffInputV by this value and connect to E cells
    ffInhInputMult: scalar: FF input magnitude to I cells.

    Returns
    -------
    outDict - spike times, records of continuous values from simulation

    """

    # use helper to get input timecourses
    (ffInputV, condAddV) = create_input_vectors(
        doDebugPlot=False)  # multiplied by scalars below

    # setup initial state
    stT = time.time()
    brian.set_global_preferences(usecodegen=True)
    brian.set_global_preferences(useweave=True)
    brian.set_global_preferences(usecodegenweave=True)
    brian.clear(erase=True, all=True)
    brian.reinit_default_clock()
    clk = brian.Clock(dt=0.05 * ms)

    ################

    # create neurons, define connections
    neurNetwork = brian.NeuronGroup(nNet,
                                    model=eqs,
                                    threshold=vthresh,
                                    reset=vrest,
                                    refractory=absRefractoryMs * msecond,
                                    order=1,
                                    compile=True,
                                    freeze=False,
                                    clock=clk)

    # create neuron pools
    neurCE = neurNetwork.subgroup(nExc)
    neurCI = neurNetwork.subgroup(nInh)
    connCE = brian.Connection(neurCE, neurNetwork, 'ge')
    connCI = brian.Connection(neurCI, neurNetwork, 'gi')
    print('n cells: %d, nE,I %d,%d, %s, absRefractoryMs: %d' %
          (nNet, nExc, nInh, repr(clk), absRefractoryMs))

    # connect the network to itself
    connCE.connect_random(neurCE,
                          neurNetwork,
                          internalSparseness,
                          weight=connENetWeight)
    connCI.connect_random(neurCI,
                          neurNetwork,
                          internalSparseness,
                          weight=connINetWeight)

    # connect inputs that change spont rate
    assert (
        spontAddRate <= 0
    ), 'Spont add rate should be negative - convention: neg, excite inhibitory cells'
    spontAddNInpSyn = 100
    nTotalSpontNeurons = (spontAddNInpSyn * nInh * 0.02)
    neurSpont = brian.PoissonGroup(nTotalSpontNeurons,
                                   -1.0 * spontAddRate * Hz)
    connCSpont = brian.Connection(neurSpont, neurCI, 'ge')
    connCSpont.connect_random(
        p=spontAddNInpSyn * 1.0 / nTotalSpontNeurons,
        weight=connENetWeight,  # match internal excitatory strengths
        fixed=True)

    # connect the feedforward visual (poisson) inputs to excitatory cells (ff E)
    ffExcInputNInpSyn = 100
    nTotalFfNeurons = (ffExcInputNInpSyn * ffExcInputNTargs * 0.02
                       )  # one pop of input cells for both E and I FF
    _ffExcInputV = ffExcInputMult * np.abs(a_(ffInputV).copy())
    assert (np.all(
        _ffExcInputV >= 0)), 'Negative FF rates are rectified to zero'
    neurFfExcInput = brian.PoissonGroup(
        nTotalFfNeurons, lambda t: _ffExcInputV[int(t * 1000)] * Hz)
    connCFfExcInput = brian.Connection(neurFfExcInput, neurNetwork, 'ge')
    connCFfExcInput.connect_random(neurFfExcInput,
                                   neurCE[0:ffExcInputNTargs],
                                   ffExcInputNInpSyn * 1.0 / nTotalFfNeurons,
                                   weight=connENetWeight,
                                   fixed=True)

    # connect the feedforward visual (poisson) inputs to inhibitory cells (ff I)
    ffInhInputNInpSyn = 100
    _ffInhInputV = ffInhInputMult * np.abs(ffInputV.copy())
    assert (np.all(
        _ffInhInputV >= 0)), 'Negative FF rates are rectified to zero'
    neurFfInhInput = brian.PoissonGroup(
        nTotalFfNeurons, lambda t: _ffInhInputV[int(t * 1000)] * Hz)
    connCFfInhInput = brian.Connection(neurFfInhInput, neurNetwork, 'ge')
    connCFfInhInput.connect_random(
        neurFfInhInput,
        neurCI[0:ffInhInputNTargs],
        ffInhInputNInpSyn * 1.0 / nTotalFfNeurons,  # sparseness
        weight=connENetWeight,
        fixed=True)

    # connect added step (ChR2) conductance to excitatory cells
    condAddAmp = 4.0
    gAdd = brian.TimedArray(condAddAmp * condAddV, dt=1 * ms)
    print('Adding conductance for %d cells (can be slow): ' %
          len(condAddNeurNs),
          end=' ')
    for (iN, tN) in enumerate(condAddNeurNs):
        neurCE[tN].gAdd = gAdd
    print('done')

    # Initialize using some randomness so all neurons don't start in same state.
    # Alternative: initialize with constant values, give net extra 100-300ms to evolve from initial state.
    neurNetwork.v = (brian.randn(1) * 5.0 - 65) * mvolt
    neurNetwork.ge = brian.randn(nNet) * 1.5 + 4
    neurNetwork.gi = brian.randn(nNet) * 12 + 20

    # Record continuous variables and spikes
    monSTarg = brian.SpikeMonitor(neurNetwork)
    if contRecNs is not None:
        contRecClock = brian.Clock(dt=contRecStepMs * ms)
        monVTarg = brian.StateMonitor(neurNetwork,
                                      'v',
                                      record=contRecNs,
                                      clock=contRecClock)
        monGETarg = brian.StateMonitor(neurNetwork,
                                       'ge',
                                       record=contRecNs,
                                       clock=contRecClock)
        monGAddTarg = brian.StateMonitor(neurNetwork,
                                         'gAdd',
                                         record=contRecNs,
                                         clock=contRecClock)
        monGITarg = brian.StateMonitor(neurNetwork,
                                       'gi',
                                       record=contRecNs,
                                       clock=contRecClock)

    # construct brian.Network before running (so brian explicitly knows what to update during run)
    netL = [
        neurNetwork, connCE, connCI, monSTarg, neurFfExcInput, connCFfExcInput,
        neurFfInhInput, connCFfInhInput, neurSpont, connCSpont
    ]
    if contRecNs is not None:
        # noinspection PyUnboundLocalVariable
        netL.append([monVTarg, monGETarg, monGAddTarg,
                     monGITarg])  # cont monitors
    net = brian.Network(netL)
    print("Network construction time: %3.1f seconds" % (time.time() - stT))

    # run
    print("Simulation running...")
    sys.stdout.flush()
    start_time = time.time()
    net.run(simRunTimeS * second, report='text', report_period=30.0 * second)
    durationS = time.time() - start_time
    print("Simulation time: %3.1f seconds" % durationS)

    outNTC = collections.namedtuple(
        'outNTC',
        'vm ge gadd gi clockDtS clockStartS clockEndS spiketimes contRecNs')
    outNTC.__new__.__defaults__ = (None, ) * len(
        outNTC._fields)  # default to None
    outNT = outNTC(clockDtS=float(monSTarg.clock.dt),
                   clockStartS=float(monSTarg.clock.start),
                   clockEndS=float(monSTarg.clock.end),
                   spiketimes=a_(monSTarg.spiketimes.values(), dtype='O'),
                   contRecNs=contRecNs)
    if contRecNs is not None:
        outNT = outNT._replace(vm=monVTarg.values,
                               ge=monGETarg.values,
                               gadd=monGAddTarg.values,
                               gi=monGITarg.values)
    return outNT
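
A hedged calling sketch for run_sim(): the function relies on module-level globals (nNet, eqs, vthresh, simRunTimeS, contRecNs, ...) defined elsewhere in the original project, so this only illustrates the call and the returned namedtuple; the multiplier values are arbitrary:

# Illustrative call; multiplier values are placeholders, not the originals.
out = run_sim(ffExcInputMult=1.0, ffInhInputMult=1.0)
print('dt = %g s, spikes recorded for %d cells' %
      (out.clockDtS, len(out.spiketimes)))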
Example No. 10
            M = bc.delay
            units = ms
        else:
            raise Exception(
                "Setting parameters other than weight and delay not yet supported."
            )
        if common.is_number(value):
            for row in M.data:
                for i in range(len(row)):
                    row[i] = value * units
        elif isinstance(value, numpy.ndarray) and len(value.shape) == 2:
            address_gen = ((i, j) for i, row in enumerate(bc.W.rows)
                           for j in row)
            for (i, j) in address_gen:
                M[i, j] = value[i, j] * units
        elif common.is_listlike(value):
            assert len(value) == M.getnnz()
            address_gen = ((i, j) for i, row in enumerate(bc.W.rows)
                           for j in row)
            for ((i, j), val) in izip(address_gen, value):
                M[i, j] = val * units
        else:
            raise Exception("Values must be scalars or lists/arrays")


# --- Initialization, and module attributes ------------------------------------

state = _State()  # a Singleton, so only a single instance ever exists
del _State
net = brian.Network()
Example No. 11
objects.append(S_in)
objects.append(S_out)
for i in range(len(hidden_neurons)):
    objects.append(S_hidden[i])

for i in range(len(N_hidden)):
    objects.append(Sa[i])

objects.append(Sb)

objects.append(M)
objects.append(Mv)
objects.append(Mu)

#pudb.set_trace()
net = br.Network(objects)
'''         TRAINING        '''
#Net = br.Network(objects)
#OUT = open('weights.txt', 'a')

number = 3
T = 60
N_o = 1
N_h = 1

print "======================================================================"
print "\t\t\tSetting number of spikes"
print "======================================================================"

#if op.isfile(weight_file):
#    #pudb.set_trace()
Example No. 12
    def __init__(self,
                 input_dim,
                 output_dim,
                 dtype,
                 input_scaling=100,
                 input_conn_frac=.5,
                 dt=1,
                 we_scaling=2,
                 wi_scaling=.5,
                 we_sparseness=.1,
                 wi_sparseness=.1):
        super(BrianIFReservoirNode, self).__init__(input_dim=input_dim,
                                                   output_dim=output_dim,
                                                   dtype=dtype)
        self.taum = 20 * brian.ms
        self.taue = 5 * brian.ms
        self.taui = 10 * brian.ms
        self.Vt = 15 * brian.mV
        self.Vr = 0 * brian.mV
        self.frac_e = .75
        self.input_scaling = input_scaling
        self.input_conn_frac = input_conn_frac
        self.dt = dt
        self.we_scaling = we_scaling
        self.wi_scaling = wi_scaling
        self.we_sparseness = we_sparseness
        self.wi_sparseness = wi_sparseness

        self.eqs = brian.Equations('''
              dV/dt  = (I-V+ge-gi)/self.taum : volt
              dge/dt = -ge/self.taue    : volt 
              dgi/dt = -gi/self.taui    : volt
              I: volt
              ''')
        self.G = brian.NeuronGroup(N=output_dim,
                                   model=self.eqs,
                                   threshold=self.Vt,
                                   reset=self.Vr)
        self.Ge = self.G.subgroup(int(scipy.floor(
            output_dim * self.frac_e)))  # Excitatory neurons
        self.Gi = self.G.subgroup(
            int(scipy.floor(output_dim * (1 - self.frac_e))))

        self.internal_conn = brian.Connection(self.G, self.G)
        self.we = self.we_scaling * scipy.random.rand(len(self.Ge), len(
            self.G)) * brian.nS
        # Note: rows sized to the inhibitory pool (Gi), matching connection Ci below
        self.wi = self.wi_scaling * scipy.random.rand(len(self.Gi),
                                                      len(self.G)) * brian.nS

        self.Ce = brian.Connection(self.Ge,
                                   self.G,
                                   'ge',
                                   sparseness=self.we_sparseness,
                                   weight=self.we)
        self.Ci = brian.Connection(self.Gi,
                                   self.G,
                                   'gi',
                                   sparseness=self.wi_sparseness,
                                   weight=self.wi)

        #self.internal_conn.connect(self.G, self.G, self.w_res)

        self.Mv = brian.StateMonitor(self.G, 'V', record=True, timestep=10)
        self.Ms = brian.SpikeMonitor(self.G, record=True)
        self.w_in = self.input_scaling * (scipy.random.rand(
            self.output_dim, self.input_dim)) * (scipy.random.rand(
                self.output_dim, self.input_dim) < self.input_conn_frac)
        self.network = brian.Network(self.G, self.Ce, self.Ci, self.Ge,
                                     self.Gi, self.Mv, self.Ms)
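
Example No. 1's _execute() appears to belong to this same reservoir node class; under that assumption, a rough instantiation sketch (the dimensions and random input are made up, and the MDP/Oger base class must be importable):

# Hypothetical usage of the reservoir node defined above.
node = BrianIFReservoirNode(input_dim=4, output_dim=100, dtype=float)
x = scipy.random.rand(300, 4)   # 300 time steps of 4-channel input
states = node._execute(x)       # recorded membrane traces of the reservoir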
Example No. 13
def run(duration, objects, **kwargs):
    """Run a brian simulation

    This is a convenience function to easily
    run a brian network while also offering a flexible
    way of introducing Monitors.

    Parameters
    ----------
    duration : float
        Duration of the simulation in seconds.

    objects : list
        A collection of Brian objects to be simulated.

    export_dict : dict
        A dictionary of state traces to export. Each key is the
        neuron group to read the state from and the value is a
        list of strings naming the state variables.
        {neuron1: ['v', 'n']}, for example, would create state
        monitors for the variables v and n of neuron1.

    **kwargs :
        Further kwargs are passed to the Brian network run function.

    Returns
    -------
    If export_dict is given as a kwarg, the function returns that
    dictionary with each value replaced by a pandas.DataFrame
    containing the time and value traces of the requested variables.

    """

    import pandas

    # Reset brian defaultclock
    brian.defaultclock.t = 0 * second

    # If export_dict is given, create a number
    # of State Monitors for the requested variables
    monitor_objects = []
    monitor_dict = {}
    if "export_dict" in kwargs:
        export_dict = kwargs.pop("export_dict")

        for o, l in export_dict.iteritems():
            monitor_dict[o] = []
            for i in l:
                monitor = brian.StateMonitor(o, i, record=True)
                monitor_dict[o].append(monitor)
                monitor_objects.append(monitor)

    net = brian.Network(objects + monitor_objects)

    kwargs.setdefault('report', 'text')
    kwargs.setdefault('report_period', 1)

    net.run(duration * second, **kwargs)

    for o, l in monitor_dict.iteritems():
        pds_dict = {}
        for i, m in enumerate(l):
            var_name = export_dict[o][i]
            data = m.values
            pds_dict[var_name] = list(data)
        pds_dict['time'] = len(data) * [m.times]
        pds_frame = pandas.DataFrame(pds_dict)
        monitor_dict[o] = pds_frame

    return monitor_dict
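
A small usage sketch of the export_dict mechanism described in the docstring (the neuron model and variable name are illustrative):

# Illustrative only: record 'v' from one group and get it back as a DataFrame.
tau = 20 * brian.ms
group = brian.NeuronGroup(5, model='dv/dt = -v / tau : volt')
frames = run(0.1, [group], export_dict={group: ['v']})
print(frames[group].columns)    # expect a 'time' and a 'v' column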
Example No. 14
    def _return_generator(self, simulation):
        '''
        Defines a simulation using a python generator.
        '''

        import brian
        import numpy

        print "Starting the simulation!"

        print "Reseting the Brian Simulation object...",
        brian.reinit(
        )  # This is only necessary when using the same enviroment over and over (like with iPython).
        print "Done!"

        clock_mult = self.step_size
        brian.defaultclock.dt = clock_mult * brian.ms

        print "Initial simulation time:", brian.defaultclock.t
        print "Simulation step:", brian.defaultclock.dt

        # Calls the user function with the Brian objects to be used in the simulation
        Input_layer, Output_layer, pop_objects, syn_objects, monitors_objects = simulation(
            brian.defaultclock, brian)

        output_spikes = []
        output_spikes_time = []

        # Every time spikes occur at the SpikeMonitor related to the output neuron group, this function is called
        def output_spikes_proc(spikes):
            if len(spikes):
                # Save the indices of the neurons that generated spikes
                output_spikes.append(spikes.tolist())
                # Convert the current simulation time to ms and save it
                output_spikes_time.append(1000 * float(brian.defaultclock.t))
                # The spike monitor and all this code could be replaced by the
                # .get_spikes() method of neuron groups; need to check which way is fastest.

        OutputMonitor = brian.SpikeMonitor(Output_layer,
                                           record=False,
                                           function=output_spikes_proc)
        # Because record=False, spikes are not stored, so memory will not run out during a long simulation.

        net = brian.Network(pop_objects + syn_objects + monitors_objects +
                            [OutputMonitor])

        r = 0
        while True:
            spiketimes = yield  # Receives the content from the Python generator method .send()
            if spiketimes:
                # The received spikes are inserted at the last simulated time
                spiketimes = [(i, brian.defaultclock.t) for i in spiketimes]
                Input_layer.set_spiketimes(spiketimes)
            # Run one simulation step each time the generator is resumed
            net.run(clock_mult * brian.ms)
            r += 1
            yield (
                r,
                float(brian.defaultclock.t) * 1000,
                # Converted explicitly to prove the spikes were received
                numpy.array(Input_layer.get_spiketimes()).astype(dtype=numpy.float),
                output_spikes,
                output_spikes_time,
            )  # After .send(), the generator runs up to this yield and stops here

            # Clear both lists so only the spikes generated in the last step are sent
            output_spikes = []
            output_spikes_time = []
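
A sketch of how the generator might be driven from outside (`controller` stands for an instance of the class this method belongs to and `my_simulation` for the user-supplied setup callable; the injected spike indices are arbitrary):

# Hypothetical driver loop for the generator returned above.
gen = controller._return_generator(my_simulation)
next(gen)                        # run the setup code up to the first `yield`
for _ in range(10):
    # Inject spikes on neurons 0 and 2, advance one step, collect the results
    step, t_ms, in_spikes, out_spikes, out_times = gen.send([0, 2])
    next(gen)                    # return to the receiving `yield`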