Example #1
0
def test_store_restore_magic():
    """Store/restore the magic network at two time points and verify that
    re-running reproduces identical monitor data.

    The default store slot snapshots the network at t=0 ms and the named
    slot 'second' at t=10 ms; after each restore the simulation is run to
    t=20 ms again and the recorded voltages/spikes are compared against
    the reference data from the first, uninterrupted run.
    """
    source = NeuronGroup(10, '''dv/dt = rates : 1
                                rates : Hz''', threshold='v>1', reset='v=0')
    source.rates = 'i*100*Hz'
    target = NeuronGroup(10, 'v:1')
    # Brian2 >= 2.0 API: 'pre' was renamed to 'on_pre' and the 'connect'
    # keyword was removed in favour of an explicit connect() call.
    synapses = Synapses(source, target, model='w:1', on_pre='v+=w')
    synapses.connect(j='i')  # one-to-one, equivalent to the old connect='i==j'
    synapses.w = 'i*1.0'
    synapses.delay = 'i*ms'
    state_mon = StateMonitor(target, 'v', record=True)
    spike_mon = SpikeMonitor(source)
    store()  # default time slot (t = 0 ms)
    run(10*ms)
    store('second')  # named slot (t = 10 ms)
    run(10*ms)
    # Reference data from the uninterrupted 0-20 ms run
    v_values = state_mon.v[:, :]
    spike_indices, spike_times = spike_mon.it_

    restore() # Go back to beginning
    assert magic_network.t == 0*ms
    run(20*ms)
    assert defaultclock.t == 20*ms
    assert_equal(v_values, state_mon.v[:, :])
    assert_equal(spike_indices, spike_mon.i[:])
    assert_equal(spike_times, spike_mon.t_[:])

    # Go back to middle
    restore('second')
    assert magic_network.t == 10*ms
    run(10*ms)
    assert defaultclock.t == 20*ms
    assert_equal(v_values, state_mon.v[:, :])
    assert_equal(spike_indices, spike_mon.i[:])
    assert_equal(spike_times, spike_mon.t_[:])
Example #2
0
def test_store_restore_magic_to_file():
    """Like the in-memory store/restore test, but snapshots go to a file.

    Two snapshots (default slot at t=0 ms, 'second' at t=10 ms) are written
    to the same file; after each restore the run is repeated and the monitor
    data compared against the first pass. The snapshot file is always
    removed, even if an assertion fails.
    """
    # NOTE: tempfile.mktemp() is deprecated/race-prone, but store() needs a
    # path it can create itself rather than an already-open file.
    filename = tempfile.mktemp(suffix='state', prefix='brian_test')
    source = NeuronGroup(10,
                         '''dv/dt = rates : 1
                                rates : Hz''',
                         threshold='v>1',
                         reset='v=0')
    source.rates = 'i*100*Hz'
    target = NeuronGroup(10, 'v:1')
    # Brian2 >= 2.0 API: 'on_pre' replaces 'pre'; connectivity is set up via
    # an explicit connect() call instead of the removed 'connect' keyword.
    synapses = Synapses(source,
                        target,
                        model='w:1',
                        on_pre='v+=w')
    synapses.connect(j='i')  # one-to-one, equivalent to the old connect='i==j'
    synapses.w = 'i*1.0'
    synapses.delay = 'i*ms'
    state_mon = StateMonitor(target, 'v', record=True)
    spike_mon = SpikeMonitor(source)
    try:
        store(filename=filename)  # default time slot (t = 0 ms)
        run(10 * ms)
        store('second', filename=filename)  # named slot (t = 10 ms)
        run(10 * ms)
        # Reference data from the uninterrupted 0-20 ms run
        v_values = state_mon.v[:, :]
        spike_indices, spike_times = spike_mon.it_

        restore(filename=filename)  # Go back to beginning
        assert magic_network.t == 0 * ms
        run(20 * ms)
        assert defaultclock.t == 20 * ms
        assert_equal(v_values, state_mon.v[:, :])
        assert_equal(spike_indices, spike_mon.i[:])
        assert_equal(spike_times, spike_mon.t_[:])

        # Go back to middle
        restore('second', filename=filename)
        assert magic_network.t == 10 * ms
        run(10 * ms)
        assert defaultclock.t == 20 * ms
        assert_equal(v_values, state_mon.v[:, :])
        assert_equal(spike_indices, spike_mon.i[:])
        assert_equal(spike_times, spike_mon.t_[:])
    finally:
        # Remove the snapshot file even when an assertion above failed.
        try:
            os.remove(filename)
        except OSError:
            pass
Example #3
0
def test_filterbankgroup_restart():
    """Run a cochlea-driven spiking network once, restore the stored
    initial state, and run it a second time.

    The test passes if both runs complete without error, i.e. a
    FilterbankGroup survives a store()/restore() cycle.
    """
    # Stimulus: a 1 kHz tone mixed with white noise, ramped on/off.
    stimulus = (tone(1 * kHz, .1 * second) + whitenoise(.1 * second)).ramp()
    center_freqs = erbspace(20 * Hz, 20 * kHz, 3000)
    cochlea = Gammatone(stimulus, center_freqs)
    # Half-wave rectification and compression [x]^(1/3)
    hair_cells = FunctionFilterbank(
        cochlea, lambda x: 3 * np.clip(x, 0, np.inf) ** (1.0 / 3.0))
    # Leaky integrate-and-fire model with noise and refractoriness
    eqs = '''
    dv/dt = (I-v)/(1*ms)+0.2*xi*(2/(1*ms))**.5 : 1 (unless refractory)
    I : 1
    '''
    fibers = FilterbankGroup(hair_cells, 'I', eqs, reset='v=0',
                             threshold='v>1', refractory=5*ms, method='euler')
    spikes = SpikeMonitor(fibers)  # picked up by the magic network
    store()
    run(stimulus.duration)
    restore()
    run(stimulus.duration)
Example #4
0
def test_store_restore_magic_to_file():
    """Store/restore the magic network to a file and check reproducibility.

    Snapshots at t=0 ms (default slot) and t=10 ms (slot 'second') are
    written to one file; after each restore the simulation is re-run and
    the monitor data compared against the first pass. The snapshot file is
    always removed, even if an assertion fails (the original only cleaned
    up on the success path).
    """
    # NOTE: tempfile.mktemp() is deprecated/race-prone, but store() needs a
    # path it can create itself rather than an already-open file.
    filename = tempfile.mktemp(suffix='state', prefix='brian_test')
    source = NeuronGroup(10, '''dv/dt = rates : 1
                                rates : Hz''', threshold='v>1', reset='v=0')
    source.rates = 'i*100*Hz'
    target = NeuronGroup(10, 'v:1')
    synapses = Synapses(source, target, model='w:1', on_pre='v+=w')
    synapses.connect(j='i')  # one-to-one connectivity
    synapses.w = 'i*1.0'
    synapses.delay = 'i*ms'
    state_mon = StateMonitor(target, 'v', record=True)
    spike_mon = SpikeMonitor(source)
    try:
        store(filename=filename)  # default time slot (t = 0 ms)
        run(10*ms)
        store('second', filename=filename)  # named slot (t = 10 ms)
        run(10*ms)
        # Reference data from the uninterrupted 0-20 ms run
        v_values = state_mon.v[:, :]
        spike_indices, spike_times = spike_mon.it_

        restore(filename=filename) # Go back to beginning
        assert magic_network.t == 0*ms
        run(20*ms)
        assert defaultclock.t == 20*ms
        assert_equal(v_values, state_mon.v[:, :])
        assert_equal(spike_indices, spike_mon.i[:])
        assert_equal(spike_times, spike_mon.t_[:])

        # Go back to middle
        restore('second', filename=filename)
        assert magic_network.t == 10*ms
        run(10*ms)
        assert defaultclock.t == 20*ms
        assert_equal(v_values, state_mon.v[:, :])
        assert_equal(spike_indices, spike_mon.i[:])
        assert_equal(spike_times, spike_mon.t_[:])
    finally:
        # Clean up the snapshot file regardless of test outcome.
        try:
            os.remove(filename)
        except OSError:
            pass
Example #5
0
def main2(plot=True):
    """Sweep the membrane time constant and record the output firing rate.

    Builds the network once and rewinds it with store()/restore() between
    runs instead of reconstructing it on every iteration (faster than the
    rebuild-per-run approach in main1).

    Parameters
    ----------
    plot : bool
        If True, plot firing rate versus time constant.
    """
    bs.start_scope()
    n_inputs = 100
    in_rate = 10 * bs.Hz
    # 'weight' is resolved from this local namespace by the on_pre code below.
    weight = 0.1

    # Membrane time constants to sweep over
    taus = bs.linspace(start=1, stop=10, num=30) * bs.ms
    rates_out = []

    poisson_inputs = bs.PoissonGroup(n_inputs, rates=in_rate)
    eqs = """
     dv/dt = -v/tau : 1
     """
    neuron = bs.NeuronGroup(1, eqs, threshold='v>1', reset='v=0', method='exact')
    # Drive the neuron from the external Poisson source (all-to-all).
    projection = bs.Synapses(poisson_inputs, neuron, on_pre='v += weight')
    projection.connect()

    counter = bs.SpikeMonitor(source=neuron)
    bs.store()  # snapshot of the pristine network
    # 'tau' must keep this name: the equations above look it up at run time.
    for tau in taus:
        bs.restore()  # rewind to the stored state before each run
        bs.run(1 * bs.second)
        rates_out.append(counter.num_spikes / bs.second)
    if plot:
        plt.clf()
        plt.plot(taus / bs.ms, rates_out)
        plt.xlabel(r'$\tau$ (ms)')
        plt.ylabel('Firing Rate (spikes/s)')
        plt.show()
def simulation1():
    """Simulate a single LIF place cell driven by two Poisson inputs whose
    rates follow Gaussian spike-train profiles, then plot a 0/1 spike trace.

    For every time step of the rate profiles a fresh neuron/input pair is
    built and run for ``dt`` seconds; spike times and monitor times are
    offset by the step index and concatenated.
    """
    bs.start_scope()

    rates = [10, 20, 30]  # NOTE(review): unused below — confirm intended
    # NOTE(review): store() has no matching restore() here, so the clock and
    # network keep advancing across iterations — confirm this is intended.
    bs.store()

    train1 = generate_gaussian_spike_train(mean=2.5, std=0.5)
    train2 = generate_gaussian_spike_train(mean=3, std=1.2)
    train_len = train1.shape[0]

    total_time = None
    total_spikes = None

    for i in range(train_len):
        r1 = train1[i]
        r2 = train2[i]
        place_cell = bs.NeuronGroup(1,
                                    model=lif,
                                    reset=reset_eq,
                                    threshold=threshold_eq,
                                    method="euler")

        place_cell.tau_m = TAU_M
        place_cell.tau_s = TAU_S

        place_cell.v = -80.0 * bs.mV

        print(f"Rates: {r1, r2}")
        # Renamed from 'input' to avoid shadowing the builtin.
        poisson_input = bs.PoissonGroup(2, rates=np.array([r1, r2]) * bs.Hz)

        # connect input poisson spike generator to the input cells (grid and boundary vector)
        S1 = bs.Synapses(poisson_input, place_cell, on_pre=synaptic_update)
        # BUG FIX: the original did `S1.connect = S1.connect(i=[0, 1], j=0)`,
        # which called connect() and then overwrote the bound method with its
        # return value (None). Just call it.
        S1.connect(i=[0, 1], j=0)
        step_per_time = 100
        place_cell_v_monitor = bs.StateMonitor(place_cell,
                                               'v',
                                               record=True,
                                               dt=(dt / step_per_time) *
                                               bs.second)

        place_cell_monitor = bs.SpikeMonitor(source=place_cell)

        bs.run(dt * bs.second)

        spikes_i = place_cell_monitor.i
        spikes_t = place_cell_monitor.t

        print(spikes_i)
        print(spikes_t)

        # Accumulate spike times, shifting each step by its offset in ms.
        if total_spikes is None:
            total_spikes = spikes_t / bs.ms
        else:
            total_spikes = np.concatenate(
                [total_spikes, (i * step_per_time) + spikes_t / bs.ms])

        print("time", place_cell_v_monitor.t / bs.ms)
        if total_time is None:
            total_time = place_cell_v_monitor.t / bs.ms
        else:
            total_time = np.concatenate([
                total_time,
                (i * step_per_time) + place_cell_v_monitor.t / bs.ms
            ])
    total_time = interp_based(total_time, N=10)
    print(type(total_time))
    print(total_time.shape)
    print(total_time)
    print(total_spikes)
    plt.figure()
    # Mark the positions in total_time where a spike occurred.
    _, ind, _ = np.intersect1d(total_time,
                               total_spikes,
                               assume_unique=True,
                               return_indices=True)
    spikes = np.zeros_like(total_time)
    spikes[ind] = 1
    plt.plot(total_time, spikes)
    plt.xlabel('Time (ms)')
    plt.ylabel('v')
    plt.title("Spikes")  # was f"Spikes": no placeholders, f-prefix dropped
    plt.show()
Example #7
0
spikesgpi = b2.SpikeMonitor(GPI)

spikemonitors = [
    spikesd1, spikesd2, spikesfsn, spikesgpi, spikesgpta, spikesgpti, spikesstn
]
nuclei = ['D1', 'D2', 'FSN', 'GPi', 'GPeTA', 'GPeTI', 'STN']

firingratesd1 = []
firingratesd2 = []
firingratesfsn = []
firingratesgpi = []
firingratesgpta = []
firingratesgpti = []
firingratesstn = []

b2.store()

c_var = np.arange(0., 0.11, 0.01)
# c_var = [0.]

# file_path = os.path.join(os.environ['USERPROFILE'], 'Desktop', 'frandff.txt')
file_path = '/home/f_mastellone/frandff.txt'
text_file = open(file_path, "w")

for i, c_i in enumerate(c_var):
    b2.restore()
    c = c_i

    for spikemonitor in spikemonitors:
        spikemonitor.active = False
    b2.run(300 * b2.ms)
Example #8
0
import brian2 as b
import matplotlib.pyplot as plt
import numpy as np
print("\nLibraries loaded.")

b.start_scope()

# Each of `n_neurons` virtual neurons is simulated `n_times` times for
# `time_per_neuron`, reusing a single stored Poisson source.
time_per_neuron = 400 * b.ms
n_times = 100
n_neurons = 2
freq = 50 * b.Hz

P = b.PoissonGroup(1, freq)
M = b.SpikeMonitor(P)
b.store()  # snapshot so every run below starts from an identical state

print("Simulating-1...", end='', flush=True)
spike_times = []
spike_indices = []
# NOTE(review): start_time is never advanced in this chunk — confirm it is
# updated further down in the file, otherwise all runs share offset 0.
start_time = 0
for k in range(n_times):
    for n in range(n_neurons):
        b.restore()  # rewind clock and monitor before each run
        b.run(time_per_neuron)
        # Progress indicator, rewritten in place with carriage return.
        percentage = 100.0 * (k * n_neurons + n) / (n_times * n_neurons + 0.0)
        print("\rSimulating-1... t={}%".format(round(percentage, 2)),
              end='',
              flush=True)
        # Offset this run's spike times and tag each with the neuron index.
        spike_times += [a + start_time for a in list(M.t)]
        spike_indices += [n] * M.num_spikes