Example #1
def binaural_cochlea(sound, fs, c_freq, anf_num=(100, 0, 0), seed=None):
    """ Run two cochlea models where the sound is thee same apart from a
    predefined itd

    Parameters
    ----------
    sound : numpy.ndarray
        Two-column array; the first column is the ipsilateral and the
        second the contralateral ear signal.
    fs : int
        Sampling frequency of the sound in Hz.
    c_freq : float
        Center frequency of the auditory nerve fibres in Hz.
    anf_num : tuple
        A tuple of length 3 giving the number of auditory nerve fibres
        (HSR, MSR, LSR).
    seed : convertible to 32-bit integer
        The seed for the random number generator.

    Returns
    -------
    dict
        A dict containing the spikes and the neuron groups.

        keys:
        - spikes: A list with the spike trains [ipsi spikes, contra spikes]
        - neuron_groups: A list with the Brian neuron groups [ipsi group, contra group]

    """

    # Calculate spike trains for the ipsi- and contralateral sounds
    anf_i = coch.run_zilany2014(
        sound[:, 0],
        fs,
        anf_num=anf_num,
        cf=(c_freq, c_freq, 1),
        seed=seed,
        powerlaw='approximate',
        species='human',
    )

    anf_c = coch.run_zilany2014(
        sound[:, 1],
        fs,
        anf_num=anf_num,
        cf=(c_freq, c_freq, 1),
        seed=seed,
        powerlaw='approximate',
        species='human',
    )

    # Create neuron groups.
    anf_group_c = make_anf_group(anf_c)
    anf_group_i = make_anf_group(anf_i)

    return {
        "spikes": [anf_i, anf_c],
        "neuron_groups": [anf_group_i, anf_group_c]
    }
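A minimal usage sketch for binaural_cochlea (an illustration, not from the source: the tone and parameters below are assumptions, and no level calibration is applied):

import numpy as np

fs = 100e3
t = np.arange(0, 50e-3, 1 / fs)
tone = np.sin(2 * np.pi * 1e3 * t)
stereo = np.column_stack((tone, tone))  # zero ITD: both ears identical

result = binaural_cochlea(stereo, fs, c_freq=1e3, anf_num=(100, 0, 0), seed=0)
ipsi_spikes, contra_spikes = result["spikes"]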
Example #2
def audio_to_gbc(fs=100e3,  # the Zilany (2014) model needs fs >= 100 kHz
                 input_sound=np.array([]),
                 anf_num=(4, 3, 2),
                 l_freq=None,
                 h_freq=None):
    # Map [l_freq, h_freq] onto basilar membrane positions (in mm) using
    # the human Greenwood function for a 35 mm cochlea
    cochlea_length = 35.0
    basilar_membrane_pos_high = np.log10(h_freq / 165.4 +
                                         0.88) * cochlea_length / 2.1
    basilar_membrane_pos_low = np.log10(l_freq / 165.4 +
                                        0.88) * cochlea_length / 2.1

    # Number of inner hair cells in that region, assuming 500 IHCs along
    # the whole cochlea (at least 1)
    total_ihc = 500.0
    n_ihc = int(
        max(
            1,
            round(total_ihc *
                  (basilar_membrane_pos_high - basilar_membrane_pos_low) /
                  cochlea_length)))

    anf_spikes = cochlea.run_zilany2014(
        input_sound,
        fs,
        anf_num=anf_num,
        cf=(l_freq, h_freq, n_ihc),
        species='human',
        seed=None,
    )
    anf_spikes = anf_spikes.sort_values(by='cf')
    return anf_spikes
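The basilar-membrane positions above invert the human Greenwood frequency-position map, f(x) = 165.4 * (10^(2.1*x/35) - 0.88) with x in mm. A rough worked check of the resulting channel count (the pos() helper is hypothetical, introduced only for illustration):

import numpy as np

def pos(f):
    """Greenwood position in mm from the apex for frequency f in Hz."""
    return np.log10(f / 165.4 + 0.88) * 35.0 / 2.1

# For the band 125 Hz - 8 kHz: pos(8000) ~ 28.2 mm, pos(125) ~ 3.6 mm,
# so n_ihc = round(500 * (28.2 - 3.6) / 35) ~ 352 channels.
print(int(round(500.0 * (pos(8000) - pos(125)) / 35.0)))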
Example #3
def main():

    fs = 100e3
    cf = 1e3
    dbspl = 50
    tone_duration = 50e-3

    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=tone_duration,
        dbspl=dbspl,
    )

    anf_trains = cochlea.run_zilany2014(
        sound,
        fs,
        anf_num=(200, 0, 0),
        cf=cf,
        seed=0,
        species='cat',
    )

    anf_trains.to_pickle("anf_zilany2014.pkl")

    th.plot_raster(anf_trains)
    th.show()
Example #5
def main():

    fs = 100e3

    ### Make sound
    t = np.arange(0, 0.1, 1 / fs)
    s = dsp.chirp(t, 80, t[-1], 20000)
    s = cochlea.set_dbspl(s, 50)
    pad = np.zeros(int(10e-3 * fs))
    sound = np.concatenate((s, pad))

    ### Run model
    anf = cochlea.run_zilany2014(
        sound,
        fs,
        anf_num=(100, 0, 0),
        cf=(125, 20000, 100),
        seed=0,
        powerlaw="approximate",
        species="human",
    )

    ### Accumulate spike trains
    anf_acc = th.accumulate(anf, keep=["cf", "duration"])
    anf_acc.sort("cf", ascending=False, inplace=True)

    ### Plot auditory nerve response
    fig, ax = plt.subplots(2, 1)
    th.plot_signal(signal=sound, fs=fs, ax=ax[0])
    th.plot_neurogram(anf_acc, fs, ax=ax[1])
    plt.show()
Example #6
def audio_to_spike_train(self, audio, fs=100e3):
    # Keep only the 'spikes' through 'duration' columns (i.e. without cf
    # and type)
    return cochlea.run_zilany2014(
        audio.astype(np.float64),
        fs,
        anf_num=(self.newron_in_layer, 0, 0),
        cf=10000,
        seed=0,
        species=self.species,
    ).loc[:, 'spikes':'duration']
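Note that pandas label slicing with .loc is inclusive on both ends, so 'spikes':'duration' keeps every column from spikes through duration (in the frame's column order) and drops cf and type. A tiny self-contained illustration with a hypothetical column layout:

import pandas as pd

df = pd.DataFrame(columns=['spikes', 'duration', 'cf', 'type'])
print(list(df.loc[:, 'spikes':'duration'].columns))  # -> ['spikes', 'duration']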
Example #7
0
def peripheralSpikes(sound, par, fs=-1):

    if fs == -1:
        fs = par['periphFs']

    anfTrains = cochlea.run_zilany2014(sound, fs,
                                       anf_num=[60, 25, 15],
                                       cf=par['cochChanns'],
                                       species='human', seed=0)

    return anfTrains
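The structure of par is implicit here; a sketch of the two keys this function reads (the values are assumptions, not from the source):

par = {
    'periphFs': 100e3,             # sampling frequency for the periphery model, Hz
    'cochChanns': (125, 8e3, 40),  # (lowest CF, highest CF, number of channels)
}
anf = peripheralSpikes(sound, par)  # fs defaults to par['periphFs']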
Example #8
def create_spk(S, fs=100e3, N=100, cf=500, seed=0, species="cat", anf_num=None):
    """
    Create ANF spike trains using the cochlea package.
    """
    if not anf_num:  # None avoids a mutable default argument
        anf_num = (0, N, 0)
    import cochlea

    T = cochlea.run_zilany2014(
        S, fs=fs, anf_num=anf_num, cf=cf, species=species, seed=seed
    )
    return T
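A short usage sketch (the calibrated tone below is an assumption, in the style of the other examples):

import numpy as np
import cochlea

fs = 100e3
t = np.arange(0, 50e-3, 1 / fs)
S = cochlea.set_dbspl(np.sin(2 * np.pi * 500 * t), 50)

# 100 medium-spontaneous-rate fibres at cf=500 Hz (default anf_num is (0, N, 0))
T = create_spk(S, fs=fs, N=100, cf=500, seed=0, species="cat")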
Example #9
def main():
    fs = 100e3
    cf = 8e3

    ### Make sound
    t = np.arange(0, 0.1, 1/fs)
    tone = np.sin(2*np.pi*t*cf)

    tone = cochlea.set_dbspl(tone, 20)

    pad = np.zeros(int(50e-3 * fs))
    sound = np.concatenate((tone, pad))

    ### Run model
    anf = cochlea.run_zilany2014(
        sound,
        fs,
        anf_num=(200, 0, 0),
        cf=cf,
        seed=0,
        powerlaw='approximate',
        species='human'
    )


    print(anf.head(20))


    ### Plot PSTH
    all_spikes = np.concatenate(anf.spikes)
    tmax = anf.duration.max()

    bin_size = 1e-3

    fig, ax = plt.subplots()

    ax.hist(
        all_spikes,
        bins=int(tmax / bin_size),
        range=(0, tmax),
        weights=np.ones_like(all_spikes) / bin_size / len(anf),
    )

    ax.set_xlabel("Time [s]")
    ax.set_ylabel("Rate [spikes/s]")
    ax.set_title("PSTH")

    plt.show()
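The weights argument turns raw counts into a rate: each spike contributes 1/(bin_size * number of fibres), so the bar heights are in spikes per second per fibre. The same PSTH values can be computed without plotting, for example:

counts, edges = np.histogram(all_spikes, bins=int(tmax / bin_size), range=(0, tmax))
rate = counts / bin_size / len(anf)  # spikes/s per fibre, identical to the bar heights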
Example #11
def main():
    fs = 100e3
    cf = 500
    convergence = (35, 0, 0)

    # Generate sound
    sound = wv.ramped_tone(fs=fs, freq=cf, duration=50e-3, pad=30e-3, dbspl=50)

    # Run inner ear model
    anf_trains = cochlea.run_zilany2014(sound=sound,
                                        fs=fs,
                                        cf=cf,
                                        anf_num=convergence,
                                        species='cat',
                                        seed=0)

    # Run GBC
    cn.set_celsius(37)
    cn.set_fs(fs)

    gbc = cn.GBC_Point(convergence=convergence,
                       cf=cf,
                       endbulb_class='tonic',
                       record_voltages=True)

    gbc.load_anf_trains(anf_trains, seed=0)

    cn.run(duration=len(sound) / fs, objects=[gbc])

    # Collect the results
    gbc_trains = gbc.get_trains()
    voltages = gbc.get_voltages()

    # Present the results
    print(gbc_trains)

    fig, ax = plt.subplots(2, 1)

    th.plot_raster(anf_trains, ax=ax[0])
    ax[0].set_title("ANF input")

    th.plot_signal(voltages, fs=fs, ax=ax[1])

    th.show()
Example #12
def main():

    fs = 100e3
    cf = 1e3
    tmax = 50e-3

    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=tmax,
        pad=30e-3,
        dbspl=50,
    )

    anf_trains = cochlea.run_zilany2014(
        sound=sound,
        fs=fs,
        cf=cf,
        anf_num=(300, 0, 0),
        seed=0,
        species='cat',
    )

    anfs = cochlea.make_brian_group(anf_trains)

    print(anfs)

    brainstem = make_brainstem_group(100)

    print(brainstem)

    monitor = brian.SpikeMonitor(brainstem)

    net = brian.Network([brainstem, monitor])
    net.run(tmax * second)

    brian.raster_plot(monitor)
    brian.show()
Example #13
def main():

    fs = 100e3

    # Make sound
    t = np.arange(0, 0.1, 1/fs)
    s = dsp.chirp(t, 80, t[-1], 20000)
    s = cochlea.set_dbspl(s, 50)
    pad = np.zeros(int(10e-3 * fs))
    sound = np.concatenate( (s, pad) )

    # Run model
    anf = cochlea.run_zilany2014(
        sound,
        fs,
        anf_num=(100, 0, 0),
        cf=(125, 20000, 100),
        seed=0,
        powerlaw='approximate',
        species='human',
    )

    # Accumulate spike trains
    anf_acc = th.accumulate(anf, keep=['cf', 'duration'])
    anf_acc.sort_values('cf', ascending=False, inplace=True)

    # Plot auditory nerve response
    fig, ax = plt.subplots(2, 1)
    th.plot_signal(
        signal=sound,
        fs=fs,
        ax=ax[0]
    )
    th.plot_neurogram(
        anf_acc,
        fs,
        ax=ax[1]
    )
    plt.show()
Example #15
def generate_spiketrain(cf, sr, stim, seed, simulator=None, **kwds):
    """ Generate a new spike train from the auditory nerve model. Returns an 
    array of spike times in seconds.
    
    Parameters
    ----------
    cf : float
        Center frequency of the fiber to simulate
    sr : int
        Spontaneous rate group of the fiber: 
        0=low, 1=mid, 2=high.
    stim : Sound instance
        Stimulus sound to be presented on each repetition
    seed : int >= 0
        Random seed
    simulator : 'cochlea' | 'matlab' | None
        Specifies the auditory periphery simulator to use. If None, then a
        simulator will be automatically chosen based on availability.
    **kwds :
        All other keyword arguments are given to model_ihc() and model_synapse()
        based on their names. These include 'species', 'nrep', 'reptime', 'cohc',
        'cihc', and 'implnt'.
    """
    
    for k in ['pin', 'CF', 'fiberType', 'noiseType']:
        if k in kwds:
            raise TypeError("Argument '%s' is not allowed here." % k)
    
    ihc_kwds = dict(pin=stim.sound, CF=cf, nrep=1, tdres=stim.dt, 
                    reptime=stim.duration*2, cohc=1, cihc=1, species=1)
    syn_kwds = dict(CF=cf, nrep=1, tdres=stim.dt, fiberType=sr, noiseType=1, implnt=0)
    # Copy any given keyword args to the correct model function. Iterate
    # over a copy because kwds is modified inside the loop, and remove each
    # key only once even when it belongs to both dicts (e.g. 'nrep').
    for kwd in list(kwds):
        if kwd in ihc_kwds:
            ihc_kwds[kwd] = kwds[kwd]
        if kwd in syn_kwds:
            syn_kwds[kwd] = kwds[kwd]
        if kwd in ihc_kwds or kwd in syn_kwds:
            del kwds[kwd]

    if simulator is None:
        simulator = detect_simulator()

    if len(kwds) > 0:
        raise TypeError("Invalid keyword arguments: %s" % list(kwds.keys()))
    
    if simulator == 'matlab':
        seed_rng(seed)
        vihc = model_ihc(_transfer=False, **ihc_kwds) 
        m, v, psth = model_synapse(vihc, _transfer=False, **syn_kwds)
        psth = psth.get().ravel()
        times = np.argwhere(psth).ravel()
        return times * stim.dt
    elif simulator == 'cochlea' and HAVE_COCHLEA:
        fs = int(0.5+1./stim.dt)  # need to avoid roundoff error
        srgrp = [0, 0, 0]  # anf_num order is (HSR, MSR, LSR); sr is 0=low, 1=mid, 2=high
        srgrp[2-sr] = 1
        sp = cochlea.run_zilany2014(
                stim.sound,
                fs=fs,
                anf_num=srgrp,
                cf=cf,
                seed=seed,
                species='cat')
        return np.array(sp.spikes.values[0])
    else:  # possibly a typo, or the cochlea package is unavailable
        raise ValueError("anmodel/cache.py: simulator must be 'matlab' or 'cochlea' (and the cochlea package must be importable); got %s" % simulator)
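The srgrp construction maps the 0-based fibre-type index onto cochlea's anf_num=(HSR, MSR, LSR) convention; a quick check of the mapping:

for sr in (0, 1, 2):
    srgrp = [0, 0, 0]
    srgrp[2 - sr] = 1
    print(sr, srgrp)
# 0 -> [0, 0, 1]  (one LSR fibre)
# 1 -> [0, 1, 0]  (one MSR fibre)
# 2 -> [1, 0, 0]  (one HSR fibre)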
Example #16
def main():

    # The entire simulation must be done at a common sampling frequency.
    # The Zilany model requires a sampling frequency of at least 100 kHz,
    # so everything is upsampled to that rate.
    Fs = float(100e3)

    # Extract the speech signal and pre-process it. 'resampled.mat' is a
    # speech signal resampled to 100 kHz; any signal sampled at 100 kHz
    # (e.g. a segment of a song) would work. The file is assumed to be in
    # the current directory.
    dictsample = sio.loadmat('resampled.mat')
    sample = dictsample['newstory']  # 'newstory' is the key for the array
    sample = sample.flatten()  # make the signal a row vector

    # Take 2 seconds of the speech signal because it contains a significant
    # amount of vowel sounds; this duration can be changed to anything.
    duration = float(2)
    index = int(duration * Fs)  # convert the duration into an index value
    mysample = sample[0:index]

    # Set the level to 78 dB SPL (sound pressure level); this number is
    # arbitrary and can be changed. Note that set_dbspl() returns the
    # scaled signal, so the result must be assigned.
    mysample = wv.set_dbspl(mysample, 78)

    # This coincides with the desired sampling frequency of 100 kHz
    brian.defaultclock.dt = 0.01 * ms

    # Generate spike trains from auditory nerve fibres using the cochlea
    # package. anf_num is the number of (high, medium, low) spontaneous
    # rate fibres; these counts are taken from Verhulst et al. (2015).
    # The full power-law synapse is computationally intensive, so the
    # 'approximate' implementation is used.
    anf_trains = cochlea.run_zilany2014(
        sound=mysample,
        fs=Fs,
        anf_num=(13, 3, 1),
        cf=(125, 8000, 30),
        species='human',  # can be changed to 'cat' as well
        seed=0,
        powerlaw='approximate',
    )

    # Generate ANF and GBC groups in Brian using the helper functions of
    # the cochlear nucleus package. The number of ANF neurons is
    # 30 * the number of fibres. 200 is the number of globular bushy
    # cells in the cochlear nucleus; any number works and it does not
    # affect the result.
    anfs = cn.make_anf_group(anf_trains)
    gbcs = cn.make_gbc_group(200)

    # Connect ANFs and GBCs using Brian's Connection class; the delay
    # ensures a 5 ms lag between the groups.
    synapses = brian.Connection(anfs, gbcs, 'ge_syn', delay=5 * ms)

    # This value of convergence is taken from the cochlear nucleus
    # documentation
    convergence = 20

    weight = cn.synaptic_weight(pre='anf', post='gbc', convergence=convergence)

    # Make the synaptic connections random with a fixed probability p
    # proportional to the synaptic convergence
    synapses.connect_random(
        anfs,
        gbcs,
        p=convergence / len(anfs),
        fixed=True,
        weight=weight,
    )

    # Brian spike monitors collect the spikes of a neuron group
    gbc_spikes = brian.SpikeMonitor(gbcs)

    # Run the simulation via the CN package, which wraps Brian's run().
    # Include the AN and CN populations in `objects` if needed.
    cn.run(
        duration=duration,
        objects=[anfs, gbcs, synapses, gbc_spikes],
    )

    gbc_trains = th.make_trains(gbc_spikes)

    # Extract the spike times for both the AN and CN groups
    CNspikes = gbc_trains['spikes']
    ANspikes = anf_trains['spikes']

    # Convert to arrays so that each spike train is a one-dimensional
    # row vector of times at which spikes occur
    CN_spikes = np.asarray(CNspikes)
    AN_spikes = np.asarray(ANspikes)

    # Save in a format suitable for further processing in MATLAB
    data = {'CN': CN_spikes, 'AN': AN_spikes}
    sio.savemat('Spiketrains', data)

    # Plot the rasters of these spike trains with matplotlib
    fig, ax = plt.subplots(2, 1)

    th.plot_raster(anf_trains, ax=ax[0])
    th.plot_raster(gbc_trains, ax=ax[1])
    plt.tight_layout()
    plt.show()
Example #17
def main():

    fs = 100e3  # Hz
    cf = 600  # Hz
    duration = 50e-3  # s

    # Simulation sampling frequency
    cn.set_fs(40e3)  # Hz

    # Generate sound
    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=duration,
        dbspl=30,
    )

    # Generate ANF trains
    anf_trains = cochlea.run_zilany2014(
        sound=sound,
        fs=fs,
        anf_num=(300, 0, 0),  # (HSR, MSR, LSR)
        cf=cf,
        species='cat',
        seed=0,
    )

    # Generate ANF and GBC groups
    anfs = cn.make_anf_group(anf_trains)
    gbcs = cn.make_gbc_group(100)

    # Connect ANFs and GBCs
    synapses = brian.Connection(
        anfs,
        gbcs,
        'ge_syn',
    )

    convergence = 20

    weight = cn.synaptic_weight(pre='anf', post='gbc', convergence=convergence)

    synapses.connect_random(anfs,
                            gbcs,
                            p=convergence / len(anfs),
                            fixed=True,
                            weight=weight)

    # Monitors for the GBCs
    spikes = brian.SpikeMonitor(gbcs)

    # Run the simulation
    cn.run(duration=len(sound) / fs, objects=[anfs, gbcs, synapses, spikes])

    # Present the results
    gbc_trains = th.make_trains(spikes)

    fig, ax = plt.subplots(2, 1)

    th.plot_raster(anf_trains, ax=ax[0])
    th.plot_raster(gbc_trains, ax=ax[1])

    plt.show()
Example #18
# Read the WAV header to determine the signal duration; `fname` and `fs`
# are assumed to be defined earlier in the original script.
with wave.open(fname, "r") as f:
    frames = f.getnframes()
    rate = f.getframerate()
    duration = frames / float(rate)

spf = wave.open(fname, "r")

# Extract Raw Audio from Wav File
signal = spf.readframes(-1)
signal = np.frombuffer(signal, dtype=np.int16)
# Number of decimal digits in the peak amplitude, used below to scale the
# int16 signal down into roughly [-1, 1]
number_lv = np.ceil(np.log10(np.max(signal)))

spf.close()

signal = signal / 10.**number_lv

signal = dsp.resample(signal, int(fs * duration))

plt.figure(1)
plt.title("Signal Wave...")
wv.plot_signal(signal, int(fs))
plt.show()

anf_trains = cochlea.run_zilany2014(signal,
                                    int(fs),
                                    anf_num=(0, 0, 50),
                                    cf=(125, 10e3, 100),
                                    seed=0,
                                    species='human')

th.plot_raster(th.accumulate(anf_trains))
plt.show()
Example #19
def run_exp(c_freq, itd, str_e, str_i):
    # br.globalprefs.set_global_preferences(useweave=True, openmp=True, usecodegen=True,
    #                                       usecodegenweave=True )

    br.defaultclock.dt = 20E-6 * second

    # Basic parameters
    fs_coch = 100e3  # samples/second
    duration = 100E-3  # seconds
    pad = 20E-3  # seconds
    n_neuron = 500
    dbspl = 50
    dt_coch = 1 / fs_coch
    n_pad = int(pad / dt_coch)
    n_itd = int(itd / dt_coch)
    const_dt = 100e-6

    sound = audio.generate_tone(c_freq, duration, fs_coch)
    sound = sound * audio.cosine_fade_window(sound, 20e-3, fs_coch)
    sound = coch.set_dbspl(sound, dbspl)
    sound = np.concatenate((np.zeros(n_pad), sound, np.zeros(n_pad)))
    sound = audio.delay_signal(sound, np.abs(itd), fs_coch)

    if itd < 0:
        sound = sound[:, ::-1]
    duration = len(sound) / fs_coch

    # construct ipsi and contra-lateral ANF trains and convert them to
    # neuron groups
    cochlea_train_left = coch.run_zilany2014(sound=sound[:, 0],
                                             fs=fs_coch,
                                             anf_num=(n_neuron, 0, 0),
                                             cf=c_freq,
                                             species='human',
                                             seed=0)

    cochlea_train_right = coch.run_zilany2014(sound=sound[:, 1],
                                              fs=fs_coch,
                                              anf_num=(n_neuron, 0, 0),
                                              cf=c_freq,
                                              species='human',
                                              seed=0)

    anf_group_left = coch.make_brian_group(cochlea_train_left)
    anf_group_right = coch.make_brian_group(cochlea_train_right)

    # Setup a new mso group and new gbc groups
    mso_group_left = make_mso_group(n_neuron)
    mso_group_right = make_mso_group(n_neuron)

    gbc_group_left = cochlea_to_gbc(anf_group_left, n_neuron)
    gbc_group_right = cochlea_to_gbc(anf_group_right, n_neuron)

    # Synaptic connections for the groups
    syn_mso_l_in_ipsi, syn_mso_l_in_contra = inhibitory_to_mso(
        mso_group=mso_group_left,
        ipsi_group=gbc_group_left['neuron_groups'][0],
        contra_group=gbc_group_right['neuron_groups'][0],
        strength=str_i,
        ipsi_delay=0,
        contra_delay=-0.6e-3 + const_dt)

    syn_mso_r_in_ipsi, syn_mso_r_in_contra = inhibitory_to_mso(
        mso_group=mso_group_right,
        ipsi_group=gbc_group_right['neuron_groups'][0],
        contra_group=gbc_group_left['neuron_groups'][0],
        strength=str_i,
        ipsi_delay=0,
        contra_delay=-0.6e-3 + const_dt)

    syn_mso_l_ex_ipsi, syn_mso_l_ex_contra = excitatory_to_mso(
        mso_group=mso_group_left,
        ipsi_group=anf_group_left,
        contra_group=anf_group_right,
        strength=str_e,
        contra_delay=const_dt)

    syn_mso_r_ex_ipsi, syn_mso_r_ex_contra = excitatory_to_mso(
        mso_group=mso_group_right,
        ipsi_group=anf_group_right,
        contra_group=anf_group_left,
        strength=str_e,
        contra_delay=const_dt)

    sp_mon_left = br.SpikeMonitor(mso_group_left, record=True)
    sp_mon_right = br.SpikeMonitor(mso_group_right, record=True)

    network = ([
        mso_group_left, mso_group_right, anf_group_left, anf_group_right,
        syn_mso_l_ex_ipsi, syn_mso_l_ex_contra, syn_mso_l_in_ipsi,
        syn_mso_l_in_contra, syn_mso_r_ex_ipsi, syn_mso_r_ex_contra,
        syn_mso_r_in_ipsi, syn_mso_r_in_contra, sp_mon_left, sp_mon_right
    ] + gbc_group_left['neuron_groups'] + gbc_group_right['neuron_groups'])

    run(duration, network)

    mso_train_left = thorns.make_trains(sp_mon_left)
    mso_train_right = thorns.make_trains(sp_mon_right)

    return {'spikes_left': mso_train_left, 'spikes_right': mso_train_right}