Example #1
def main():

    fs = 100e3
    cf = 1e3
    dbspl = 50
    tone_duration = 50e-3

    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=tone_duration,
        dbspl=dbspl,
    )

    anf_trains = cochlea.run_zilany2014(
        sound,
        fs,
        anf_num=(200, 0, 0),  # (HSR, MSR, LSR) fibres
        cf=cf,
        seed=0,
        species='cat',
    )

    anf_trains.to_pickle("anf_zilany2014.pkl")

    th.plot_raster(anf_trains)
    th.show()
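The pickle written above can be reloaded later with pandas and plotted again. A minimal sketch (assuming the usual import thorns as th; not part of the original example):

import pandas as pd
import thorns as th

# Reload the spike trains saved by the example above and redraw the raster
anf_trains = pd.read_pickle("anf_zilany2014.pkl")
th.plot_raster(anf_trains)
th.show()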
Example #2
def main():
    ### Generate the stimulus
    fs = 10e3  # [Hz]
    amp = 400e-6  # [A]

    s = np.zeros(int(30e-3 * fs))  # 30 ms of silence
    s[100:105] = -amp  # [A]
    s[105:110] = +amp  # [A]

    # Biphasic pulse on electrode 3 and a time-shifted copy on electrode 8
    stim = {3: s, 8: np.roll(s, 100)}

    ### Run CI simulation
    trains = sg.run_ci_simulation(
        stim=stim,
        fs=fs,
        anf_num=10,
        # map_backend='multiprocessing'
    )

    ### Plot results
    fig, ax = plt.subplots(2, 1, sharex=True)

    th.plot_signal(stim[3], fs=fs, ax=ax[0])
    th.plot_signal(stim[8], fs=fs, ax=ax[0])
    ax[0].set_ylabel("Amplitude [A]")

    th.plot_raster(trains, ax=ax[1])

    plt.show()
Example #3
def main():

    fs = 48e3
    cf = cochlea.get_nearest_cf_holmberg2007(1e3)

    ### Make sound
    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=150e-3,
        pad=10e-3,
        dbspl=70,
    )

    ### Run model
    anf_trains = sg.run_holmberg2007_sg(
        sound,
        fs,
        cf=cf,
        anf_num=(10, 0, 0),
        seed=0,
    )

    print(th.firing_rate(anf_trains))

    ### Plot auditory nerve response
    fig, ax = plt.subplots(2, 1)
    th.plot_signal(signal=sound, fs=fs, ax=ax[0])

    th.plot_raster(anf_trains, ax=ax[1])

    plt.show()
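Note: the Holmberg (2007) model appears to operate on a fixed list of centre frequencies, which is why the requested 1 kHz is first snapped to the nearest available CF. A tiny check (illustrative, assuming the helper behaves as in the cochlea package):

cf = cochlea.get_nearest_cf_holmberg2007(1e3)
print("Nearest available CF to 1 kHz:", cf)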
Example #4
def main():

    fs = 48e3
    tmax = 0.1

    ### Make sound
    t = np.arange(0, tmax, 1/fs)
    s = np.zeros_like(t)


    ### Run model
    vesicle_trains = cochlea.run_holmberg2007_vesicles(
        s,
        fs,
        anf_num=(1,0,0),
        seed=0,
    )



    print(vesicle_trains)

    ### Rename the column vesicles -> spikes, because that's the
    ### column name expected by many functions in thorns
    trains = vesicle_trains.rename(columns={'vesicles': 'spikes'})

    ### Calculate average rate
    rate = th.firing_rate(trains)
    print()
    print("Spontanious rate:", rate)


    ### Raster plot
    th.plot_raster(trains)
    th.show()
Example #5
def thalamicInput(lagSpace, par, est, raster = False):

    fs = par['periphFs']

    # Subcortical processing
    sound = soch.createStimulus(est, par['periphFs'])
    prob = moch.peripheral(sound, par)

    [A, n, b] = moch.subcortical(prob, lagSpace, par)

    for i in range(1, par['subCortAff']):
        sound = soch.createStimulus(est, par['periphFs'])
        prob = moch.peripheral(sound, par)
        [A0, n0, b0] = moch.subcortical(prob, lagSpace, par)
        A = A + A0
        n = n + n0
        b = b + b0

    A = (1. / par['subCortAff']) * A
    n = (1. / par['subCortAff']) * n
    b = (1. / par['subCortAff']) * b

    if raster:
        anfTrains = moch.peripheralSpikes(sound, par, fs=-1)
        thorns.plot_raster(anfTrains)
        thorns.show()

    return [A, n, b]
Example #6
def main():

    ### Load spike trains
    spike_trains = load_anf_zilany2014()

    print(spike_trains.head())



    ### Calculate vector strength
    cf, = spike_trains.cf.unique()
    onset = 10e-3               # s (= 10 ms)

    trimmed = th.trim(spike_trains, onset, None)
    vs = th.vector_strength(trimmed, freq=cf)

    print()
    print("Vector strength: {}".format(vs))



    ### Plot raster plot
    th.plot_raster(spike_trains)



    ### Show the plot
    th.show()                   # Equivalent to plt.show()
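For reference, vector strength measures how tightly spikes lock to one period of the stimulus. A minimal NumPy sketch of the standard formula (illustrative only, not the thorns implementation):

import numpy as np

def vector_strength(spike_times, freq):
    # Project each spike onto the stimulus cycle and average the unit phasors;
    # 1 means perfect phase locking, 0 means no locking
    phases = 2 * np.pi * freq * np.asarray(spike_times)
    return np.abs(np.mean(np.exp(1j * phases)))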
Example #9
def main():

    fs = 100e3

    ### Make sound
    t = np.arange(0, 0.1, 1/fs)
    s = dsp.chirp(t, 80, t[-1], 20000)
    s = cochlea.set_dbspl(s, 50)
    s = np.concatenate((s, np.zeros(int(10e-3 * fs))))



    ### Run model
    anf = cochlea.run_zilany2009(
        s,
        fs,
        anf_num=(100,0,0),
        cf=(80, 20000, 100),
        seed=0,
        powerlaw='approximate'
    )



    ### Plot auditory nerve response
    anf_acc = th.accumulate(anf, keep=['cf', 'duration'])
    anf_acc.sort_values('cf', ascending=False, inplace=True)

    cfs = anf.cf.unique()

    fig, ax = plt.subplots(2,1, sharex=True)
    th.plot_neurogram(
        anf_acc,
        fs,
        ax=ax[0]
    )

    th.plot_raster(
        anf[anf.cf==cfs[30]],
        ax=ax[1]
    )

    ax[1].set_title("CF = {}".format(cfs[30]))

    plt.show()
Example #10
def main():
    fs = 100e3
    cf = 500
    convergence = (35, 0, 0)

    # Generate sound
    sound = wv.ramped_tone(fs=fs, freq=cf, duration=50e-3, pad=30e-3, dbspl=50)

    # Run inner ear model
    anf_trains = cochlea.run_zilany2014(sound=sound,
                                        fs=fs,
                                        cf=cf,
                                        anf_num=convergence,
                                        species='cat',
                                        seed=0)

    # Run GBC
    cn.set_celsius(37)
    cn.set_fs(fs)

    gbc = cn.GBC_Point(convergence=convergence,
                       cf=cf,
                       endbulb_class='tonic',
                       record_voltages=True)

    gbc.load_anf_trains(anf_trains, seed=0)

    cn.run(duration=len(sound) / fs, objects=[gbc])

    # Collect the results
    gbc_trains = gbc.get_trains()
    voltages = gbc.get_voltages()

    # Present the results
    print(gbc_trains)

    fig, ax = plt.subplots(2, 1)

    th.plot_raster(anf_trains, ax=ax[0])
    ax[0].set_title("ANF input")

    th.plot_signal(voltages, fs=fs, ax=ax[1])

    th.show()
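A possible follow-up (not part of the original example) is to quantify the GBC output the same way the other examples quantify ANF activity, using th.firing_rate:

# Compare the average firing rates of the input and output trains
print("ANF rate:", th.firing_rate(anf_trains))
print("GBC rate:", th.firing_rate(gbc_trains))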
Example #11
def main():

    fs = 100e3

    # Make sound
    t = np.arange(0, 0.1, 1/fs)
    s = dsp.chirp(t, 80, t[-1], 20000)
    s = cochlea.set_dbspl(s, 50)
    s = np.concatenate((s, np.zeros(int(10e-3*fs))))

    # Run model
    anf = cochlea.run_zilany2009(
        s,
        fs,
        anf_num=(100,0,0),
        cf=(80, 20000, 100),
        seed=0,
        powerlaw='approximate'
    )

    # Plot auditory nerve response
    anf_acc = th.accumulate(anf, keep=['cf', 'duration'])
    anf_acc.sort_values('cf', ascending=False, inplace=True)

    cfs = anf.cf.unique()

    fig, ax = plt.subplots(2,1, sharex=True)
    th.plot_neurogram(
        anf_acc,
        fs,
        ax=ax[0]
    )

    th.plot_raster(
        anf[anf.cf==cfs[30]],
        ax=ax[1]
    )

    ax[1].set_title("CF = {}".format(cfs[30]))

    plt.show()
Example #12
def main():

    # Load spike trains
    spike_trains = load_anf_zilany2014()

    print(spike_trains.head())

    # Calculate vector strength
    cf, = spike_trains.cf.unique()
    onset = 10e-3

    trimmed = th.trim(spike_trains, onset, None)
    vs = th.vector_strength(trimmed, freq=cf)

    print()
    print("Vector strength: {}".format(vs))

    # Plot raster plot
    th.plot_raster(spike_trains)

    # Show the plot
    th.show()  # Equivalent to plt.show()
Example #13
def main():

    # The entire simulation must run at a common sampling frequency. The
    # Zilany model requires at least 100 kHz, so everything is upsampled
    # to that rate.
    Fs = float(100e3)

    # Extract the speech signal and pre-process it. 'resampled.mat' is a
    # speech signal resampled to 100 kHz; any signal sampled at 100 kHz
    # (e.g. a segment of a song) will do. The file is assumed to be in
    # the current directory.
    dictsample = sio.loadmat('resampled.mat')
    sample = dictsample['newstory']  # 'newstory' is the key of the array
    sample = sample.flatten()  # make the signal a row vector

    # Take 2 s of the speech signal; this segment contains a significant
    # amount of vowel sounds, but the duration can be changed.
    duration = float(2)
    index = int(duration * Fs)  # convert the duration to a sample index
    mysample = sample[0:index]

    # Set the level to 78 dB SPL (an arbitrary choice). Note that
    # set_dbspl() returns the scaled signal instead of modifying it in place.
    mysample = wv.set_dbspl(mysample, 78)

    # dt = 0.01 ms corresponds to the desired 100 kHz sampling frequency
    brian.defaultclock.dt = 0.01 * ms

    # Generate spike trains from auditory nerve fibres with the cochlea
    # package. anf_num = (high-, medium-, low-spontaneous-rate fibres);
    # (13, 3, 1) follows Verhulst et al. (2015). The 'approximate' power
    # law keeps the computation from getting too expensive.
    anf_trains = cochlea.run_zilany2014(
        sound=mysample,
        fs=Fs,
        anf_num=(13, 3, 1),
        cf=(125, 8000, 30),
        species='human',  # can be changed to 'cat' as well
        seed=0,
        powerlaw='approximate',
    )

    # Generate ANF and GBC groups in Brian using helper functions from
    # the cochlear nucleus (cn) package. The number of ANF neurons equals
    # the number of CFs (30) times the number of fibres per CF.
    anfs = cn.make_anf_group(anf_trains)

    # 200 globular bushy cells in the cochlear nucleus; the number can be
    # changed without affecting the result.
    gbcs = cn.make_gbc_group(200)

    # Connect ANFs and GBCs with Brian's Connection class; the 5 ms delay
    # keeps a delay between the two groups.
    synapses = brian.Connection(
        anfs,
        gbcs,
        'ge_syn',
        delay=5 * ms,
    )

    # The convergence value is taken from the cochlear nucleus documentation
    convergence = 20

    weight = cn.synaptic_weight(pre='anf', post='gbc', convergence=convergence)

    # Connect ANFs to GBCs at random with a fixed probability p
    # proportional to the synaptic convergence
    synapses.connect_random(
        anfs,
        gbcs,
        p=convergence / len(anfs),
        fixed=True,
        weight=weight,
    )

    # Spike monitor for the GBCs (Brian SpikeMonitors collect the spikes
    # fired by a neuron group)
    gbc_spikes = brian.SpikeMonitor(gbcs)

    # Run the simulation with the CN package's run(), which wraps Brian's
    # run function
    cn.run(
        duration=duration,
        objects=[anfs, gbcs, synapses, gbc_spikes],  # add further populations here if needed
    )

    gbc_trains = th.make_trains(gbc_spikes)

    # Extract the spike times for both the AN and CN groups
    CNspikes = gbc_trains['spikes']
    ANspikes = anf_trains['spikes']

    # Convert to arrays so that each spike train is a one-dimensional
    # array (row vector) of spike times
    CN_spikes = np.asarray(CNspikes)
    AN_spikes = np.asarray(ANspikes)

    # Save the spike trains for further processing in MATLAB
    data = {'CN': CN_spikes, 'AN': AN_spikes}
    sio.savemat('Spiketrains', data)

    # Raster plots of the AN and CN spike trains with matplotlib:
    fig, ax = plt.subplots(2, 1)

    th.plot_raster(anf_trains, ax=ax[0])
    th.plot_raster(gbc_trains, ax=ax[1])
    plt.tight_layout()
    plt.show()
Example #14

fs = 100e3  # target sampling frequency for the model [Hz] (assumed value)

# fname: path to the input WAV file (defined elsewhere)
spf = wave.open(fname, "r")

# Duration of the recording, derived from the WAV header
frames = spf.getnframes()
rate = spf.getframerate()
duration = frames / float(rate)

# Extract Raw Audio from Wav File
signal = spf.readframes(-1)
signal = np.frombuffer(signal, dtype=np.int16)
# Order of magnitude of the peak sample value
number_lv = np.ceil(np.log10(np.max(signal)))

spf.close()

# Normalize so that the peak amplitude does not exceed 1
signal = signal / 10.**number_lv

signal = dsp.resample(signal, int(fs * duration))

plt.figure(1)
plt.title("Signal Wave...")
wv.plot_signal(signal, int(fs))
plt.show()

anf_trains = cochlea.run_zilany2014(signal,
                                    int(fs),
                                    anf_num=(0, 0, 50),
                                    cf=(125, 10e3, 100),
                                    seed=0,
                                    species='human')

th.plot_raster(th.accumulate(anf_trains))
plt.show()
Example #15
 def plot_spike_array_in_trains(self, spike_array, fs=100e3):
     th.plot_raster(self.array_to_trains(spike_array, fs))
Example #16
 def plot_audio_in_trains(self, audio, fs=100e3):
     th.plot_raster(self.audio_to_spike_train(audio, fs))
Example #17
def main():

    fs = 100e3  # Hz
    cf = 600  # Hz
    duration = 50e-3  # s

    # Simulation sampling frequency
    cn.set_fs(40e3)  # Hz

    # Generate sound
    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=duration,
        dbspl=30,
    )

    # Generate ANF trains
    anf_trains = cochlea.run_zilany2014(
        sound=sound,
        fs=fs,
        anf_num=(300, 0, 0),  # (HSR, MSR, LSR)
        cf=cf,
        species='cat',
        seed=0,
    )

    # Generate ANF and GBC groups
    anfs = cn.make_anf_group(anf_trains)
    gbcs = cn.make_gbc_group(100)

    # Connect ANFs and GBCs
    synapses = brian.Connection(
        anfs,
        gbcs,
        'ge_syn',
    )

    convergence = 20

    weight = cn.synaptic_weight(pre='anf', post='gbc', convergence=convergence)

    synapses.connect_random(anfs,
                            gbcs,
                            p=convergence / len(anfs),
                            fixed=True,
                            weight=weight)

    # Monitors for the GBCs
    spikes = brian.SpikeMonitor(gbcs)

    # Run the simulation
    cn.run(duration=len(sound) / fs, objects=[anfs, gbcs, synapses, spikes])

    # Present the results
    gbc_trains = th.make_trains(spikes)

    fig, ax = plt.subplots(2, 1)

    th.plot_raster(anf_trains, ax=ax[0])
    th.plot_raster(gbc_trains, ax=ax[1])

    plt.show()