Example #1
def main():

    fs = 100e3
    cf = 1e3
    dbspl = 50
    tone_duration = 50e-3

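    # Generate a ramped pure tone (50 ms, 1 kHz, 50 dB SPL)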
    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=tone_duration,
        dbspl=dbspl,
    )

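    # Run the zilany2014 auditory-nerve model (cat, 200 HSR fibres at one CF)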
    anf_trains = cochlea.run_zilany2014(
        sound,
        fs,
        anf_num=(200, 0, 0),
        cf=cf,
        seed=0,
        species='cat',
    )

    anf_trains.to_pickle("anf_zilany2014.pkl")

    th.plot_raster(anf_trains)
    th.show()
Example #3
def _run_model(model, dbspl, cf, model_pars, tone_duration):

    onset = 10e-3
    assert tone_duration > onset

    fs = model_pars.setdefault('fs', 100e3)
    model_pars.setdefault('anf_num', (250, 250, 250))
    model_pars.setdefault('seed', 0)

    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=tone_duration,
        pad=0,
        dbspl=dbspl
    )

    anf = model(
        sound=sound,
        cf=cf,
        **model_pars
    )

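    # Mean firing rate per fibre type, excluding the onset segment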
    rates = {}
    for typ, trains in anf.groupby('type'):
        trimmed = th.trim(trains, onset, None)
        rate = th.firing_rate(trimmed)
        rates[typ] = rate

    return rates
Example #4
def _run_model(model, dbspl, cf, model_pars):

    duration = 100e-3
    onset = 10e-3

    fs = model_pars.setdefault('fs', 100e3)
    model_pars.setdefault('anf_num', (250, 250, 250))
    model_pars.setdefault('seed', 0)

    sound = wv.ramped_tone(fs=fs,
                           freq=cf,
                           duration=duration,
                           pad=0,
                           dbspl=dbspl)

    anf = model(sound=sound, cf=cf, **model_pars)

    # th.plot_raster(anf)
    # th.show()

    # We want to make sure that the output CF is equal to the desired CF.
    real_cf, = np.unique(anf['cf'])
    assert real_cf == cf

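    # Vector strength (phase locking to the tone frequency) per fibre type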
    vss = {}
    for typ, group in anf.groupby('type'):
        trimmed = th.trim(group, onset, None)
        vs = th.vector_strength(trimmed, cf)
        vss[typ] = vs

    return vss
Example #5
def main():

    fs = 48e3
    cf = cochlea.get_nearest_cf_holmberg2007(1e3)

    ### Make sound
    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=150e-3,
        pad=10e-3,
        dbspl=70,
    )

    ### Run model
    anf_trains = sg.run_holmberg2007_sg(
        sound,
        fs,
        cf=cf,
        anf_num=(10, 0, 0),
        seed=0,
    )

    print(th.firing_rate(anf_trains))

    ### Plot auditory nerve response
    fig, ax = plt.subplots(2, 1)
    th.plot_signal(signal=sound, fs=fs, ax=ax[0])

    th.plot_raster(anf_trains, ax=ax[1])

    plt.show()
Example #6
def create_sound(fs=100e3, freq=500, duration=0.4, dbspl=60):
    """
    Create stimulus
    """
    import thorns.waves as wv

    S = wv.ramped_tone(fs=fs, freq=freq, duration=duration, dbspl=dbspl)
    return S
Example #7
def _run_model(model, dbspl, cf, model_pars):

    duration = 100e-3
    onset = 10e-3

    fs = model_pars.setdefault('fs', 100e3)
    model_pars.setdefault('anf_num', (250, 250, 250))
    model_pars.setdefault('seed', 0)

    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=duration,
        pad=0,
        dbspl=dbspl
    )

    anf = model(
        sound=sound,
        cf=cf,
        **model_pars
    )


    ### We want to make sure that the output CF is equal to the
    ### desired CF.
    real_cf, = np.unique(anf['cf'])
    assert real_cf == cf

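    # Vector strength per fibre type (HSR/MSR/LSR), onset excluded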
    hsr = anf[anf['type'] == 'hsr']
    hsr = th.trim(hsr, onset, None)
    si_hsr = th.vector_strength(hsr, cf)

    msr = anf[anf['type'] == 'msr']
    msr = th.trim(msr, onset, None)
    si_msr = th.vector_strength(msr, cf)

    lsr = anf[anf['type'] == 'lsr']
    lsr = th.trim(lsr, onset, None)
    si_lsr = th.vector_strength(lsr, cf)

    # print(si_hsr)
    # th.plot_raster(anf)
    # th.show()

    vss = {
        'hsr': si_hsr,
        'msr': si_msr,
        'lsr': si_lsr,
    }

    return vss
Example #8
def error_func(
        dbspl,
        model,
        cf,
        spont_rate,
        model_pars,
        asr_filter=False,
        freq=None
):

    pars = dict(model_pars)

    fs = pars.setdefault('fs', 100e3)
    pars.setdefault('seed', 0)
    pars.setdefault('anf_num', (1000, 0, 0))

    if freq is None:
        freq = cf

    tone_duration = 250e-3
    onset = 15e-3

    sound = wv.ramped_tone(
        fs,
        freq,
        duration=tone_duration,
        ramp=2.5e-3,
        pad=0,
        dbspl=dbspl
    )

    if asr_filter:
        sound = adjust_to_human_thresholds(sound, fs, model)

    anf = model(
        sound=sound,
        cf=cf,
        **pars
    )

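    # Steady-state firing rate (onset excluded) compared with the target spontaneous rate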
    trains = th.trim(anf, onset, None)
    rate = th.firing_rate(trains)

    error = rate - spont_rate

    logging.debug("{} {} {}".format(dbspl, rate, error))

    return error
Example #9
def _run_model(model, dbspl, cf, model_pars, tone_duration):

    onset = 10e-3
    assert tone_duration > onset

    fs = model_pars.setdefault('fs', 100e3)
    model_pars.setdefault('anf_num', (250, 250, 250))
    model_pars.setdefault('seed', 0)

    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=tone_duration,
        pad=0,
        dbspl=dbspl
    )

    anf = model(
        sound=sound,
        cf=cf,
        **model_pars
    )


    ### TODO: try to use DataFrame.groupby instead
    hsr = anf.query("type == 'hsr'")
    hsr = th.trim(hsr, onset, None)
    rate_hsr = th.firing_rate(hsr)

    msr = anf.query("type == 'msr'")
    msr = th.trim(msr, onset, None)
    rate_msr = th.firing_rate(msr)

    lsr = anf.query("type == 'lsr'")
    lsr = th.trim(lsr, onset, None)
    rate_lsr = th.firing_rate(lsr)

    rates = {
        'hsr': rate_hsr,
        'msr': rate_msr,
        'lsr': rate_lsr,
    }

    return rates
Example #10
def main():
    fs = 100e3
    cf = 500
    convergence = (35, 0, 0)

    # Generate sound
    sound = wv.ramped_tone(fs=fs, freq=cf, duration=50e-3, pad=30e-3, dbspl=50)

    # Run inner ear model
    anf_trains = cochlea.run_zilany2014(sound=sound,
                                        fs=fs,
                                        cf=cf,
                                        anf_num=convergence,
                                        species='cat',
                                        seed=0)

    # Run GBC
    cn.set_celsius(37)
    cn.set_fs(fs)

    gbc = cn.GBC_Point(convergence=convergence,
                       cf=cf,
                       endbulb_class='tonic',
                       record_voltages=True)

    gbc.load_anf_trains(anf_trains, seed=0)

    cn.run(duration=len(sound) / fs, objects=[gbc])

    # Collect the results
    gbc_trains = gbc.get_trains()
    voltages = gbc.get_voltages()

    # Present the results
    print(gbc_trains)

    fig, ax = plt.subplots(2, 1)

    th.plot_raster(anf_trains, ax=ax[0])
    ax[0].set_title("ANF input")

    th.plot_signal(voltages, fs=fs, ax=ax[1])

    th.show()
Example #11
def _run_model(model, dbspl, cf, model_pars):

    duration = 100e-3
    onset = 10e-3

    fs = model_pars.setdefault('fs', 100e3)
    model_pars.setdefault('anf_num', (250, 250, 250))
    model_pars.setdefault('seed', 0)

    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=duration,
        pad=0,
        dbspl=dbspl
    )

    anf = model(
        sound=sound,
        cf=cf,
        **model_pars
    )

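    # Mean firing rate per fibre type (HSR/MSR/LSR), onset excluded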
    hsr = anf[anf['type'] == 'hsr']
    hsr = th.trim(hsr, onset, None)
    rate_hsr = th.firing_rate(hsr)

    msr = anf[anf['type'] == 'msr']
    msr = th.trim(msr, onset, None)
    rate_msr = th.firing_rate(msr)

    lsr = anf[anf['type'] == 'lsr']
    lsr = th.trim(lsr, onset, None)
    rate_lsr = th.firing_rate(lsr)

    rates = {
        'hsr': rate_hsr,
        'msr': rate_msr,
        'lsr': rate_lsr,
    }

    return rates
Example #12
def main():

    fs = 100e3
    cf = 1e3
    tmax = 50e-3

    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=tmax,
        pad=30e-3,
        dbspl=50,
    )

    anf_trains = cochlea.run_zilany2014(
        sound=sound,
        fs=fs,
        cf=cf,
        anf_num=(300, 0, 0),
        seed=0,
        species='cat',
    )

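    # Convert the ANF spike trains into a Brian spike-generating group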
    anfs = cochlea.make_brian_group(anf_trains)

    print(anfs)

    brainstem = make_brainstem_group(100)

    print(brainstem)

    monitor = brian.SpikeMonitor(brainstem)

    net = brian.Network([brainstem, monitor])
    net.run(tmax * second)

    brian.raster_plot(monitor)
    brian.show()
Example #13
def error_func(dbspl,
               model,
               cf,
               spont_rate,
               model_pars,
               asr_filter=False,
               freq=None):

    pars = dict(model_pars)

    fs = pars.setdefault('fs', 100e3)
    pars.setdefault('seed', 0)
    pars.setdefault('anf_num', (1000, 0, 0))

    if freq is None:
        freq = cf

    tone_duration = 250e-3
    onset = 15e-3

    sound = wv.ramped_tone(fs,
                           freq,
                           duration=tone_duration,
                           ramp=2.5e-3,
                           pad=0,
                           dbspl=dbspl)

    if asr_filter:
        sound = adjust_to_human_thresholds(sound, fs, model)

    anf = model(sound=sound, cf=cf, **pars)

    trains = th.trim(anf, onset, None)
    rate = th.firing_rate(trains)

    error = rate - spont_rate

    logging.debug("{} {} {}".format(dbspl, rate, error))

    return error
Example #14
def _run_model(model, dbspl, cf, model_pars):

    duration = 100e-3
    onset = 10e-3

    fs = model_pars.setdefault('fs', 100e3)
    model_pars.setdefault('anf_num', (250, 250, 250))
    model_pars.setdefault('seed', 0)

    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=duration,
        pad=0,
        dbspl=dbspl
    )

    anf = model(
        sound=sound,
        cf=cf,
        **model_pars
    )

    # th.plot_raster(anf)
    # th.show()

    ### We want to make sure that the output CF is equal to the
    ### desired CF.
    real_cf, = np.unique(anf['cf'])
    assert real_cf == cf

    vss = {}
    for typ, group in anf.groupby('type'):
        trimmed = th.trim(group, onset, None)
        vs = th.vector_strength(trimmed, cf)
        vss[typ] = vs

    return vss
Example #16
def _run_model(model, dbspl, cf, model_pars, tone_duration):

    onset = 10e-3
    assert tone_duration > onset

    fs = model_pars.setdefault('fs', 100e3)
    model_pars.setdefault('anf_num', (250, 250, 250))
    model_pars.setdefault('seed', 0)

    sound = wv.ramped_tone(fs=fs,
                           freq=cf,
                           duration=tone_duration,
                           pad=0,
                           dbspl=dbspl)

    anf = model(sound=sound, cf=cf, **model_pars)

    rates = {}
    for typ, trains in anf.groupby('type'):
        trimmed = th.trim(trains, onset, None)
        rate = th.firing_rate(trimmed)
        rates[typ] = rate

    return rates
Example #17
def main():

    fs = 100e3  # Hz
    cf = 600  # Hz
    duration = 50e-3  # s

    # Simulation sampling frequency
    cn.set_fs(40e3)  # Hz

    # Generate sound
    sound = wv.ramped_tone(
        fs=fs,
        freq=cf,
        duration=duration,
        dbspl=30,
    )

    # Generate ANF trains
    anf_trains = cochlea.run_zilany2014(
        sound=sound,
        fs=fs,
        anf_num=(300, 0, 0),  # (HSR, MSR, LSR)
        cf=cf,
        species='cat',
        seed=0,
    )

    # Generate ANF and GBC groups
    anfs = cn.make_anf_group(anf_trains)
    gbcs = cn.make_gbc_group(100)

    # Connect ANFs and GBCs
    synapses = brian.Connection(
        anfs,
        gbcs,
        'ge_syn',
    )

    convergence = 20

    weight = cn.synaptic_weight(pre='anf', post='gbc', convergence=convergence)

    synapses.connect_random(anfs,
                            gbcs,
                            p=convergence / len(anfs),
                            fixed=True,
                            weight=weight)

    # Monitors for the GBCs
    spikes = brian.SpikeMonitor(gbcs)

    # Run the simulation
    cn.run(duration=len(sound) / fs, objects=[anfs, gbcs, synapses, spikes])

    # Present the results
    gbc_trains = th.make_trains(spikes)

    fig, ax = plt.subplots(2, 1)

    th.plot_raster(anf_trains, ax=ax[0])
    th.plot_raster(gbc_trains, ax=ax[1])

    plt.show()
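Note: the snippets above are listed without their import headers. Judging from the calls they make, they assume roughly the imports below; the remaining names (`cn`, `sg`, `brian`/`second`, `make_brainstem_group`, `adjust_to_human_thresholds`) come from simulator- and project-specific modules that are not shown here.

import logging

import numpy as np
import matplotlib.pyplot as plt

import thorns as th          # spike-train analysis and plotting (plot_raster, trim, firing_rate, ...)
import thorns.waves as wv    # stimulus generation (ramped_tone)
import cochlea               # inner-ear models (run_zilany2014, ...)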