Example no. 1
0
def test_equalization(speakers="all", title=""):
    """
    Play a chirp once without and once with the equalization filter and
    plot both recordings side by side for comparison.
    """
    chirp = slab.Sound.chirp(duration=0.05, from_freq=50, to_freq=16000)
    if speakers == "all":  # take every speaker from the table
        speaker_list = _speaker_table
    else:  # restrict to the requested subset
        speaker_list = speakers_from_list(speakers)
    raw, filtered = [], []
    for row in speaker_list:
        # play each speaker twice: first uncalibrated, then calibrated
        raw.append(play_and_record(row[0], chirp, apply_calibration=False))
        filtered.append(play_and_record(row[0], chirp, apply_calibration=True))

    filtered = slab.Sound(filtered)
    raw = slab.Sound(raw)

    # drop channels whose recording level is below the detection threshold
    raw.data = raw.data[:, raw.level > _rec_tresh]
    filtered.data = filtered.data[:, filtered.level > _rec_tresh]

    fig, axes = plt.subplots(2, 2, sharex="col", sharey="col")
    fig.suptitle(title)
    spectral_range(raw, plot=axes[0, 0])
    spectral_range(filtered, plot=axes[1, 0])
    raw.spectrum(axes=axes[0, 1])
    filtered.spectrum(axes=axes[1, 1])
    plt.show()
    return raw, filtered
Example no. 2
0
def test_equalization(speakers="all"):
    """
    Test the effectiveness of the speaker equalization.

    White noise is played from each speaker three times — un-equalized,
    level-equalized and fully (level + frequency) equalized — and every
    presentation is recorded.

    Args:
        speakers: "all" to test every speaker in the table, otherwise a
            selection accepted by `pick_speakers` (e.g. index numbers).
    Returns:
        tuple of slab.Sound: the raw, level-equalized and fully equalized
        recordings (one channel per speaker).
    """
    if not PROCESSORS.mode == "play_rec":
        PROCESSORS.initialize_default(mode="play_rec")
    not_equalized = slab.Sound.whitenoise(duration=.5)
    # the recordings from the un-equalized, the level equalized and the fully equalized sounds
    rec_raw, rec_level, rec_full = [], [], []
    if speakers == "all":  # use the whole speaker table
        speakers = SPEAKERS
    else:
        # BUG FIX: this previously passed the global SPEAKERS here, which
        # ignored the requested subset; pass the `speakers` argument instead.
        speakers = pick_speakers(speakers)
    for speaker in speakers:
        level_equalized = apply_equalization(not_equalized,
                                             speaker=speaker,
                                             level=True,
                                             frequency=False)
        full_equalized = apply_equalization(not_equalized,
                                            speaker=speaker,
                                            level=True,
                                            frequency=True)
        rec_raw.append(play_and_record(speaker, not_equalized, equalize=False))
        rec_level.append(
            play_and_record(speaker, level_equalized, equalize=False))
        rec_full.append(
            play_and_record(speaker, full_equalized, equalize=False))
    return slab.Sound(rec_raw), slab.Sound(rec_level), slab.Sound(rec_full)
Example no. 3
0
def check_equalization(sig, speakers="all", max_diff=5, db_thresh=80):
    """
    Test the effectiveness of the speaker equalization by recording the
    signal un-equalized, level-equalized and fully equalized and plotting
    the spectra and spectral ranges of the three recordings.
    """
    fig, ax = plt.subplots(3, 2, sharex=True)
    # recordings without, with level-only and with level+frequency equalization
    rec_raw, rec_lvl_eq, rec_freq_eq = [], [], []
    if speakers == "all":  # use the whole speaker table
        speaker_list = TABLE
    elif isinstance(speakers, list):  # use a subset of speakers
        speaker_list = get_speaker_list(speakers)
    else:
        raise ValueError("Speakers must be 'all' or a list of indices/coordinates!")
    for i in range(speaker_list.shape[0]):
        row = speaker_list.loc[i]
        # prepare a level-only and a level+frequency equalized version of the signal
        lvl_eq_sig = apply_equalization(sig, speaker=row.index_number, level=True, frequency=False)
        full_eq_sig = apply_equalization(sig, speaker=row.index_number, level=True, frequency=True)
        rec_raw.append(play_and_record(row.index_number, sig, calibrate=False))
        rec_lvl_eq.append(play_and_record(row.index_number, lvl_eq_sig, calibrate=False))
        rec_freq_eq.append(play_and_record(row.index_number, full_eq_sig, calibrate=False))
    for i, recordings in enumerate((rec_raw, rec_lvl_eq, rec_freq_eq)):
        sound = slab.Sound(recordings)
        # keep only channels whose level exceeds the threshold
        sound.data = sound.data[:, sound.level > db_thresh]
        sound.spectrum(axes=ax[i, 0], show=False)
        spectral_range(sound, plot=ax[i, 1], thresh=max_diff, log=False)
    plt.show()

    return slab.Sound(rec_raw), slab.Sound(rec_lvl_eq), slab.Sound(rec_freq_eq)
Example no. 4
0
def test_read_write():
    """
    Write random stereo sounds to a wav file and read them back; after
    matching the level, the data must survive the round trip to ~3 decimals.
    """
    for _ in range(100):
        for normalize in [True, False]:
            sound = slab.Sound(numpy.random.randn(1000, 2),
                               samplerate=numpy.random.randint(100, 1000))
            if normalize is False:
                # scale manually so the un-normalized write does not clip
                sound.data = sound.data / sound.data.max()
            # BUG FIX: `normalise` was hard-coded to True, so the
            # normalize=False half of the loop never exercised that path.
            sound.write(tmpdir / "sound.wav", normalise=normalize)
            loaded = slab.Sound(tmpdir / "sound.wav")
            loaded.level = sound.level  # undo any level change from writing
            numpy.testing.assert_almost_equal(sound.data,
                                              loaded.data,
                                              decimal=3)
def interference_block(jnd_room, jnd_voice, jnd_itd):
    '''
    Presents one condition block of the interference test.
    Condition ... 'room', 'room+voice', 'room+itd', 'voice', or 'itd'
    default_room etc. ... the reference room, SER and ITD values (module-level globals).
    jnd_room etc. ... the room, SER and ITD values that are perceived as different from the default
                      (default value + measured jnd rounded to the nearest available stimulus)
    Prints the hit and false-alarm rates and writes the trial sequence to the results file.
    '''
    # on-screen instructions for the participant
    print('Three sounds are presented in each trial.')
    print('They are always different, but sometimes')
    print('one sound is played in a larger room,')
    print('and sometimes all three are played in the same room.')
    print('Was the larger room presented first, second, or third?')
    print('Press 1 for first, 2 for second, and 3 for third.')
    input('Press enter to start the test...')
    # set parameter values of conditions in named tuples -> list of these is used for slab.Trialsequence
    default = condition(voice=default_voice, room=default_room, itd=default_itd, label='default')
    room = condition(voice=default_voice, room=jnd_room, itd=default_itd, label='room')
    room_voice = condition(voice=jnd_voice, room=jnd_room, itd=default_itd, label='room_voice')
    room_itd = condition(voice=default_voice, room=jnd_room, itd=jnd_itd, label='room_itd')
    voice = condition(voice=jnd_voice, room=default_room, itd=default_itd, label='voice')
    itd = condition(voice=default_voice, room=default_room, itd=jnd_itd, label='itd')
    conditions = [default, room, room_voice, room_itd, voice, itd]
    trials = slab.Trialsequence(conditions=conditions, n_reps=10, kind='random_permutation')
    # infinite non-repeating stream of words, so each presentation uses a fresh word
    word_seq = slab.Trialsequence(conditions=word_list, kind='infinite', label='word_seq')
    hits = 0
    false_alarms = 0
    _results_file.write(f'interference block:', tag='time')
    for trial_parameters in trials:
        # load stimuli: three different words per trial
        word  = next(word_seq)
        word2 = next(word_seq)
        word3 = next(word_seq)
        # file names encode SER (voice), room and ITD; [:-1] drops the label field of the named tuple
        jnd_stim = slab.Sound(str(stim_folder / word  / word) + '_SER%.4g_GPR168_%i_%i.wav' % trial_parameters[:-1])
        default_stim1 = slab.Sound(str(stim_folder / word2 / word2) + '_SER%.4g_GPR168_%i_%i.wav' % default[:-1])
        default_stim2 = slab.Sound(str(stim_folder / word3 / word3) + '_SER%.4g_GPR168_%i_%i.wav' % default[:-1])
        trials.present_afc_trial(jnd_stim, [default_stim1, default_stim2], isi=ISI_stairs)
        response = trials.data[-1] # read out the last response
        # NOTE(review): any truthy response counts here — presumably the stored
        # value is 1 for a correct interval choice; confirm against present_afc_trial.
        if trial_parameters.label[:4] == 'room' and response: # hit!
            hits += 1
        elif trial_parameters.label[:3] in ['voi', 'itd'] and response: # false alarm!
            false_alarms += 1
        time.sleep(_after_stim_pause)
    hitrate = hits/trials.n_trials
    print(f'hitrate: {hitrate}')
    farate = false_alarms/trials.n_trials
    print(f'false alarm rate: {farate}')
    _results_file.write(repr(trials), tag='trials')
Example no. 6
0
def _frequency_equalization(sig, speaker_list, target_speaker, calibration_lvls, bandwidth,
                            low_cutoff, high_cutoff, alpha, db_thresh):
    """
    Play the level-equalized signal, record it and compute a bank of inverse
    filters that equalizes each speaker relative to the target one.
    Return the filterbank and the recordings.
    """
    rec = []
    for i in range(speaker_list.shape[0]):
        row = speaker_list.loc[i]
        modulated_sig = deepcopy(sig)  # copy signal and correct for lvl difference
        modulated_sig.level *= calibration_lvls[row.index_number]
        rec.append(play_and_record(row.index_number, modulated_sig, apply_calibration=False))
        if row.index_number == target_speaker:
            target = rec[-1]
    rec = slab.Sound(rec)
    # set recordings which are below the threshold or which are from excluded speakers
    # equal to the target so that the resulting frequency filter will be flat
    rec.data[:, rec.level < db_thresh] = target.data

    filter_bank = slab.Filter.equalizing_filterbank(target=target, signal=rec, low_cutoff=low_cutoff,
                                                    high_cutoff=high_cutoff, bandwidth=bandwidth, alpha=alpha)
    # check for notches in the filter:
    transfer_function = filter_bank.tf(show=False)[1][0:900, :]
    if (transfer_function < -30).sum() > 0:
        # BUG FIX: the message contained a literal "/n" instead of a newline.
        # NOTE(review): `row` is the last speaker from the loop above, not
        # necessarily the speaker whose filter has the notch.
        logging.warning(f"The filter for speaker {row.index_number} at azimuth {row.azi} and elevation {row.ele} \n"
                        "contains deep notches - adjust the equalization parameters!")

    return filter_bank, rec
Example no. 7
0
def apply_equalization(signal, speaker, level=True, frequency=True):
    """
    Apply level correction and frequency equalization to a signal.

    Args:
        signal: signal to calibrate
        speaker: index number, coordinates or row from the speaker table. Determines which calibration is used
        level (bool): if True, scale the copy by the stored level correction
        frequency (bool): if True, apply the stored equalizing filter
    Returns:
        slab.Sound: calibrated copy of signal
    """
    if not EQUALIZATIONDICT:  # no calibration has been loaded or computed
        logging.warning("Setup is not calibrated! Returning the signal unchanged...")
        return signal
    sound = slab.Sound(signal)
    # resolve the speaker argument to a row of the speaker table
    if isinstance(speaker, (int, np.int64, np.int32)):
        speaker = get_speaker(index_number=speaker)
    elif isinstance(speaker, (list, tuple)):
        speaker = get_speaker(coordinates=speaker)
    elif not isinstance(speaker, (pd.Series, pd.DataFrame)):
        raise ValueError("Argument speaker must be a index number, coordinates or table row of a speaker!")
    calibration = EQUALIZATIONDICT[str(speaker.index_number.iloc[0])]
    result = deepcopy(sound)  # never modify the caller's signal
    if level:
        result.level *= calibration["level"]
    if frequency:
        result = calibration["filter"].apply(result)
    return result
Example no. 8
0
def set_signal_and_speaker(signal, speaker, calibrate=True):
    """
    Load a signal into the processor buffer and set the output channel to match the speaker.
    The processor is chosen automatically depending on the speaker.

        Args:
            signal (array-like): signal to load to the buffer, must be one-dimensional
            speaker : speaker to play the signal from, can be index number or [azimuth, elevation]
            calibrate (bool): if True (=default) apply loudspeaker equalization
    """
    signal = slab.Sound(signal)
    # resolve the speaker argument to a row of the speaker table
    if isinstance(speaker, (list, tuple)):
        speaker = get_speaker(coordinates=speaker)
    elif isinstance(speaker, (int, np.int64, np.int32)):
        speaker = get_speaker(index_number=speaker)
    elif not isinstance(speaker, pd.Series):
        raise ValueError(f"Input {speaker} for argument speaker is not valid! \n"
                         "Specify either an index number or coordinates of the speaker!")
    if calibrate:
        # apply level and frequency calibration
        logging.info('Applying calibration.')
        to_play = apply_equalization(signal, speaker)
    else:
        to_play = signal
    target_proc = speaker.analog_proc.iloc[0]
    PROCESSORS.write(tag='chan', value=speaker.channel.iloc[0], procs=target_proc)
    PROCESSORS.write(tag='data', value=to_play.data, procs=target_proc)
    # mute all other processors by routing their output to nonexistent channel 99
    remaining = list(TABLE["analog_proc"].unique())
    remaining.remove(target_proc)
    PROCESSORS.write(tag='chan', value=99, procs=remaining)
Example no. 9
0
 def __init__(self, sounds, n=10):
     """
     Build the playlist from `sounds`, which may be: a list/tuple of sound
     objects (used as is), a callable producing sound objects (called `n`
     times), a string naming a zip archive of sound files, or any other
     iterable of sound objects.

     Raises:
         TypeError: if `sounds` has an unsupported type or any resulting
             item lacks a `play` method.
     """
     if isinstance(sounds, (list, tuple)):  # a list was passed, use as is
         list.__init__(self, sounds)
     elif callable(
             sounds
     ):  # a function to generate sound objects was passed, call the function n times
         list.__init__(self, [])
         for _ in range(int(n)):
             list.append(self, sounds())
     elif isinstance(
             sounds, str
     ):  # string is interpreted as name of a zip file containing the sounds
         with zipfile.ZipFile(sounds) as zip:
             files = zip.namelist()
             if files:
                 list.__init__(self, [])
                 for file in files:
                     # NOTE(review): `file` is a member name inside the archive,
                     # yet it is handed to slab.Sound as if it were an on-disk
                     # path; this likely needs zip.open(file) — confirm.
                     list.append(self, slab.Sound(file))
     elif hasattr(sounds, '__iter__'
                  ):  # it's an iterable object, just iterate through it
         for sound in sounds:
             list.append(self, sound)
     else:
         raise TypeError('Unknown type for list argument.')
     self.previous = None  # this property holds the index of the previously played sound
     # fail fast if any item cannot actually be played
     if not all(hasattr(sound, 'play') for sound in self):
         raise TypeError('Cannot play all of the provided items.')
Example no. 10
0
def apply_equalization(signal, speaker, level=True, frequency=True):
    """
    Apply level correction and frequency equalization to a signal

    Args:
        signal: signal to calibrate
        speaker: index number, coordinates or row from the speaker table. Determines which calibration is used
        level (bool): if True, scale the copy by the speaker's stored level correction
        frequency (bool): if True, apply the speaker's stored equalizing filter
    Returns:
        slab.Sound: calibrated copy of signal
    Raises:
        ValueError: if the requested equalization has not been computed or loaded for the speaker
    """
    signal = slab.Sound(signal)
    speaker = pick_speakers(speaker)[0]  # resolve index/coordinates to a speaker
    equalized_signal = deepcopy(signal)  # never modify the caller's signal
    if level:
        if speaker.level is None:
            # BUG FIX: message said "equalization of calibrate" instead of "or calibrate"
            raise ValueError(
                "speaker not level-equalized! Load an existing equalization or calibrate the setup!"
            )
        equalized_signal.level *= speaker.level
    if frequency:
        if speaker.filter is None:
            # BUG FIX: same "of" -> "or" typo as above
            raise ValueError(
                "speaker not frequency-equalized! Load an existing equalization or calibrate the setup!"
            )
        equalized_signal = speaker.filter.apply(equalized_signal)
    return equalized_signal
Example no. 11
0
def _frequency_equalization(speakers, sound, reference_speaker,
                            calibration_levels, bandwidth, low_cutoff,
                            high_cutoff, alpha, threshold):
    """
    Play the level-equalized sound from every speaker, record it and build a
    bank of inverse filters that equalizes each speaker relative to the
    reference speaker. Return the filterbank and the recordings.
    """
    reference = play_and_record(reference_speaker, sound, equalize=False)
    recs = []
    for spk, lvl in zip(speakers, calibration_levels):
        scaled = deepcopy(sound)  # level-correct a copy of the sound
        scaled.level *= lvl
        recs.append(play_and_record(spk, scaled, equalize=False))
    recs = slab.Sound(recs)
    # recordings below the threshold are replaced by the reference so that
    # their equalizing filter comes out flat
    recs.data[:, recs.level < threshold] = reference.data
    filter_bank = slab.Filter.equalizing_filterbank(
        reference, recs, low_cutoff=low_cutoff, high_cutoff=high_cutoff,
        bandwidth=bandwidth, alpha=alpha)
    # warn if any filter contains a deep notch (inspect the first 900 bins)
    transfer = filter_bank.tf(show=False)[1][0:900, :]
    if (transfer < -30).sum() > 0:
        print(
            "Some of the equalization filters contain deep notches - try adjusting the parameters."
        )
    return filter_bank, recs
Example no. 12
0
def set_signal_and_speaker(signal, speaker, equalize=True):
    """
    Load a signal into the processor buffer and set the output channel to match the speaker.
    The processor is chosen automatically depending on the speaker.

        Args:
            signal (array-like): signal to load to the buffer, must be one-dimensional
            speaker (Speaker, int) : speaker to play the signal from, can be index number or [azimuth, elevation]
            equalize (bool): if True (=default) apply loudspeaker equalization
    """
    sound = slab.Sound(signal)
    speaker = pick_speakers(speaker)[0]  # resolve to a Speaker object
    if equalize:
        # apply level and frequency calibration
        logging.info('Applying calibration.')
        to_play = apply_equalization(sound, speaker)
    else:
        to_play = sound
    PROCESSORS.write(tag='chan', value=speaker.analog_channel,
                     processors=speaker.analog_proc)
    PROCESSORS.write(tag='data', value=to_play.data,
                     processors=speaker.analog_proc)
    # route every other processor's analog output to nonexistent channel 99
    muted = {s.analog_proc for s in SPEAKERS}
    muted.remove(speaker.analog_proc)
    PROCESSORS.write(tag='chan', value=99, processors=muted)
Example no. 13
0
def test_set_signal_and_speaker():
    """Load a raw array and a slab.Sound onto every speaker channel, no equalization."""
    raw = np.random.random(size=1000)
    signals = [raw, slab.Sound(np.random.random(size=1000))]
    for signal in signals:
        for speaker in range(47):
            freefield.set_signal_and_speaker(signal, speaker, equalize=False)
Example no. 14
0
def play_and_record(speaker,
                    sound,
                    compensate_delay=True,
                    compensate_attenuation=False,
                    equalize=True):
    """
    Play the signal from a speaker and return the recording. Delay compensation
    means making the buffer of the recording processor n samples longer and then
    throwing the first n samples away when returning the recording so sig and
    rec still have the same length. For this to work, the circuits rec_buf.rcx
    and play_buf.rcx have to be initialized on RP2 and RX8s and the mic must
    be plugged in.
    Parameters:
        speaker: integer between 1 and 48, index number of the speaker
        sound: instance of slab.Sound, signal that is played from the speaker
        compensate_delay: bool, compensate the delay between play and record
        compensate_attenuation: bool, rescale the recording to the level of the
            played sound (the interaural level difference is preserved for
            binaural recordings)
        equalize: bool, apply loudspeaker equalization before playing
    Returns:
        rec: slab.Sound (slab.Binaural in mode "play_birec"), recorded signal
    """
    if compensate_delay:
        n_delay = get_recording_delay(play_from="RX8", rec_from="RP2")
        n_delay += 50  # make the delay a bit larger to avoid missing the sound's onset
    else:
        n_delay = 0
    # BUG FIX: the RX8 play-buffer length was previously written twice; once is enough.
    write(tag="playbuflen", value=sound.n_samples, processors=["RX81", "RX82"])
    write(tag="playbuflen", value=sound.n_samples + n_delay, processors="RP2")
    set_signal_and_speaker(sound, speaker, equalize)
    play()
    wait_to_finish_playing()
    if PROCESSORS.mode == "play_rec":  # read the data from buffer and skip the first n_delay samples
        rec = read(tag='data',
                   processor='RP2',
                   n_samples=sound.n_samples + n_delay)[n_delay:]
        rec = slab.Sound(rec)
    elif PROCESSORS.mode == "play_birec":  # read data for left and right ear from buffer
        rec_l = read(tag='datal',
                     processor='RP2',
                     n_samples=sound.n_samples + n_delay)[n_delay:]
        rec_r = read(tag='datar',
                     processor='RP2',
                     n_samples=sound.n_samples + n_delay)[n_delay:]
        rec = slab.Binaural([rec_l, rec_r])
    else:
        raise ValueError(
            "Setup must be initialized in mode 'play_rec' or 'play_birec'!")
    if compensate_attenuation:
        if isinstance(rec, slab.Binaural):
            # preserve the interaural level difference while matching the overall level
            iid = rec.left.level - rec.right.level
            rec.level = sound.level
            rec.left.level += iid
        else:
            rec.level = sound.level
    return rec
Example no. 15
0
def play_and_record(speaker_nr,
                    sig,
                    compensate_delay=True,
                    apply_calibration=False):
    """
    Play the signal from a speaker and return the recording. Delay compensation
    means making the buffer of the recording device n samples longer and then
    throwing the first n samples away when returning the recording so sig and
    rec still have the same length. For this to work, the circuits rec_buf.rcx
    and play_buf.rcx have to be initialized on RP2 and RX8s and the mic must
    be plugged in.
    Parameters:
        speaker_nr: integer between 1 and 48, index number of the speaker
        sig: instance of slab.Sound, signal that is played from the speaker
        compensate_delay: bool, compensate the delay between play and record
        apply_calibration: bool, apply loudspeaker equalization before playing
    Returns:
        slab.Sound: recorded signal (two channels in binaural mode)
    """
    # TODO use binaural class for binaural recordings

    if _mode == "binaural_recording":
        binaural = True  # 2 channel recording
    elif _mode == "play_and_record":
        binaural = False  # record single channel
    else:
        raise ValueError("Setup must be initalized in 'play_and_record' for "
                         "single or 'binaural' for two channel recording!"
                         "\n current mode is %s" % (_mode))
    if compensate_delay:
        n_delay = get_recording_delay(play_device="RX8", rec_device="RP2")
    else:
        n_delay = 0
    # BUG FIX: "playbuflen" was previously written to the RX8s twice; once is enough.
    set_variable(variable="playbuflen", value=sig.nsamples, proc="RX8s")
    set_variable(variable="playbuflen",
                 value=sig.nsamples + n_delay,
                 proc="RP2")
    set_signal_and_speaker(sig, speaker_nr, apply_calibration)
    trigger()  # start playing and wait
    wait_to_finish_playing(proc="all")
    if binaural is False:  # read the data from buffer and skip the first n_delay samples
        rec = get_variable(variable='data',
                           proc='RP2',
                           n_samples=sig.nsamples + n_delay)[n_delay:]
    else:  # read data for left and right ear from buffer
        recl = get_variable(variable='datal',
                            proc='RP2',
                            n_samples=sig.nsamples + n_delay)[n_delay:]
        recr = get_variable(variable='datar',
                            proc='RP2',
                            n_samples=sig.nsamples + n_delay)[n_delay:]
        rec = [recl, recr]
    return slab.Sound(rec)  # names for channels?
Example no. 16
0
 def test_set_signal_and_speaker(self):
     """Load raw-array and slab.Sound signals for random speakers on each processor."""
     # TODO: test applying calibration
     signals = [np.random.random(size=1000),
                slab.Sound(np.random.random(size=1000))]
     speakers = [np.random.randint(0, 47), [0, 0]]
     for signal in signals:
         for proc in ("RX81", "RX82"):
             for speaker in speakers:
                 main.set_signal_and_speaker(signal, speaker, proc)
Example no. 17
0
def _level_equalization(sig, speaker_list, target_speaker):
    """
    Record the signal from every speaker in the list and return each
    speaker's level relative to the target speaker (the target speaker
    must be contained in the list).
    """
    recordings = []
    for row in speaker_list:
        recordings.append(play_and_record(row[0], sig, apply_calibration=False))
        if row[0] == target_speaker:
            target = recordings[-1]
    recordings = slab.Sound(recordings)
    # thresholding: sub-threshold recordings are replaced by the target
    recordings.data[:, recordings.level < _rec_tresh] = target.data
    return target.level / recordings.level
Example no. 18
0
def _level_equalization(sig, speaker_list, target_speaker, db_thresh):
    """
    Record the signal from every speaker in the table and return each
    speaker's level relative to the target speaker (the target speaker
    must be contained in the list).
    """
    recordings = []
    for i in range(speaker_list.shape[0]):
        speaker_row = speaker_list.loc[i]
        recordings.append(play_and_record(speaker_row.index_number, sig, apply_calibration=False))
        if speaker_row.index_number == target_speaker:
            target = recordings[-1]
    recordings = slab.Sound(recordings)
    # thresholding: sub-threshold recordings are replaced by the target
    recordings.data[:, recordings.level < db_thresh] = target.data
    return target.level / recordings.level
Example no. 19
0
def _level_equalization(speakers, sound, reference_speaker, threshold):
    """
    Record the sound from each speaker in the list and return the level of
    each speaker relative to the reference speaker (which must be in the list).
    """
    target_recording = play_and_record(reference_speaker, sound, equalize=False)
    recordings = [play_and_record(s, sound, equalize=False) for s in speakers]
    recordings = slab.Sound(recordings)
    # thresholding: sub-threshold recordings are replaced by the reference
    recordings.data[:, recordings.level < threshold] = target_recording.data
    return target_recording.level / recordings.level
Example no. 20
0
def play_and_record(speaker_nr, sig, compensate_delay=True, compensate_level=True, calibrate=False):
    """
    Play the signal from a speaker and return the recording. Delay compensation
    means making the buffer of the recording processor n samples longer and then
    throwing the first n samples away when returning the recording so sig and
    rec still have the same length. For this to work, the circuits rec_buf.rcx
    and play_buf.rcx have to be initialized on RP2 and RX8s and the mic must
    be plugged in.
    Parameters:
        speaker_nr: integer between 1 and 48, index number of the speaker
        sig: instance of slab.Sound, signal that is played from the speaker
        compensate_delay: bool, compensate the delay between play and record
        compensate_level: bool, rescale the recording to the level of the
            played signal (the interaural level difference is preserved for
            binaural recordings)
        calibrate: bool, apply loudspeaker equalization before playing
    Returns:
        rec: slab.Sound (slab.Binaural in mode "play_birec"), recorded signal
    """
    if PROCESSORS.mode == "play_birec":
        binaural = True  # 2 channel recording
    elif PROCESSORS.mode == "play_rec":
        binaural = False  # record single channel
    else:
        raise ValueError("Setup must be initialized in mode 'play_rec' or 'play_birec'!")
    if compensate_delay:
        n_delay = get_recording_delay(play_from="RX8", rec_from="RP2")
        n_delay += 50  # make the delay a bit larger, just to be sure
    else:
        n_delay = 0
    # BUG FIX: the RX8 play-buffer length was previously written twice; once is enough.
    PROCESSORS.write(tag="playbuflen", value=sig.nsamples, procs=["RX81", "RX82"])
    PROCESSORS.write(tag="playbuflen", value=sig.nsamples + n_delay, procs="RP2")
    set_signal_and_speaker(sig, speaker_nr, calibrate)
    play_and_wait()
    if binaural is False:  # read the data from buffer and skip the first n_delay samples
        rec = PROCESSORS.read(tag='data', proc='RP2', n_samples=sig.nsamples + n_delay)[n_delay:]
        rec = slab.Sound(rec)
    else:  # read data for left and right ear from buffer
        rec_l = PROCESSORS.read(tag='datal', proc='RP2', n_samples=sig.nsamples + n_delay)[n_delay:]
        rec_r = PROCESSORS.read(tag='datar', proc='RP2', n_samples=sig.nsamples + n_delay)[n_delay:]
        rec = slab.Binaural([rec_l, rec_r])
    if compensate_level:
        if binaural:
            # preserve the interaural level difference while matching the overall level
            iid = rec.left.level - rec.right.level
            rec.level = sig.level
            rec.left.level += iid
        else:
            rec.level = sig.level
    return rec
Example no. 21
0
def _frequency_equalization(sig, speaker_list, target_speaker, bandwidth,
                            freq_range):
    """
    Play the level-equalized signal from every speaker, record it and compute
    a bank of inverse filters that equalizes each speaker relative to the
    target one. Return the filterbank and the recordings.
    """
    recordings = []
    for row in speaker_list:
        scaled = deepcopy(sig)  # level-correct a copy of the signal
        scaled.level *= _calibration_levels[int(row[0])]
        recordings.append(
            play_and_record(row[0], scaled, apply_calibration=False))
        if row[0] == target_speaker:
            target = recordings[-1]
    recordings = slab.Sound(recordings)
    # thresholding: sub-threshold recordings are replaced by the target
    recordings.data[:, recordings.level < _rec_tresh] = target.data
    fbank = slab.Filter.equalizing_filterbank(
        target=target, signal=recordings,
        low_lim=freq_range[0], hi_lim=freq_range[1], bandwidth=bandwidth)
    return fbank, recordings
Example no. 22
0
def test_sound_generation():
    """
    Generate sounds from arrays, lists of arrays/sounds and from wav files
    referenced by str or pathlib.Path, and check that the results agree.
    """
    # numpy.ndarray | str | pathlib.Path | list
    for _ in range(100):
        data = numpy.ones([10, 2])
        sound = slab.Sound(data, samplerate=10)  # generate sound from array
        sound1 = slab.Sound(
            [data, data], samplerate=10)  # generate sound from list of arrays
        sound2 = slab.Sound([sound, sound])  # generate from list of sounds
        # sound1 and sound2 should be the same:
        numpy.testing.assert_equal(sound1.data, sound2.data)
        numpy.testing.assert_equal(sound1.times, sound2.times)
        assert sound1.samplerate == sound2.samplerate
        # BUG FIX: this assertion was accidentally duplicated
        assert sound1.duration == sound2.duration
        # test if saving the file and initializing from string / path works. The reading and writing of data
        # is tested in more detail in test_read_write()
        sound = slab.Sound(numpy.random.randn(1000, 2),
                           samplerate=numpy.random.randint(100, 1000))
        sound.write(tmpdir / "sound.wav", normalise=False)
        loaded1 = slab.Sound(tmpdir / "sound.wav")
        loaded2 = slab.Sound(str(tmpdir / "sound.wav"))
        numpy.testing.assert_equal(loaded1.data, loaded2.data)
        numpy.testing.assert_equal(loaded1.times, loaded2.times)
def jnd(condition, practise=False):
    '''
    Presents a staircase for a 2AFC task and returns the threshold.
    This threshold is used in the main experiment as jnd.
    condition ... 'room', 'voice', or 'itd'
    practise ... if True, run a short, easy staircase with trial-by-trial
                 plots and do not write results to the results file
    Returns the condition value (room/voice/itd parameter) at the threshold.
    '''
    # participant instructions, specific to the tested condition
    print('Three sounds are presented in each trial.')
    print('They are always different, but sometimes')
    if condition == 'room':
        print('one sound is played in a larger room,')
        print('and sometimes all three are played in the same room.')
        print('Was the larger room presented first, second, or third?')
    elif condition == 'voice':
        print('one is spoken by a different (larger) person,')
        print('and sometimes all three are spoken by the same person.')
        print('Was the larger person presented first, second, or third?')
    elif condition == 'itd':
        print('one is played from a different direction (slightly to the left),')
        print('and sometimes all three are played from straight ahead.')
        print('Was the sound slightly from the left played first, second, or third?')
    else:
        raise ValueError(f'Invalid condition {condition}.')
    print('Press 1 for first, 2 for second, 3 for third.')
    print('The difference will get more and more difficult to hear.')
    input('Press enter to start JND estimation...')
    repeat = 'r'
    condition_values = globals()[condition+'s'] # get the parameter list (vars rooms, voices, or itds) from condition string
    while repeat == 'r':  # repeat the whole measurement while the user presses 'r'
        # make a random, non-repeating list of words to present during the staircase
        word_seq = slab.Trialsequence(conditions=word_list, kind='infinite', label='word_seq')
        # define the staircase
        if practise:  # shorter, easier staircase for practise runs
            stairs = slab.Staircase(start_val=len(condition_values)-1, n_reversals=3,
                                step_sizes=[4, 3, 2], min_val=0, max_val=len(condition_values)-1, n_up=1, n_down=1, n_pretrials=0)
        else:
            stairs = slab.Staircase(start_val=len(condition_values)-4, n_reversals=15,
                                step_sizes=[4, 2], min_val=0, max_val=len(condition_values)-1, n_up=1, n_down=2, step_up_factor=1.5, n_pretrials=1) # should give approx. 70% hitrate
            _results_file.write(f'{condition} jnd:', tag='time')
        for trial in stairs:
            current = condition_values[int(trial)]
            # load stimuli: the deviant carries the current staircase value,
            # the two default stimuli carry the reference values
            word = next(word_seq)
            word2 = next(word_seq)
            word3 = next(word_seq)
            if condition == 'room':
                jnd_stim = slab.Sound(stim_folder / word  / f'{word}_SER{default_voice:.4g}_GPR168_{current}_{default_itd}.wav')
            elif condition == 'voice':
                jnd_stim = slab.Sound(stim_folder / word  / f'{word}_SER{current:.4g}_GPR168_{default_room}_{default_itd}.wav')
            elif condition == 'itd':
                jnd_stim = slab.Sound(stim_folder / word  / f'{word}_SER{default_voice:.4g}_GPR168_{default_room}_{current}.wav')
            default_stim1 = slab.Sound(stim_folder / word2 / f'{word2}_SER{default_voice:.4g}_GPR168_{default_room}_{default_itd}.wav')
            default_stim2 = slab.Sound(stim_folder / word3 / f'{word3}_SER{default_voice:.4g}_GPR168_{default_room}_{default_itd}.wav')
            stairs.present_afc_trial(jnd_stim, [default_stim1, default_stim2], isi=ISI_stairs, print_info=practise)
            if practise:
                stairs.plot()
        thresh = stairs.threshold()
        # round up to the nearest available stimulus value
        thresh_condition_value = condition_values[numpy.ceil(thresh).astype('int')]
        if practise:
            stairs.close_plot()
        else:
            # BUG FIX: the printout was hard-coded to 'room' regardless of the tested condition
            print(f'{condition} jnd: {round(thresh, ndigits=1)}')
            _results_file.write(repr(stairs), tag=f'stairs {condition}')
            _results_file.write(thresh, tag=f'jnd {condition}')
            _results_file.write(thresh_condition_value, tag=f'jnd condition value {condition}')
        repeat = input('Press enter to continue, "r" to repeat this threshold measurement.\n\n')
    return thresh_condition_value