def test_sequence_from_trials():
    for i in range(100):  # generate from integers
        start = numpy.random.randint(1, 100)
        stop = start+numpy.random.randint(1, 10)
        trials = [numpy.random.randint(start, stop) for _ in range(100)]
        sequence = slab.Trialsequence(trials=trials)
        assert all(numpy.unique(sequence.trials) == numpy.array(range(1, sequence.n_conditions+1)))
    trials = ["a", "x", "x", "z", "a", "a", "a", "z"]
    sequence = slab.Trialsequence(trials=trials)
    assert all(numpy.unique(sequence.trials) == numpy.array(range(1, sequence.n_conditions + 1)))
    sounds = [slab.Sound.pinknoise(), slab.Sound.whitenoise()]
    trials = [random.choice(sounds) for i in range(50)]
    sequence = slab.Trialsequence(trials=trials)
    assert all(numpy.unique(sequence.trials) == numpy.array(range(1, sequence.n_conditions + 1)))
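The behaviour tested above: passing raw trial values via the `trials=` argument makes `slab.Trialsequence` recode each distinct value as an integer condition index from 1 to `n_conditions`. A minimal sketch:

import slab

sequence = slab.Trialsequence(trials=["a", "x", "x", "z"])
print(sequence.n_conditions)  # 3 distinct values -> 3 conditions
print(sequence.trials)        # the raw values recoded as integers 1..3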
def interference_block(jnd_room, jnd_voice, jnd_itd):
    '''
    Presents one condition block of the interference test.
    Each trial presents one of the conditions 'default', 'room', 'room_voice',
    'room_itd', 'voice', or 'itd'. The default_* globals hold the reference
    voice (SER), room and ITD values; jnd_room, jnd_voice and jnd_itd are the
    values perceived as different from these defaults (default value +
    measured jnd, rounded to the nearest available stimulus).
    '''
    print('Three sounds are presented in each trial.')
    print('They are always different, but sometimes')
    print('one sound is played in a larger room,')
    print('and sometimes all three are played in the same room.')
    print('Was the larger room presented first, second, or third?')
    print('Press 1 for first, 2 for second, and 3 for third.')
    input('Press enter to start the test...')
    # set parameter values of conditions in named tuples -> list of these is used for slab.Trialsequence
    default = condition(voice=default_voice, room=default_room, itd=default_itd, label='default')
    room = condition(voice=default_voice, room=jnd_room, itd=default_itd, label='room')
    room_voice = condition(voice=jnd_voice, room=jnd_room, itd=default_itd, label='room_voice')
    room_itd = condition(voice=default_voice, room=jnd_room, itd=jnd_itd, label='room_itd')
    voice = condition(voice=jnd_voice, room=default_room, itd=default_itd, label='voice')
    itd = condition(voice=default_voice, room=default_room, itd=jnd_itd, label='itd')
    conditions = [default, room, room_voice, room_itd, voice, itd]
    trials = slab.Trialsequence(conditions=conditions, n_reps=10, kind='random_permutation')
    word_seq = slab.Trialsequence(conditions=word_list, kind='infinite', label='word_seq')
    hits = 0
    false_alarms = 0
    _results_file.write('interference block:', tag='time')
    for trial_parameters in trials:
        # load stimuli
        word  = next(word_seq)
        word2 = next(word_seq)
        word3 = next(word_seq)
        jnd_stim = slab.Sound(str(stim_folder / word  / word) + '_SER%.4g_GPR168_%i_%i.wav' % trial_parameters[:-1])
        default_stim1 = slab.Sound(str(stim_folder / word2 / word2) + '_SER%.4g_GPR168_%i_%i.wav' % default[:-1])
        default_stim2 = slab.Sound(str(stim_folder / word3 / word3) + '_SER%.4g_GPR168_%i_%i.wav' % default[:-1])
        trials.present_afc_trial(jnd_stim, [default_stim1, default_stim2], isi=ISI_stairs)
        response = trials.data[-1] # read out the last response
        if trial_parameters.label[:4] == 'room' and response: # hit!
            hits += 1
        elif trial_parameters.label[:3] in ['voi', 'itd'] and response: # false alarm!
            false_alarms += 1
        time.sleep(_after_stim_pause)
    hitrate = hits/trials.n_trials
    print(f'hitrate: {hitrate}')
    farate = false_alarms/trials.n_trials
    print(f'false alarm rate: {farate}')
    _results_file.write(repr(trials), tag='trials')
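Note that `interference_block` relies on a module-level `condition` tuple type that is not part of this excerpt. Judging from the field access (`.label`) and the slicing (`trial_parameters[:-1]` feeding the SER/room/ITD format string), it was presumably defined along these lines (a hypothetical reconstruction, not the original code):

import collections

# hypothetical: the field order must match the '%'-formatting above,
# with 'label' last so that [:-1] yields (voice, room, itd)
condition = collections.namedtuple('condition', ['voice', 'room', 'itd', 'label'])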
Example 3
def familiarization():
    """
    Presents the familiarization stimuli (100% modulation depth, random direction)
    """
    print('Familiarization: sounds moving left or right are presented. \n'
          'The direction should be easy to hear. \n'
          'Press 1 for left, 2 for right.\n'
          'Press enter to start familiarization (2min)...')
    repeat = 'r'
    while repeat == 'r':
        trials = slab.Trialsequence(conditions=[-1, 1],
                                    n_reps=10,
                                    kind='random_permutation')
        responses = []
        _results_file.write('familiarization:', tag='time')
        for direction in trials:
            stim = moving_gaussian(speed=_adaptor_speed,
                                   snr=100,
                                   direction=direction)
            stim.play()  # present
            with slab.psychoacoustics.key() as key:  # and get response
                resp = key.getch()
            if direction == -1:  # transform response, assuming -1 codes leftward motion: left = key '1' (ASCII 49), right = key '2' (ASCII 50)
                resp = resp == 49
            else:
                resp = resp == 50
            responses.append(resp)
            time.sleep(_after_stim_pause)
        # compute hitrate
        hitrate = sum(responses) / trials.n_trials  # responses are booleans, so the sum counts correct trials
        print(f'hitrate: {hitrate}')
        _results_file.write(hitrate, tag='hitrate')
        repeat = input(
            'Press enter to continue, "r" to repeat familiarization.')
    return hitrate
Example 4
def calibrate_camera_no_visual(targets, n_reps=1, n_images=5):
    """
    This is an alteration of calibrate_camera for cases in which LEDs are
    not available. The list of targets is repeated n_reps times in the
    exact same order without any randomization. When the whole setup is
    equipped with LEDs this function should be removed
    """
    if not isinstance(CAMERAS, camera.Cameras):
        raise ValueError("Camera must be initialized before calibration!")
    coords = pd.DataFrame(columns=["azi_cam", "azi_world", "ele_cam", "ele_world", "cam", "frame", "n"])
    if not PROCESSORS.mode == "cam_calibration":
        PROCESSORS.initialize(['RP2', 'RP2', DIR/'data'/'rcx'/'button.rcx'], True, "GB")
    # this is a bit of a hack: every trial is its own condition and they are sorted in the end
    targets = pd.concat([targets]*n_reps)
    targets = targets.reset_index()
    targets = [targets.loc[i] for i in targets.index]
    seq = slab.Trialsequence(n_reps=1, conditions=targets)
    seq.trials.sort()
    for trial in seq:
        logging.info(f"trial nr {seq.this_n}: \n target at elevation of {trial.ele} and azimuth of {trial.azi}")
        wait_for_button()
        pose = CAMERAS.get_headpose(average=False, convert=False, n=n_images)
        pose.insert(0, "n", seq.this_n)
        pose = pose.rename(columns={"azi": "azi_cam", "ele": "ele_cam"})
        pose.insert(2, "ele_world", trial.ele)
        pose.insert(4, "azi_world", trial.azi)
        pose = pose.dropna()
        coords = coords.append(pose, ignore_index=True, sort=True)
    CAMERAS.calibrate(coords, plot=True)
    return coords
Example 5
def calibrate_camera(targets, n_reps=1, n_images=5):
    """
    Calibrate all cameras by lighting up a series of LEDs and estimate the pose when the head is pointed
    towards the currently lit LED. This results in a list of world and camera coordinates which is used to
    calibrate the cameras.

    Args:
        targets (pandas DataFrame): rows from the speaker table. The speakers must have a LED attached
        n_reps(int): number of repetitions for each target
        n_images(int): number of images taken for each head pose estimate
    Returns:
        pandas DataFrame: camera and world coordinates acquired (calibration is performed automatically)
    """
    if not isinstance(CAMERAS, camera.Cameras):
        raise ValueError("Camera must be initialized before calibration!")
    coords = pd.DataFrame(columns=["azi_cam", "azi_world", "ele_cam", "ele_world", "cam", "frame", "n"])
    if not PROCESSORS.mode == "cam_calibration":  # initialize setup in camera calibration mode
        PROCESSORS.initialize_default(mode="cam_calibration")
    targets = [targets.loc[i] for i in targets.index]
    seq = slab.Trialsequence(n_reps=n_reps, conditions=targets)
    for trial in seq:
        logging.info(f"trial nr {seq.this_n}: \n target at elevation of {trial.ele} and azimuth of {trial.azi}")
        PROCESSORS.write(tag="bitmask", value=int(trial.bit), procs=trial.digital_proc)
        wait_for_button()
        pose = CAMERAS.get_headpose(average=False, convert=False, n=n_images)
        pose.insert(0, "n", seq.this_n)
        pose = pose.rename(columns={"azi": "azi_cam", "ele": "ele_cam"})
        pose.insert(2, "ele_world", trial.ele)
        pose.insert(4, "azi_world", trial.azi)
        pose = pose.dropna()
        coords = coords.append(pose, ignore_index=True, sort=True)
        PROCESSORS.write(tag="bitmask", value=0, procs=trial.digital_proc)
    CAMERAS.calibrate(coords, plot=True)
    return coords
Example 6
def localization_test_freefield(speakers,
                                duration=0.5,
                                n_reps=1,
                                n_images=5,
                                visual=False):
    """
    Run a basic localization test where the same sound is played from different
    speakers in randomized order, without playing the same position twice in
    a row. After every trial the presentation is paused and the listener has
    to localize the sound source by pointing the head towards the source and
    pressing the response button. The cameras need to be calibrated before the
    test! After every trial the listener has to point to the middle speaker at
    0 elevation and azimuth and press the button to indicate the next trial.

    Args:
        speakers : rows from the speaker table or index numbers of the speakers.
        duration (float): duration of the noise played from the target positions in seconds
        n_reps(int): number of repetitions for each target
        n_images(int): number of images taken for each head pose estimate
        visual(bool): If True, light a LED at the target position - the speakers must have a LED attached
    Returns:
        instance of slab.Trialsequence: the response is stored in the data attribute as tuples with (azimuth, elevation)
    """
    speakers = pick_speakers(speakers)
    if not PROCESSORS.mode == "loctest_freefield":
        PROCESSORS.initialize_default(mode="loctest_freefield")
    if visual is True:
        if not all([s.digital_channel for s in speakers]):
            raise ValueError(
                "All speakers must have a LED attached for a test with visual cues"
            )
    seq = slab.Trialsequence(speakers, n_reps, kind="non_repeating")
    play_start_sound()
    for speaker in seq:
        wait_for_button()
        while check_pose(fix=[0, 0]) is None:  # check if head is in position
            play_warning_sound()
            wait_for_button()
        sound = slab.Sound.pinknoise(duration=duration)
        write(tag="playbuflen",
              value=sound.n_samples,
              processors=["RX81", "RX82"])
        if visual is True:  # turn LED on
            write(tag="bitmask",
                  value=speaker.digital_channel,
                  processors=speaker.digital_proc)
        set_signal_and_speaker(signal=sound.data.flatten(), speaker=speaker)
        play()
        wait_to_finish_playing()
        wait_for_button()
        pose = get_head_pose(n_images=n_images)
        if visual is True:  # turn LED off
            write(tag="bitmask", value=0, processors=speaker.digital_proc)
        seq.add_response(pose)
    play_start_sound()
    # change conditions property so it contains only the azimuth and elevation of the source
    seq.conditions = np.array([(s.azimuth, s.elevation)
                               for s in seq.conditions])
    return seq
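Since the returned sequence stores the (azimuth, elevation) responses in its `data` attribute and the matching targets in `conditions`, the localization error can be computed after the test. A minimal post-hoc sketch, assuming `seq` is the sequence returned above and each trial has exactly one response:

import numpy as np

targets = np.array([seq.conditions[t - 1] for t in seq.trials])       # target (azi, ele) per trial
responses = np.array(seq.data, dtype=float).reshape(len(targets), 2)  # response (azi, ele) per trial
print(np.abs(targets - responses).mean(axis=0))  # mean absolute azimuth and elevation error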
def test_results():
    slab.psychoacoustics.results_folder = PATH
    results = slab.ResultsFile(subject="MrPink")
    for data in [[1, 2, 3], slab.Trialsequence()]:
        results.write(data)
    results.read()
    results = slab.ResultsFile.read_file(slab.ResultsFile.previous_file(subject="MrPink"))
    results.clear()
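`slab.ResultsFile` appends timestamped, tagged entries to a per-subject results file; this is what the `_results_file.write(..., tag=...)` calls in the experiments above rely on. A minimal sketch of the write/read round trip, mirroring the calls used in these examples (assuming the current directory is writable):

import slab

slab.psychoacoustics.results_folder = '.'  # store results next to the script
results = slab.ResultsFile(subject='demo')
results.write(0.71, tag='hitrate')
print(results.read())  # read the file contents back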
Example 8
def main_experiment(subject=None):
    global _results_file
    # set up the results file
    if not subject:
        subject = input('Enter subject code: ')
    _results_file = slab.ResultsFile(subject=subject)
    # _ = familiarization() # run the familiarization, the hitrate is saved in the results file
    practice_stairs()  # run the stairs practice
    print(
        'The main part of the experiment starts now (motion direction thresholds).'
    )
    adapter_list = make_adapters()
    # results table with three columns: speed, jnd_no_adapter, jnd_adapter
    jnds = numpy.zeros((len(_speeds), 3, _n_blocks_per_speed))
    repeats = list()  # for collecting conditions marked for re-measuring due to large differences in jnds
    for i in range(_n_blocks_per_speed):
        speed_seq = slab.Trialsequence(conditions=_speeds,
                                       n_reps=1,
                                       kind='random_permutation')
        for speed in speed_seq:
            idx = _speeds.index(speed)  # index of the current speed value, used for results table
            jnds[idx, 0, :] = speed
            if numpy.random.choice((True, False)):  # presented without adapters first
                # each call to jnd prints instructions and saves to the results file
                jnds[idx, 1, i] = jnd(speed)
                jnds[idx, 2, i] = jnd(speed, adapter_list)
            else:  # with adapters first
                jnds[idx, 2, i] = jnd(speed, adapter_list)
                jnds[idx, 1, i] = jnd(speed)
            if i == 1:
                # if measurements too different, mark for repeat
                if abs(jnds[idx, 1, 1] - jnds[idx, 1, 0]) > _jnd_diff_thresh:
                    repeats.append((speed, False))
                    print(
                        f'{speed} no adapter: Difference to first JND too large. Marked for repetition!'
                    )
                # if measurements too different, mark for repeat
                if abs(jnds[idx, 2, 1] - jnds[idx, 2, 0]) > _jnd_diff_thresh:
                    repeats.append((speed, True))
                    print(
                        f'{speed} with adapters: Difference to first JND too large. Marked for repetition!'
                    )
        # save a string representation of the numpy results table
        _results_file.write(str(jnds[:, :, i]), tag=f'results round {i}')
    # rerun the conditions marked for repeated measurements
    if repeats:
        print('Repeating marked JND measurements.')
        for speed, adapters in repeats:  # these will just be tagged in the results file, not in the tables
            if adapters:
                jnd(speed, adapter_list)
            else:
                jnd(speed)
Example 9
def main_experiment(subject=None):
    global _results_file
    # set up the results file
    if not subject:
        subject = input('Enter subject code: ')
    _results_file = slab.ResultsFile(subject=subject, folder=_results_folder)

    print('Make adaptors and results-table...')
    spatial_adaptor_precomp = make_spatial_adaptors()
    print('spatial_adaptors completed!')
    binaural_adaptor_precomp = make_binaural_adaptors()
    print('binaural_adaptors completed!')

    adaptor_types = [spatial_adaptor_precomp, binaural_adaptor_precomp, None]
    adaptor_names = ['spatial adaptor', 'binaural adaptor', 'no adaptor']
    jnds = numpy.zeros((len(adaptor_types), _n_blocks_per_adaptorversion))
    repeats = list()

    familiarization()
    practice_stairs()

    print()
    print(
        'The main part of the experiment starts now (motion direction thresholds).'
    )
    _results_file.write('main experiment', tag='time')
    for i in range(_n_blocks_per_adaptorversion):
        adaptor_types_seq = slab.Trialsequence(conditions=(0, 1, 2), n_reps=1)
        for idx in adaptor_types_seq:
            print('Adaptor: ', adaptor_names[idx])
            thresh = jnd(adaptor=adaptor_types[idx])
            jnds[idx, i] = thresh
            print(f'jnd for {adaptor_names[idx]}: {round(thresh, ndigits=1)}')
            _results_file.write(thresh, tag=adaptor_names[idx])

        # save a string representation of the numpy results table
        _results_file.write(str(jnds), tag=f'results round {i}')

        if i == 1:
            if abs(jnds[idx, 1] - jnds[idx, 0]) > _jnd_diff_thresh:
                repeats.append(idx)
                print(
                    'Difference to first JND too large. Marked for repetition!'
                )

    for idx in repeats:
        print(
            'Repeating measurement because of a large difference between JNDs of the same condition.'
        )
        jnd(adaptor=adaptor_types[idx])
def test_deviants():
    for i in range(100):
        conditions = 4
        n_reps = 50
        n_trials = conditions*n_reps
        deviant_frequency = 0.2 * random.random() + 0.05
        sequence = slab.Trialsequence(conditions=conditions, n_reps=n_reps, deviant_freq=deviant_frequency)
        count_deviants = 0
        for trial in sequence:
            if trial == 0:
                count_deviants += 1
            else:
                assert trial == sequence.conditions[sequence.trials[sequence.this_n] - 1]
        assert count_deviants == sequence.trials.count(0) == int(n_trials*deviant_frequency)
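The convention tested here: when `deviant_freq` is set, deviant trials are mixed into the sequence and show up as condition index 0 during iteration, while regular trials return their condition as usual. A minimal oddball-style sketch:

import slab

sequence = slab.Trialsequence(conditions=4, n_reps=50, deviant_freq=0.1)
for trial in sequence:
    if trial == 0:
        pass  # present the deviant stimulus here
    else:
        pass  # present the standard stimulus for condition `trial` (1..4)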
Example 11
def main_experiment(subject=None):
    '''
    A complex spatially extended moving sound is generated ('moving_gaussian'). This stimulus simulates the acoustics of a free-field loudspeaker arc. A Gaussian profile moves from left to right or right to left across the virtual speaker array, and the speed of the movement and the modulation depth (across space) can be varied. Detection thresholds for motion direction are measured at different motion speeds. Then the effect of adaptation by a long moving adapter at one speed on the detectability of motion at different speeds is measured.

    This experiment showcases complex stimulus generation and staircases, among other techniques.
    '''
    global _results_file
    # set up the results file
    if not subject:
        subject = input('Enter subject code: ')
    _results_file = slab.ResultsFile(subject=subject)
    _ = familiarization() # run the familiarization, the hitrate is saved in the results file
    practice_stairs()  # run the stairs practice
    print('The main part of the experiment starts now (motion direction thresholds).')
    adapter_list = make_adapters()
    # results table with three columns: speed, jnd_no_adapter, jnd_adapter
    jnds = numpy.zeros((len(_speeds), 3, _n_blocks_per_speed))
    repeats = list()  # for collecting conditions marked for re-measuring due to large differences in jnds
    for i in range(_n_blocks_per_speed):
        speed_seq = slab.Trialsequence(conditions=_speeds, n_reps=1, kind='random_permutation')
        for speed in speed_seq:
            idx = _speeds.index(speed)  # index of the current speed value, used for results table
            jnds[idx, 0, :] = speed
            if numpy.random.choice((True, False)):  # presented without adapters first
                # each call to jnd prints instructions and saves to the results file
                jnds[idx, 1, i] = jnd(speed)
                jnds[idx, 2, i] = jnd(speed, adapter_list)
            else:  # with adapters first
                jnds[idx, 2, i] = jnd(speed, adapter_list)
                jnds[idx, 1, i] = jnd(speed)
            if i == 1:
                # if measurements too different, mark for repeat
                if abs(jnds[idx, 1, 1] - jnds[idx, 1, 0]) > _jnd_diff_thresh:
                    repeats.append((speed, False))
                    print(f'{speed} no adapter: Difference to first JND too large. Marked for repetition!')
                # if measurements too different, mark for repeat
                if abs(jnds[idx, 2, 1] - jnds[idx, 2, 0]) > _jnd_diff_thresh:
                    repeats.append((speed, True))
                    print(f'{speed} with adapters: Difference to first JND too large. Marked for repetition!')
        # save a string representation of the numpy results table
        _results_file.write(str(jnds[:, :, i]), tag=f'results round {i}')
    # rerun the conditions marked for repeated measurements
    if repeats:
        print('Repeating marked JND measurements.')
        for speed, adapters in repeats:  # these will just be tagged in the results file, not in the tables
            if adapters:
                jnd(speed, adapter_list)
            else:
                jnd(speed)
Example 12
def localization_test_headphones(targets, signals, n_reps=1, n_images=5, visual=False):
    """
    Run a basic localization test where previously recorded/generated binaural sounds are played via headphones.
    The procedure is the same as in localization_test_freefield().

    Args:
        targets : rows from the speaker table or index numbers of the speakers.
        signals (array-like) : binaural sounds that are played. Must be ordered corresponding to the targets (first
            element of signals is played for the first row of targets etc.). If the elements of signals are
            instances of slab.Precomputed, a random one is drawn in each trial (useful if you don't want to repeat
            the exact same sound in each trial)
        n_reps(int): number of repetitions for each target
        n_images(int): number of images taken for each head pose estimate
        visual(bool): If True, light a LED at the target position - the speakers must have a LED attached
    Returns:
        instance of slab.Trialsequence: the response is stored in the data attribute as tuples with (azimuth, elevation)
    """

    if not isinstance(CAMERAS, camera.Cameras) or CAMERAS.calibration is None:
        raise ValueError("Camera must be initialized and calibrated before localization test!")
    if not PROCESSORS.mode == "loctest_headphones":
        PROCESSORS.initialize_default(mode="loctest_headphones")
    if not len(signals) == len(targets):
        raise ValueError("There must be one signal for each target!")
    if visual is True:
        if targets.bit.isnull().sum():
            raise ValueError("All speakers must have a LED attached for a test with visual cues")
    targets = [targets.loc[i] for i in targets.index]  # make list from data frame
    seq = slab.Trialsequence(targets, n_reps, kind="non_repeating")
    play_start_sound()
    for trial in seq:
        signal = signals[trial.index_number]  # get the signal corresponding to the target
        if isinstance(signal, slab.Precomputed):  # if signal is precomputed, pick a random one
            signal = signal[np.random.randint(len(signal))]
        try:
            signal = slab.Binaural(signal)
        except IndexError:
            logging.warning("Binaural sounds must have exactly two channels!")
        wait_for_button()
        while check_pose(fix=[0, 0]) is None:  # check if head is in position
            play_warning_sound()
            wait_for_button()
        # write sound into buffer
        PROCESSORS.write(tag="playbuflen", value=signal.nsamples, procs="RP2")
        PROCESSORS.write(tag="data_l", value=signal.left.data.flatten(), procs="RP2")
        PROCESSORS.write(tag="data_r", value=signal.right.data.flatten(), procs="RP2")
        seq = _loctest_trial(trial, seq, visual, n_images)
    play_start_sound()
    return seq
Example 13
def localization_test_freefield(targets, duration=0.5, n_reps=1, n_images=5, visual=False):
    """
    Run a basic localization test where the same sound is played from different
    speakers in randomized order, without playing the same position twice in
    a row. After every trial the presentation is paused and the listener has
    to localize the sound source by pointing the head towards the source and
    pressing the response button. The cameras need to be calibrated before the
    test! After every trial the listener has to point to the middle speaker at
    0 elevation and azimuth and press the button to indicate the next trial.

    Args:
        targets : rows from the speaker table or index numbers of the speakers.
        duration (float): duration of the noise played from the target positions in seconds
        n_reps(int): number of repetitions for each target
        n_images(int): number of images taken for each head pose estimate
        visual(bool): If True, light a LED at the target position - the speakers must have a LED attached
    Returns:
        instance of slab.Trialsequence: the response is stored in the data attribute as tuples with (azimuth, elevation)
    """
    if not isinstance(CAMERAS, camera.Cameras) or CAMERAS.calibration is None:
        raise ValueError("Camera must be initialized and calibrated before localization test!")
    if not PROCESSORS.mode == "loctest_freefield":
        PROCESSORS.initialize_default(mode="loctest_freefield")
    PROCESSORS.write(tag="playbuflen", value=int(slab.signal._default_samplerate*duration), procs=["RX81", "RX82"])
    if visual is True:
        if targets.bit.isnull().sum():
            raise ValueError("All speakers must have a LED attached for a test with visual cues")
    targets = [targets.loc[i] for i in targets.index]  # make list from data frame
    seq = slab.Trialsequence(targets, n_reps, kind="non_repeating")
    play_start_sound()
    for trial in seq:
        wait_for_button()
        while check_pose(fix=[0, 0]) is None:  # check if head is in position
            play_warning_sound()
            wait_for_button()
        sound = slab.Sound.pinknoise(duration=duration)
        set_signal_and_speaker(signal=sound.data.flatten(), speaker=trial.index_number)
        seq = _loctest_trial(trial, seq, visual, n_images)
    play_start_sound()
    return seq
def test_sequence():
    for _ in range(100):
        conditions_list = [numpy.random.randint(2, 10), ["a", "b", "c"], [("a", "b"), (1.5, 3.2)],
                           [slab.Sound.pinknoise(), slab.Sound.whitenoise()]]
        kinds = ["random_permutation", "non_repeating", "infinite"]
        for conditions in conditions_list:
            n_reps = numpy.random.randint(1, 10)
            kind = random.choice(kinds)
            sequence = slab.Trialsequence(conditions=conditions, n_reps=n_reps, kind=kind)
            if isinstance(conditions, int):
                conditions = list(range(1, conditions+1))
            assert sequence.conditions == conditions
            assert all(numpy.unique(sequence.trials) == numpy.array(range(1, sequence.n_conditions + 1)))
            if kind != "infinite":
                assert sequence.n_trials == len(conditions) * n_reps
                for trial in sequence:
                    assert trial == sequence.conditions[sequence.trials[sequence.this_n]-1]
            else:
                count = 0
                for trial in sequence:
                    assert trial == sequence.conditions[sequence.trials[sequence.this_n]-1]
                    count += 1
                    if count > 100:
                        break
Example 15
def calibrate_camera(speakers, n_reps=1, n_images=5, show=True):
    """
    Calibrate all cameras by lighting up a series of LEDs and estimate the pose when the head is pointed
    towards the currently lit LED. This results in a list of world and camera coordinates which is used to
    calibrate the cameras.

    Args:
        speakers: rows from the speaker table. The speakers must have a LED attached
        n_reps(int): number of repetitions for each target
        n_images(int): number of images taken for each head pose estimate
    Returns:
        pandas DataFrame: camera and world coordinates acquired (calibration is performed automatically)
    """
    # TODO: save the camera calibration in a temporary directory
    if not PROCESSORS.mode == "cam_calibration":  # initialize setup in camera calibration mode
        PROCESSORS.initialize_default(mode="cam_calibration")
    speakers = pick_speakers(speakers)
    if not all([s.digital_channel for s in speakers]):
        raise ValueError(
            "All speakers must have a LED attached for a test with visual cues"
        )
    seq = slab.Trialsequence(n_reps=n_reps, conditions=speakers)
    world_coordinates = [(seq.conditions[t - 1].azimuth,
                          seq.conditions[t - 1].elevation) for t in seq.trials]
    camera_coordinates = []
    for speaker in seq:
        write(tag="bitmask",
              value=int(speaker.digital_channel),
              processors=speaker.digital_proc)
        wait_for_button()
        camera_coordinates.append(
            CAMERAS.get_head_pose(average_axis=1,
                                  convert=False,
                                  n_images=n_images))
        write(tag="bitmask", value=0, processors=speaker.digital_proc)
    CAMERAS.calibrate(world_coordinates, camera_coordinates, plot=show)
Example 16
elif experiment == "freefield":
    response = setup.localization_test_freefield(duration=cfg["dur_loctest"],
                                                 n_reps=cfg["n_reps_training"],
                                                 speakers=cfg["test_speakers"],
                                                 visual=False)

response.to_csv(SUBJECTDIR / "responses" / ("%s_%s_test.csv" %
                                            (subject, experiment)))
print((response.ele_target -
       response.ele_response).abs().mean())  # print mean error

# STEP 5: run audiovisual training with adapter and probe:
elevation_freefield.localization_test_adapter(n_reps=cfg["n_reps_training"],
                                              visual=True)

# STEP 6: run experiment
for block in range(cfg["n_blocks"]):
    input("### press enter to start block %s ###" % (block))
    # Load previously generated stimulus sequence:
    target_seq, trial_seq = slab.Trialsequence(), slab.Trialsequence()
    target_seq.load_json(file_name=SUBJECTDIR / "sequences" /
                         ("%s_mmn%s.seq" % (subject, block)))
    trial_seq.load_json(file_name=SUBJECTDIR / "sequences" /
                        ("%s_trials%s.seq" % (subject, block)))
    # Run block and save the response:
    response = elevation_freefield.run_block(target_seq, trial_seq)
    response_path = SUBJECTDIR / "responses" / ("%s_%s_block_%s.csv" %
                                                (subject, experiment, block))
    response.to_csv(response_path)
    print("Saved responses as \n %s" % (response_path))
Example 17
def localization_test(sound, speakers, n_reps, n_images=1):
    """
    Run a basic localization test where the same sound is played from different
    speakers in randomized order, without playing the same position twice in
    a row. After every trial the presentation is paused and the listener has
    to localize the sound source by pointing the head towards the source and
    pressing the response button. The cameras need to be calibrated before the
    test! After every trial the listener has to point to the middle speaker at
    0 elevation and azimuth and press the button to initiate the next trial.
    """
    if not _mode == "localization_test":
        initialize_devices(mode="localization_test")
    if isinstance(sound, slab.sound.Sound) and sound.nchannels == 1:
        data = sound.data.flatten()  # Not sure if flatten is needed...
    elif isinstance(sound, np.ndarray) and sound.ndim == 1:
        data = sound
    else:
        raise ValueError("Sound must be a 1D array or instance of slab.Sound!")
    if camera._cal is None:
        raise ValueError("Camera must be calibrated before localization test!")
    warning = slab.Sound.clicktrain(duration=0.4).data.flatten()
    speakers = speakers_from_list(speakers)
    seq = slab.Trialsequence(speakers, n_reps, kind="non_repeating")
    response = pd.DataFrame(
        columns=["ele_target", "azi_target", "ele_response", "azi_response"])
    while seq.n_remaining > 0:
        _, ch, proc, azi, ele = next(seq)
        trial = {"azi_target": azi, "ele_target": ele}
        set_variable(variable="chan", value=ch, proc="RX8%s" % int(proc))
        set_variable(variable="chan", value=25, proc="RX8%s" % int(3 - proc))
        set_variable(variable="playbuflen", value=len(sound), proc="RX8s")
        set_variable(variable="data", value=data, proc="RX8s")
        trigger()
        while not get_variable(variable="response", proc="RP2"):
            time.sleep(0.01)
        ele, azi = camera.get_headpose(n=n_images, convert=True, average=True)
        # TODO: implement success sound?
        trial["azi_response"], trial["ele_response"] = azi, ele
        response = response.append(trial, ignore_index=True)
        head_in_position = 0
        while head_in_position == 0:
            while not get_variable(variable="response", proc="RP2"):
                time.sleep(0.01)
            ele, azi = camera.get_headpose(n=1, convert=True, average=True)
            if np.isnan(ele):
                ele = 0
            if np.isnan(azi):
                azi = 0
            if np.abs(ele - _fix_ele) < _fix_acc and np.abs(
                    azi - _fix_azi) < _fix_acc:
                head_in_position = 1
            else:
                print(np.abs(ele - _fix_ele), np.abs(azi - _fix_azi))
                set_variable(variable="data", value=warning, proc="RX8s")
                set_variable(variable="chan", value=1, proc="RX81")
                set_variable(variable="chan", value=25, proc="RX82")
                set_variable(variable="playbuflen",
                             value=len(warning),
                             proc="RX8s")
                trigger()
    return response
Example 18
def localization_test_headphones(speakers,
                                 signals,
                                 n_reps=1,
                                 n_images=5,
                                 visual=False):
    """
    Run a basic localization test where previously recorded/generated binaural sounds are played via headphones.
    The procedure is the same as in localization_test_freefield().

    Args:
        speakers : rows from the speaker table or index numbers of the speakers.
        signals (array-like) : binaural sounds that are played. Must be ordered corresponding to the targets (first
            element of signals is played for the first row of targets etc.). If the elements of signals are
            instances of slab.Precomputed, a random one is drawn in each trial (useful if you don't want to repeat
            the exact same sound in each trial)
        n_reps(int): number of repetitions for each target
        n_images(int): number of images taken for each head pose estimate
        visual(bool): If True, light a LED at the target position - the speakers must have a LED attached
    Returns:
        instance of slab.Trialsequence: the response is stored in the data attribute as tuples with (azimuth, elevation)
    """
    if not PROCESSORS.mode == "loctest_headphones":
        PROCESSORS.initialize_default(mode="loctest_headphones")
    if not len(signals) == len(speakers):
        raise ValueError("There must be one signal for each target!")
    if not all(
            isinstance(sig, (slab.Binaural, slab.Precomputed))
            for sig in signals):
        raise ValueError(
            "Signal argument must be an instance of slab.Binaural or slab.Precomputed."
        )
    if visual is True:
        if not all([s.digital_channel for s in speakers]):
            raise ValueError(
                "All speakers must have a LED attached for a test with visual cues"
            )
    seq = slab.Trialsequence(speakers, n_reps, kind="non_repeating")
    play_start_sound()
    for speaker in seq:
        signal = signals[seq.trials[seq.this_n] - 1]  # get the signal corresponding to the target
        if isinstance(signal, slab.Precomputed):  # if signal is precomputed, pick a random one
            signal = signal[np.random.randint(len(signal))]
        try:
            signal = slab.Binaural(signal)
        except IndexError:
            logging.warning("Binaural sounds must have exactly two channels!")
        wait_for_button()
        while check_pose(fix=[0, 0]) is None:  # check if head is in position
            play_warning_sound()
            wait_for_button()
        write(tag="playbuflen", value=signal.n_samples, processors="RP2")
        write(tag="data_l", value=signal.left.data.flatten(), processors="RP2")
        write(tag="data_r",
              value=signal.right.data.flatten(),
              processors="RP2")
        if visual is True:  # turn LED on
            write(tag="bitmask",
                  value=speaker.digital_channel,
                  processors=speaker.digital_proc)
        play()
        wait_to_finish_playing()
        wait_for_button()
        pose = get_head_pose(n_images=n_images)
        if visual is True:  # turn LED off
            write(tag="bitmask", value=0, processors=speaker.digital_proc)
        seq.add_response(pose)
    play_start_sound()
    # change conditions property so it contains only the azimuth and elevation of the source
    seq.conditions = np.array([(s.azimuth, s.elevation)
                               for s in seq.conditions])
    return seq
def jnd(condition, practise=False):
    '''
    Presents a staircase for a 3AFC task and returns the threshold.
    This threshold is used in the main experiment as jnd.
    condition ... 'room', 'voice', or 'itd'
    '''
    print('Three sounds are presented in each trial.')
    print('They are always different, but sometimes')
    if condition == 'room':
        print('one sound is played in a larger room,')
        print('and sometimes all three are played in the same room.')
        print('Was the larger room presented first, second, or third?')
    elif condition == 'voice':
        print('one is spoken by a different (larger) person,')
        print('and sometimes all three are spoken by the same person.')
        print('Was the larger person presented first, second, or third?')
    elif condition == 'itd':
        print('one is played from a different direction (slightly to the left),')
        print('and sometimes all three are played from straight ahead.')
        print('Was the sound slightly from the left played first, second, or third?')
    else:
        raise ValueError(f'Invalid condition {condition}.')
    print('Press 1 for first, 2 for second, 3 for third.')
    print('The difference will get more and more difficult to hear.')
    input('Press enter to start JND estimation...')
    repeat = 'r'
    condition_values = globals()[condition+'s'] # get the parameter list (vars rooms, voices, or itds) from condition string
    while repeat == 'r':
        # make a random, non-repeating list of words to present during the staircase
        word_seq = slab.Trialsequence(conditions=word_list, kind='infinite', label='word_seq')
        # define the staircase
        if practise:
            stairs = slab.Staircase(start_val=len(condition_values)-1, n_reversals=3,
                                    step_sizes=[4, 3, 2], min_val=0, max_val=len(condition_values)-1,
                                    n_up=1, n_down=1, n_pretrials=0)
        else:
            stairs = slab.Staircase(start_val=len(condition_values)-4, n_reversals=15,
                                    step_sizes=[4, 2], min_val=0, max_val=len(condition_values)-1,
                                    n_up=1, n_down=2, step_up_factor=1.5,
                                    n_pretrials=1)  # should give approx. 70% hitrate
            _results_file.write(f'{condition} jnd:', tag='time')
        for trial in stairs:
            current = condition_values[int(trial)]
            # load stimuli
            word = next(word_seq)
            word2 = next(word_seq)
            word3 = next(word_seq)
            if condition == 'room':
                jnd_stim = slab.Sound(stim_folder / word  / f'{word}_SER{default_voice:.4g}_GPR168_{current}_{default_itd}.wav')
            elif condition == 'voice':
                jnd_stim = slab.Sound(stim_folder / word  / f'{word}_SER{current:.4g}_GPR168_{default_room}_{default_itd}.wav')
            elif condition == 'itd':
                jnd_stim = slab.Sound(stim_folder / word  / f'{word}_SER{default_voice:.4g}_GPR168_{default_room}_{current}.wav')
            default_stim1 = slab.Sound(stim_folder / word2 / f'{word2}_SER{default_voice:.4g}_GPR168_{default_room}_{default_itd}.wav')
            default_stim2 = slab.Sound(stim_folder / word3 / f'{word3}_SER{default_voice:.4g}_GPR168_{default_room}_{default_itd}.wav')
            stairs.present_afc_trial(jnd_stim, [default_stim1, default_stim2], isi=ISI_stairs, print_info=practise)
            if practise:
                stairs.plot()
        thresh = stairs.threshold()
        thresh_condition_value = condition_values[numpy.ceil(thresh).astype('int')]
        if practise:
            stairs.close_plot()
        else:
            print(f'{condition} jnd: {round(thresh, ndigits=1)}')
            _results_file.write(repr(stairs), tag=f'stairs {condition}')
            _results_file.write(thresh, tag=f'jnd {condition}')
            _results_file.write(thresh_condition_value, tag=f'jnd condition value {condition}')
        repeat = input('Press enter to continue, "r" to repeat this threshold measurement.\n\n')
    return thresh_condition_value
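The `n_up=1, n_down=2` staircase above converges where two consecutive hits are as likely as one miss, i.e. where p**2 = 0.5, giving p = sqrt(0.5) ≈ 0.707; that is the "approx. 70% hitrate" the comment refers to (the `step_up_factor=1.5` weighting shifts this target somewhat). A quick check:

import math

# 1-up-2-down rule: converges where p(two hits in a row) = 0.5, so p = sqrt(0.5)
print(math.sqrt(0.5))  # ~0.707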
Example 20
def localization_test_adapter(n_reps, speakers=None, visual=False):
    setup.set_speaker_config("dome")
    setup.initialize_devices(ZBus=True, cam=True, RX8_file=cfg["rx8_file_test"], RP2_file=cfg["rp2_file"])

    setup.set_variable(variable="n_adapter", value=int(
        cfg["dur_adapter"]*cfg["fs"]), proc="RX8s")
    setup.set_variable(variable="n_target", value=int(
        cfg["dur_target"]*cfg["fs"]), proc="RX8s")
    setup.set_variable(variable="playbuflen", value=int(
        cfg["dur_adapter"]*cfg["fs"]), proc="RX8s")
    setup.set_variable(variable="t_delay",
                       value=(cfg["dur_adapter"]-cfg["dur_ramp"])*1000, proc="RX8s")
    if visual is True:
        speakers = setup.all_leds()
    else:
        speakers = setup.speakers_from_list(speakers)
    seq = slab.Trialsequence(speakers, n_reps, kind="non_repeating")
    response = pd.DataFrame(columns=["ele_target", "azi_target", "ele_response", "azi_response"])
    setup.set_variable(variable="signal_len", value=len(start), proc="RX8s")
    setup.set_variable(variable="signal", value=start, proc="RX8s")
    setup.trigger(trig=2, proc="RX81")
    setup.wait_to_finish_playing()
    while not setup.get_variable(variable="response", proc="RP2"):
        time.sleep(0.01)
    while seq.n_remaining > 0:
        speaker, ch, proc_ch, azi, ele, bit, proc_bit = next(seq)
        stim = slab.Sound.pinknoise(duration=int(cfg["dur_target"]*cfg["fs"]))
        stim.ramp(when="both", duration=cfg["dur_ramp"], envelope=None)
        adapter_l, adapter_r = make_adapter(sound=slab.Sound.pinknoise(
            duration=int(cfg["dur_adapter"]*cfg["fs"])))
        setup.set_signal_and_speaker(signal=stim, speaker=speaker,
                                     apply_calibration=True)
        setup.set_variable(variable="data_adapter_l", value=adapter_l.data, proc="RX8s")
        setup.set_variable(variable="data_adapter_r", value=adapter_r.data, proc="RX8s")
        if visual is True:
            setup.set_variable(variable="bitmask", value=bit, proc=proc_bit)
        setup.trigger()
        setup.wait_to_finish_playing()
        while not setup.get_variable(variable="response", proc="RP2"):
            time.sleep(0.01)
        ele_r, azi_r = camera.get_headpose(convert=True, average=True)
        if visual is True:
            setup.set_variable(variable="bitmask", value=0, proc=proc_bit)
        trial = {"azi_target": azi, "ele_target": ele,
                 "azi_response": azi_r, "ele_response": ele_r}
        response = response.append(trial, ignore_index=True)
        head_in_position = 0  # check if the head is in position for next trial
        while head_in_position == 0:
            while not setup.get_variable(variable="response", proc="RP2"):
                time.sleep(0.01)
            ele, azi = camera.get_headpose(
                n_images=1, convert=True, average=True)
            if np.isnan(ele):
                ele = 0
            if np.isnan(azi):
                azi = 0
            if (np.abs(ele-cfg["fixation_point"][1]) < cfg["fixation_accuracy"] and
                    np.abs(azi-cfg["fixation_point"][0]) < cfg["fixation_accuracy"]):
                head_in_position = 1
            else:
                print(np.abs(ele-cfg["fixation_point"][1]),
                      np.abs(azi-cfg["fixation_point"][0]))
                setup.set_variable(variable="signal_len", value=len(warning), proc="RX8s")
                setup.set_variable(variable="signal", value=warning, proc="RX8s")
                setup.trigger(trig=2, proc="RX81")
    return response
n_repeat_conditions = 2
n_repeat_files = 2
n_repeat_speakers = 4
speakers = main.get_speaker_list(list(range(9, 24)))  # speakers used in the experiment
priming_speakers = speakers.iloc[::3, :]  # use every third speaker for priming
speakers = [speakers.loc[i] for i in speakers.index]

# make a folder for the subject:
subject = "subject7"
try:
    os.makedirs(DIR/"data"/subject)
except FileExistsError:
    print(f"folder {DIR/'data'/subject} already exists")

# generate and save the trial sequences:
conditions = slab.Trialsequence(conditions=["positive", "negative", "neutral"],
                                n_reps=n_repeat_conditions, kind='non_repeating')

conditions.save_json(DIR/"data"/subject/"conditions.json")
n_files = 30  # number of .wav files that contain the stimuli (per condition)
for i in range(conditions.n_trials+1):  # make trial lists for single blocks, +1 for initial noise block
    file_seq = slab.Trialsequence(conditions=n_files, n_reps=n_repeat_files, kind='non_repeating')
    speaker_seq = slab.Trialsequence(conditions=speakers, n_reps=n_repeat_speakers, kind='non_repeating')
    file_seq.save_json(DIR/"data"/subject/f"file_seq{i}.json")
    speaker_seq.save_pickle(DIR/"data"/subject/f"speaker_seq{i}.pickle")

# run a "noise" block:
file_seq = slab.Trialsequence(str(DIR/"data"/subject/"file_seq0.json"))
speaker_seq = slab.Trialsequence(str(DIR/"data"/subject/"speaker_seq0.pickle"))
response = block(kind="noise", speaker_seq=speaker_seq, file_seq=file_seq)  # run the block

# run the blocks: