Example 1
    def test_play_sound_sound_callback_evokes_with_timing(self):
        """play_sound should invoke the sound_callback with trigger timing."""
        fake_sound_path = 'test_sound_file_path'
        trigger_name = 'test_trigger_name'
        trigger_time = 111
        self.test_timing = [trigger_name, trigger_time]

        experiment_clock = mock()

        def assert_timing_callback(timing):
            # the callback should receive the [name, time] pair
            self.assertEqual(timing, self.test_timing)

        # stub out the library interactions
        when(sf).read(fake_sound_path, dtype='float32').thenReturn(
            ('data', MOCK_FS))
        when(sd).play(any(), any()).thenReturn(None)
        when(core).wait(any()).thenReturn(None)
        when(experiment_clock).getTime().thenReturn(trigger_time)

        play_sound(
            fake_sound_path,
            track_timing=True,
            sound_callback=assert_timing_callback,
            trigger_name=trigger_name,
            experiment_clock=experiment_clock,
        )

        # each stubbed call should have fired the expected number of times
        verify(sf, times=1).read(fake_sound_path, dtype='float32')
        verify(sd, times=1).play('data', MOCK_FS)
        verify(core, times=2).wait(any())
 def play_sound_callback(_sti):
     """Play the next alert sound, skipping any 'blank' placeholder path."""
     next_sound = next(alerts)
     if "blank" not in next_sound:
         play_sound(next_sound,
                    sound_load_buffer_time=0,
                    sound_post_buffer_time=sound_delay)
Example 3
    def test_play_sound_raises_exception_if_soundfile_cannot_read_file(self):
        """An unreadable sound file should propagate as an exception."""
        bad_sound_path = 'test_sound_file_path'

        # force the soundfile read to raise
        when(sf).read(bad_sound_path,
                      dtype='float32').thenRaise(Exception(''))

        # playing the unreadable file should surface the exception
        with self.assertRaises(Exception):
            play_sound(bad_sound_path)

        # the failing read should have been attempted exactly once
        verify(sf, times=1).read(bad_sound_path, dtype='float32')
Example 4
    def test_play_sound_returns_timing(self):
        """play_sound should return timing information as a list."""
        fake_sound_path = 'test_sound_file_path'

        # stub out the library interactions
        when(sf).read(fake_sound_path, dtype='float32').thenReturn(
            ('data', MOCK_FS))
        when(sd).play(any(), any()).thenReturn(None)
        when(core).wait(any()).thenReturn(None)

        # play our test sound file
        response = play_sound(fake_sound_path)

        # the result should be a list of timing values
        self.assertIsInstance(response, list)

        # each stubbed call should have fired the expected number of times
        verify(sf, times=1).read(fake_sound_path, dtype='float32')
        verify(sd, times=1).play('data', MOCK_FS)
        verify(core, times=2).wait(any())
Example 5
def _calibration_trigger(experiment_clock: core.Clock,
                         trigger_type: str = 'sound',
                         trigger_name: str = 'calibration_trigger',
                         display=None,
                         on_trigger=None) -> List[tuple]:
    """Calibration Trigger.

    Outputs triggers for the purpose of calibrating data and stimuli.
    This is an ongoing difficulty between OS, DAQ devices and stimuli type.
    This code aims to operationalize the approach to finding the correct DAQ
    samples in relation to our trigger code.

    PARAMETERS
    ----------
        experiment_clock(clock): clock with getTime() method, which is used
            in the code to report timing of stimuli
        trigger_type(string): type of trigger that is desired (sound, image, etc)
        trigger_name(string): name recorded alongside the trigger timing
        display(DisplayWindow): a window that can display stimuli. Currently,
            a Psychopy window. Required for image triggers.
        on_trigger(function): optional callback; if present gets called
            when the calibration trigger is fired; accepts a single
            parameter for the timing information.
            NOTE(review): only invoked for image triggers in this code;
            the sound branch does not call it — confirm that is intended.

    Returns:
        timing(array): timing values for the calibration triggers to be
            written to a trigger file or used to calculate offsets.

    Raises:
        Exception: if an image trigger is requested without a display, or if
            the trigger type is not implemented.
    """
    trigger_callback = TriggerCallback()

    # If sound trigger is selected, output calibration tones
    if trigger_type == SOUND_TYPE:
        # Imported locally; presumably fails fast here when the optional
        # audio dependencies are missing — confirm play_sound relies on them.
        import sounddevice as sd  # noqa: F401
        import soundfile as sf  # noqa: F401

        play_sound(
            sound_file_path='bcipy/static/sounds/1k_800mV_20ms_stereo.wav',
            dtype='float32',
            track_timing=True,
            sound_callback=trigger_callback,
            sound_load_buffer_time=0.5,
            experiment_clock=experiment_clock,
            # FIX: honor the caller-supplied trigger_name (was hard-coded
            # to 'calibration_trigger', silently ignoring the parameter)
            trigger_name=trigger_name)

    elif trigger_type == IMAGE_TYPE:
        if display:
            calibration_box = visual.ImageStim(
                win=display,
                image='bcipy/static/images/testing_images/white.png',
                pos=(-.5, -.5),
                mask=None,
                ori=0.0)
            calibration_box.size = resize_image(
                'bcipy/static/images/testing_images/white.png',
                display.size, 0.75)

            # record timing on the first flip that shows the stimulus
            display.callOnFlip(trigger_callback.callback, experiment_clock,
                               trigger_name)
            if on_trigger is not None:
                display.callOnFlip(on_trigger, trigger_name)

            # present the calibration box for roughly one second of frames
            presentation_time = int(1 * display.getActualFrameRate())
            for _ in range(presentation_time):
                calibration_box.draw()
                display.flip()

        else:
            raise Exception(
                'Display object required for calibration with images!')

    else:
        raise Exception('Trigger type not implemented for Calibration yet!')

    return trigger_callback.timing
Example 6
def offline_analysis(data_folder: str = None,
                     parameters: dict = None,
                     alert_finished: bool = True):
    """Gets calibration data and trains the model in an offline fashion.

    Pickle dumps the trained model into a .pkl file inside ``data_folder``.

    Args:
        data_folder(str): folder of the data; all information is saved to
            and loaded from this folder. Prompted for when not supplied.
        parameters(dict): parameters for running offline analysis; built-in
            fallbacks are used for any missing key. Defaults to an empty
            dict.
        alert_finished(bool): whether or not to alert the user that offline
            analysis is complete.

    How it Works:
    - reads data and information from a .csv calibration file
    - reads trigger information from a .txt trigger file
    - filters data
    - reshapes and labels the data for the training procedure
    - fits the model to the data
        - uses cross validation to select parameters
        - based on the parameters, trains system using all the data
    - pickle dumps model into .pkl file
    - generates and saves offline analysis screen
    - [optional] alert the user finished processing

    Returns:
        model: the trained model.
    """
    # FIX: the signature previously used a mutable default (parameters={}),
    # which is shared across calls; default to None and create a fresh dict.
    if parameters is None:
        parameters = {}

    if not data_folder:
        data_folder = load_experimental_data()

    mode = 'calibration'
    trial_length = parameters.get('collection_window_after_trial_length')

    raw_dat, _, channels, type_amp, fs = read_data_csv(
        f"{data_folder}/{parameters.get('raw_data_name', 'raw_data.csv')}")

    log.info(f'Channels read from csv: {channels}')
    log.info(f'Device type: {type_amp}')

    downsample_rate = parameters.get('down_sampling_rate', 2)

    # Remove 60hz noise with a notch filter
    notch_filter_data = notch.notch_filter(raw_dat, fs, frequency_to_remove=60)

    # bandpass filter from 2-45hz
    filtered_data = bandpass.butter_bandpass_filter(notch_filter_data,
                                                    2,
                                                    45,
                                                    fs,
                                                    order=2)

    # downsample
    data = downsample.downsample(filtered_data, factor=downsample_rate)

    # Process triggers.txt
    triggers_file = parameters.get('trigger_file_name', 'triggers.txt')
    _, t_t_i, t_i, offset = trigger_decoder(
        mode=mode, trigger_path=f'{data_folder}/{triggers_file}')

    # apply any static offset configured for the device
    static_offset = parameters.get('static_trigger_offset', 0)
    offset = offset + static_offset

    # Channel map can be checked from raw_data.csv file.
    # read_data_csv already removes the timestamp column.
    channel_map = analysis_channels(channels, type_amp)

    x, y, _, _ = trial_reshaper(t_t_i,
                                t_i,
                                data,
                                mode=mode,
                                fs=fs,
                                k=downsample_rate,
                                offset=offset,
                                channel_map=channel_map,
                                trial_length=trial_length)

    k_folds = parameters.get('k_folds', 10)

    model, auc = train_pca_rda_kde_model(x, y, k_folds=k_folds)

    log.info('Saving offline analysis plots!')

    # After obtaining the model get the transformed data for plotting purposes
    model.transform(x)
    generate_offline_analysis_screen(
        x,
        y,
        model=model,
        folder=data_folder,
        down_sample_rate=downsample_rate,
        fs=fs,
        save_figure=True,
        show_figure=False,
        channel_names=analysis_channel_names_by_pos(channels, channel_map))

    log.info('Saving the model!')
    with open(f'{data_folder}/model_{auc}.pkl', 'wb') as output:
        pickle.dump(model, output)

    if alert_finished:
        offline_analysis_tone = parameters.get('offline_analysis_tone')
        play_sound(offline_analysis_tone)

    return model