Example #1
import json
import logging
from pathlib import Path

# module imports assumed from ibllib; exact paths may vary across versions
from ibllib.io import raw_data_loaders
from ibllib.pipes import extract_session

_logger = logging.getLogger(__name__)


def audio_training(root_data_folder, dry=False, max_sessions=False):
    from ibllib.io.extractors import training_audio as audio
    audio_flags = Path(root_data_folder).rglob('audio_training.flag')
    c = 0
    for flag in audio_flags:
        c += 1
        if max_sessions and c > max_sessions:
            return
        _logger.info(flag)
        if dry:
            continue
        session_path = flag.parent
        try:
            settings = raw_data_loaders.load_settings(session_path)
            typ = extract_session.get_task_extractor_type(
                settings.get('PYBPOD_PROTOCOL'))
        except json.decoder.JSONDecodeError:
            typ = 'unknown'
        # this extractor only applies to biased, training and habituation sessions
        if typ not in ['biased', 'training', 'habituation']:
            flag.unlink()
            continue
        audio.extract_sound(session_path, save=True, delete=True)
        flag.unlink()
        session_path.joinpath('register_me.flag').touch()
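
A minimal way to drive this function over a data tree (a sketch; the dry run
only logs the flags it would process, and the path and session cap are
illustrative values borrowed from Example #5):

audio_training('/mnt/s0/Data/Subjects', dry=True)  # list flagged sessions only
audio_training('/mnt/s0/Data/Subjects', max_sessions=10)  # extract at most 10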
Example #2
def test_qc_extract(self):
    # extract audio
    audio.extract_sound(self.ses_path, save=True)
    D = alf.io.load_object(self.ses_path / 'alf', '_ibl_audioSpectrogram')
    cues = alf.io.load_object(self.ses_path / 'alf',
                              '_ibl_audioOnsetGoCue.times_microphone')
    self.assertEqual(cues['times_microphone'].size, 5)
    self.assertEqual(D['power'].shape[0], D['times_microphone'].shape[0])
    self.assertEqual(D['frequencies'].shape[1], D['power'].shape[1])
Example #3
def test_qc_extract(self):
    # extract audio
    audio.extract_sound(self.ses_path, save=True)
    D = alf.io.load_object(self.ses_path / 'raw_behavior_data',
                           'audioSpectrogram')
    cues = alf.io.load_object(self.ses_path / 'raw_behavior_data',
                              'audioOnsetGoCue')
    self.assertEqual(cues['times_mic'].size, 4)
    self.assertEqual(D['power'].shape[0], D['times_mic'].shape[0])
    self.assertEqual(D['frequencies'].shape[1], D['power'].shape[1])
Example #4
def _run(self, overwrite=False):
    return training_audio.extract_sound(self.session_path,
                                        save=True,
                                        delete=True)
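
This _run is presumably a method of an ibllib pipeline task; a minimal sketch
of an enclosing class, assuming the ibllib.pipes.tasks.Task base class (the
class name and attribute values here are illustrative, not the library's own):

from ibllib.io.extractors import training_audio
from ibllib.pipes import tasks


class TrainingAudio(tasks.Task):
    # hypothetical enclosing task; base class and attributes are assumptions
    cpu = 2
    priority = 10

    def _run(self, overwrite=False):
        # extract spectrogram and cue onsets, then delete the raw .wav
        return training_audio.extract_sound(self.session_path,
                                            save=True,
                                            delete=True)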
Example #5
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns

import alf.io

from ibllib.io.extractors import training_audio as audio

main_path = '/mnt/s0/Data/Subjects'

# step 1: run the audio extraction for every session that has a raw .wav file
for wfile in Path(main_path).rglob('*.wav'):
    print(wfile)
    ses_path = wfile.parents[1]
    audio.extract_sound(ses_path, save=True)

# step 2: plot the result - here for the last session processed above
D = alf.io.load_object(ses_path / 'alf', 'audioSpectrogram')

cues = alf.io.load_object(ses_path / 'alf',
                          'audioOnsetGoCue',
                          attribute='times',
                          timescale='microphone')
tlims = D['times_microphone'][[0, -1]].flatten()
flims = D['frequencies'][0, [0, -1]].flatten()

fig = plt.figure(figsize=[16, 7])
ax = plt.axes()

im = ax.imshow(20 * np.log10(D['power'].T),
               # the call is truncated in the source; a plausible completion
               # using the time/frequency limits computed above:
               aspect='auto', origin='lower', cmap='magma',
               extent=np.r_[tlims, flims])
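
The snippet stops here in the source; a hedged continuation that labels the
axes and overlays the extracted go-cue onsets (the 5000 Hz y-position is an
arbitrary choice for visibility):

# hypothetical continuation: mark go-cue onsets and label the axes
ax.plot(cues['times_microphone'],
        np.full(cues['times_microphone'].shape, 5000), 'k*')
ax.set_xlabel('time (s)')
ax.set_ylabel('frequency (Hz)')
fig.colorbar(im, ax=ax, label='power (dB)')
plt.show()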