Exemplo n.º 1
0
def test_read_write_wav():
    """Round-trip WAV I/O at several precisions and check error handling."""
    wav_path = op.join(tempdir, 'temp.wav')
    signal = np.r_[np.random.rand(1000), 1, -1]
    rate = 44100

    # 16-bit integer (the default) loses precision: only ~4 decimals survive
    write_wav(wav_path, signal, rate)
    loaded, rate_in = read_wav(wav_path)
    assert_equal(rate_in, rate)
    assert_array_almost_equal(signal[np.newaxis, :], loaded, 4)

    # refusing to clobber an existing file must raise
    assert_raises(IOError, write_wav, wav_path, signal, rate)

    # a float sampling rate should be coerced to int, with exactly one warning
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        write_wav(wav_path, signal, float(rate), overwrite=True)
        assert_equal(len(caught), 1)

    # 64-bit integers are unsupported
    assert_raises(RuntimeError, write_wav, wav_path, signal, rate,
                  dtype=np.int64, overwrite=True)

    # 32-bit integer keeps more precision
    write_wav(wav_path, signal, rate, dtype=np.int32, overwrite=True)
    loaded, rate_in = read_wav(wav_path)
    assert_equal(rate_in, rate)
    assert_array_almost_equal(signal[np.newaxis, :], loaded, 7)

    if _has_scipy_version('0.13'):
        # 32-bit float: comparable precision to int32
        write_wav(wav_path, signal, rate, dtype=np.float32, overwrite=True)
        loaded, rate_in = read_wav(wav_path)
        assert_equal(rate_in, rate)
        assert_array_almost_equal(signal[np.newaxis, :], loaded, 7)

        # 64-bit float: lossless round-trip
        write_wav(wav_path, signal, rate, dtype=np.float64, overwrite=True)
        loaded, rate_in = read_wav(wav_path)
        assert_equal(rate_in, rate)
        assert_array_equal(signal[np.newaxis, :], loaded)
    else:
        # float WAV support requires scipy >= 0.13
        assert_raises(RuntimeError, write_wav, wav_path, signal, rate,
                      dtype=np.float32, overwrite=True)

    # multi-channel (2 x n) data should round-trip as well
    signal = np.tile(signal[np.newaxis, :], (2, 1))
    write_wav(wav_path, signal[np.newaxis, :], rate, overwrite=True)
    loaded, rate_in = read_wav(wav_path)
    assert_equal(rate_in, rate)
    assert_array_almost_equal(signal, loaded, 4)

    # samples outside [-1, 1] must be rejected
    assert_raises(ValueError, write_wav, wav_path, signal * 2, rate,
                  overwrite=True)
Exemplo n.º 2
0
from expyfun._trigger_controllers import decimals_to_binary
from expyfun import assert_version

# pin the expyfun version this experiment was written against
assert_version('8511a4d')

# hardware sampling rate; suppress_resamp=True below, so the stimuli must
# already be at this rate
fs = 24414
stim_dir = op.join(op.dirname(__file__), 'stimuli', 'ids')
sound_files = ['inForest_part-1-rms.wav',
               'inForest_part-2-rms.wav',
               'inForest_part-3-rms.wav',
               'inForest_part-4-rms.wav',
               'inForest_part-5-rms.wav']

# map part index -> absolute path, so sorting by key preserves part order
sound_files = {j: op.join(stim_dir, k)
               for j, k in enumerate(sound_files)}
# NOTE(review): read_wav returns a (data, fs) tuple, so each `wav` below
# wraps the whole tuple and `wav[0]` is the sample array. Verify that
# np.ascontiguousarray over an (array, int) tuple is really intended here.
wavs = [np.ascontiguousarray(read_wav(v)) for _, v in sorted(sound_files.items())]
# convert length of wave files into number of bits
n_bits = int(np.floor(np.log2(len(wavs)))) + 1
with ExperimentController('IDS', stim_db=75, stim_fs=fs, stim_rms=0.01,
                          check_rms=None, suppress_resamp=True) as ec:
    for ii, wav in enumerate(wavs):
        # stamp trigger line prior to stimulus onset
        ec.clear_buffer()
        ec.load_buffer(wav[0])
        # TTL code encodes which part is being played
        ec.identify_trial(ec_id=str(ii), ttl_id=decimals_to_binary([ii], [n_bits]))
        # our next start time is our last start time, plus
        # the stimulus duration
        stim_len = 1./fs * len(wav[0][0])  # in seconds
        ec.start_stimulus()  # stamps stimulus onset
        ec.wait_secs(stim_len)  # wait through stimulus duration to stop the playback
        ec.stop()
randomize = True

# RANDOM NUMBER GENERATOR (fixed seed for a reproducible stimulus order)
rng = np.random.RandomState(0)

# READ IN RECORDED WORDS
word_cats = {}   # word -> semantic category
word_wavs = {}   # word -> samples resampled to output_fs
targ_words = []
word_fs = {}     # word -> original sampling rate of the recording
print('Reading in WAVs and possibly resampling')
with open(wordlist, 'r') as wl:
    for line in wl:
        # each line is "word<TAB>category"
        word, category = line.strip().split('\t')
        word_cats[word] = category
        samples, fs = stim.read_wav(op.join(worddir, word + '.wav'))
        # length after resampling, rounded to the nearest sample
        num_samples = int(round(samples.shape[-1] * float(output_fs) / fs))
        word_wavs[word] = resample(samples, num_samples, axis=-1)
        word_fs[word] = fs
# longest word in characters (useful for fixed-width display/logging)
longest_word_letters = max(len(x) for x in word_cats)

# REVERSE DICTIONARY FROM word_cats: category -> list of words
cat_words = {}
for word, cat in word_cats.items():
    cat_words.setdefault(cat, []).append(word)

# EXCLUDE THE LARGEST CATEGORIES FOR THIS EXPERIMENT
categories = cat_words.keys()
Exemplo n.º 4
0
           for r in regions]
n_talkers = len(talkers[0][0])

# For each talker make a string
for ri, r in enumerate(regions):  # Make pairs out of each region
    for ti in range(n_talkers):  # pairs should be gender-paired
        # random sentence order for this talker pair
        order = rand.permutation(n_sent)
        string = np.zeros((2, 0))  # stereo accumulator, grown sentence by sentence
        save_fn = ''
        for si in order:
            s = sentences[si]
            wavs = []
            for gi, g in enumerate(genders):
                t = talkers[ri][gi][ti]
                fn = file_str % (r, g, t, s)
                w, fs = stim.read_wav(fn, verbose=False)
                wavs += [w]
                if si == order[0]:
                    # build the filename from region/gender/talker once,
                    # on the first sentence only
                    save_fn += '%s%s%s_' % (r, g, t)
            # center the shorter recording within a segment as long as the
            # longer one, then append that segment to the running string
            lens = np.array([w.shape[-1] for w in wavs])
            len_max = lens.max()
            shifts = ((len_max - lens) / 2.).astype(int)
            string = np.concatenate((string, np.zeros((2, len_max))), -1)
            string_len = string.shape[-1]
            for gi, start_ind in enumerate(shifts - len_max + string_len):
                string[gi, start_ind:start_ind + lens[gi]] = wavs[gi]
        save_fn = save_fn[:-1] + '.wav'
        # NOTE(review): this immediately overwrites the filename assembled
        # above — the previous line is a dead store; confirm which naming
        # scheme was intended.
        save_fn = r + str(ti) + '.wav'
        stim.write_wav(os.path.join(save_path, save_fn),
                       string, fs, overwrite=True)
        print('Finished talker %i / %i.' % (ti + 1, n_talkers))
Exemplo n.º 5
0
This shows how to make simple vocoded stimuli.

@author: larsoner
"""

import numpy as np
import matplotlib.pyplot as mpl

from expyfun.stimuli import vocode, play_sound, window_edges, read_wav, rms
from expyfun import fetch_data_file

print(__doc__)


# load the sample recording, keep channel 0, and taper its edges
data, fs = read_wav(fetch_data_file('audio/dream.wav'))
data = window_edges(data[0], fs)
t = np.arange(data.size) / float(fs)  # time axis in seconds
# noise vocoder
data_noise = vocode(data, fs, mode='noise')
data_noise = data_noise * 0.01 / rms(data_noise)  # normalize to 0.01 RMS
# sinewave vocoder
data_tone = vocode(data, fs, mode='tone')
data_tone = data_tone * 0.01 / rms(data_tone)  # normalize to 0.01 RMS
# poisson vocoder
data_click = vocode(data, fs, mode='poisson', rate=400)
data_click = data_click * 0.01 / rms(data_click)  # normalize to 0.01 RMS

# combine all three
cutoff = data.shape[-1] // 3  # split point: one third of the signal
data_allthree = data_noise.copy()
Exemplo n.º 6
0
========================

This shows how to make simple vocoded stimuli.

@author: larsoner
"""

import numpy as np
import matplotlib.pyplot as plt

from expyfun.stimuli import vocode, play_sound, window_edges, read_wav, rms
from expyfun import fetch_data_file

print(__doc__)

# load the sample recording, keep channel 0, and taper its edges
data, fs = read_wav(fetch_data_file('audio/dream.wav'))
data = window_edges(data[0], fs)
t = np.arange(data.size) / float(fs)  # time axis in seconds
# noise vocoder
data_noise = vocode(data, fs, mode='noise')
data_noise = data_noise * 0.01 / rms(data_noise)  # normalize to 0.01 RMS
# sinewave vocoder
data_tone = vocode(data, fs, mode='tone')
data_tone = data_tone * 0.01 / rms(data_tone)  # normalize to 0.01 RMS
# poisson vocoder
data_click = vocode(data, fs, mode='poisson', rate=400)
data_click = data_click * 0.01 / rms(data_click)  # normalize to 0.01 RMS

# combine all three
cutoff = data.shape[-1] // 3  # split point: one third of the signal
data_allthree = data_noise.copy()
Exemplo n.º 7
0
    rms_out = 0.01  # normalize them to have about 0.01 RMS
    dB_out = 65
    if not op.isdir(out_dir):
        os.mkdir(out_dir)
    list_names = ['sentnew2a.lst', 'sentnew2b.lst']  # list names
    # These lists have the same set of stimuli, right?
    names, codes, isis = parse_list(op.join(stim_dir, list_names[0]))
    names_b, codes_b, isis_b = parse_list(op.join(stim_dir, list_names[1]))
    assert set(names) == set(names_b)
    list_name = list_names[0]  # okay, just process one then
    del list_names

    datas = list()
    print('Reading and resampling stimuli...')
    for ii, (name, code) in enumerate(zip(names, codes)):
        data, fs_read = read_wav(op.join(stim_dir, name), verbose=False)
        assert fs == fs_read
        # stimuli are expected to be duplicated mono (identical channels)
        assert (data[0] == data[1]).all()
        data = data[0]  # one channel
        datas.append(resample(data, fs_out, fs, npad='auto'))
        # resampled duration must match the original to within 1 ms
        assert np.isclose(datas[-1].shape[-1] / float(fs_out),
                          data.shape[-1] / float(fs), atol=1e-3)  # 1 ms
    assert len(datas) == len(names)
    # one global factor so that the *mean* RMS across stimuli equals rms_out
    rmss = [rms(d) for d in datas]
    factor = rms_out / np.mean(rmss)
    print('Writing stimuli...')
    for name, data in zip(names, datas):
        data *= factor  # RMS mean across stimuli is now our desired value
        write_wav(op.join(out_dir, name), data, fs_out, verbose=False,
                  overwrite=True)
    rmss = np.array([rms(d) for d in datas])
Exemplo n.º 8
0
    rms_out = 0.01  # normalize them to have about 0.01 RMS
    dB_out = 65
    if not op.isdir(out_dir):
        os.mkdir(out_dir)
    list_names = ['sentnew2a.lst', 'sentnew2b.lst']  # list names
    # These lists have the same set of stimuli, right?
    names, codes, isis = parse_list(op.join(stim_dir, list_names[0]))
    names_b, codes_b, isis_b = parse_list(op.join(stim_dir, list_names[1]))
    assert set(names) == set(names_b)
    list_name = list_names[0]  # okay, just process one then
    del list_names

    datas = list()
    print('Reading and resampling stimuli...')
    for ii, (name, code) in enumerate(zip(names, codes)):
        data, fs_read = read_wav(op.join(stim_dir, name), verbose=False)
        assert fs == fs_read
        # channels must be identical (duplicated mono)
        assert (data[0] == data[1]).all()
        data = data[0]  # one channel
        datas.append(resample(data, fs_out, fs, npad='auto'))
        # duration preserved to within 1 ms after resampling
        assert np.isclose(datas[-1].shape[-1] / float(fs_out),
                          data.shape[-1] / float(fs),
                          atol=1e-3)  # 1 ms
    assert len(datas) == len(names)
    # one global factor so that the mean RMS across stimuli equals rms_out
    rmss = [rms(d) for d in datas]
    factor = rms_out / np.mean(rmss)
    print('Writing stimuli...')
    for name, data in zip(names, datas):
        data *= factor  # RMS mean across stimuli is now our desired value
        write_wav(op.join(out_dir, name),
                  data,
Exemplo n.º 9
0
"""
===============================================================================
Script 'play-noise.py'
===============================================================================

This script plays noise for headphone calibration purposes.
"""
# @author: drmccloy
# Created on Tue Jul 25 14:35:40 PDT 2017
# License: BSD (3-clause)

from expyfun import ExperimentController, get_keyboard_input
from expyfun.stimuli import read_wav

# load noise file
noise, fs = read_wav('whitenoise_16bit_44100Hz_70dB.wav')
dur = noise.shape[-1] / fs

ec_params = dict(exp_name='test-noise',
                 participant='noise',
                 session='0',
                 audio_controller='pyglet',
                 response_device='keyboard',
                 stim_fs=44100,
                 stim_rms=0.01,
                 check_rms=None,
                 output_dir=None,
                 force_quit=['q'],
                 full_screen=False,
                 window_size=(800, 600),
                 version='dev')
# -*- coding: utf-8 -*-
"""
=============================
Script 'DAS-cog-load stimuli'
=============================

This script makes spatially-distributed word streams.
"""
# Author: Dan McCloy <*****@*****.**>
#
# License: BSD (3-clause)

import os.path as op
from glob import glob
from expyfun.stimuli import rms, read_wav, write_wav

indir = 'monotonizedWords'
outdir = 'normalizedWords'
target_rms = 0.01

files = glob(op.join(indir, '*.wav'))

# rescale every word so its RMS equals target_rms, writing the result
# under the same filename in the output directory
for wav_path in files:
    base_name = op.split(wav_path)[-1]
    samples, fs = read_wav(wav_path)
    scaled = samples * target_rms / rms(samples)
    write_wav(op.join(outdir, base_name), scaled, fs)
Exemplo n.º 11
0
#                               sound_dur / 2.5)
#    b, a = sig.butter(2, fc / (fs / 2))
#    sounds = sig.lfilter(b, a, sounds)
    # synthesize a harmonic complex: 200 Hz fundamental plus harmonics up
    # to 1200 Hz (range stops below 1301), RMS-normalized, with an
    # exponential decay and tapered edges
    t = np.arange(sound_len, dtype=float) / fs
    sounds = np.zeros(sound_len)
    f0 = 200
    for f in range(f0, 1301, f0):
        sounds += np.sin(2 * np.pi * t * f)
    sounds *= base_vol / stim.rms(sounds, keepdims=True)
    sounds *= np.exp(-t / sound_dur * 4)  # decay: ~4 time constants over sound_dur
    sounds = stim.window_edges(sounds, fs, 0.01)  # 0.01 — presumably edge-taper duration in s; confirm
else:
    assert(len(sound_files) == 1)
    temp = []
    for wav in sound_files:
        temp += [stim.read_wav(wav)[0]]
    fs = stim.read_wav(sound_files[0])[1]
    lens = [w.shape[1] for w in temp]
    sounds = np.zeros((2, np.max(lens)))
    for si, l in enumerate(lens):
        sounds[si, :l] = temp[si]
    sounds = sig.resample(sounds, 44100 * sounds.shape[1] / fs, axis=1)
    fs = 44100
    sound_len = sounds.shape[1]



# Make the ITD function
def delay(x, time, fs, axis=-1, keeplength=False, pad=1):
    extra_pad = 200  # add 200 samples to prevent wrapping
    samps = int(np.floor(time * fs))
Exemplo n.º 12
0
    for ix, stim in stimuli.itertuples():
        # NOTE(review): itertuples() yields (Index, value) pairs, so this
        # assumes `stimuli` has exactly one column of wav filenames — confirm.
        # are we done with training?
        if run_training and ix == first_trial:
            fmt = [green, white, live_keys[0]]
            ec.screen_prompt(msg['end_training'].format(*fmt))

        # break between blocks
        trial_num = ix - first_trial
        if trial_num > 0 and trial_num % block_len == 0:
            block_num = trial_num // block_len
            fmt = [block_num, n_blocks, live_keys[0]]
            ec.screen_prompt(msg['end_block'].format(*fmt))

        # load the wav file (negative indices are training trials)
        # NOTE(review): `dir` shadows the builtin of the same name —
        # consider renaming if this variable is not reused later.
        dir = train_dir if ix < 0 else stim_dir
        wav, fs = read_wav(op.join(dir, stim))
        dur = wav.shape[-1] / fs
        ec.load_buffer(wav)

        # identify trial and save to logfile
        # filename layout assumed: chars 0-5 talker, 7-11 sentence,
        # 13-14 SNR — verify against the actual stimulus filenames
        talker, sentence, snr = stim[:6], stim[7:12], stim[13:15]
        trial_id_parts = [
            'trial:',
            str(ix), 'talker:', talker, 'sentence:', sentence, 'SNR:', snr
        ]
        ec.identify_trial(ec_id=' '.join(trial_id_parts), ttl_id=[])

        # show current stim info and play stimulus
        fmt = [green, white] + trial_id_parts
        if ix < 0:
            fmt[2] = 'training:'
Exemplo n.º 13
0
opath = 'strings'  # input directory of per-talker "string" wavs
spath = 'maskers'  # output directory for the summed maskers

genders = ['M', 'F']  # male in left channel, female in right
regions = ['NW', 'CH']
talkers = [str(i) for i in range(5)]
loc = ['C', 'S']  # location labels paired with the azimuths below
az = [0, -60]     # azimuths in degrees (presumably; confirm convolve_hrtf units)

n_talkers = 3
# wavs[gender][location] accumulates a (2, n) stereo mixture
wavs = [[np.zeros((2, 0)) for _ in range(2)] for _ in range(2)]
for ti, t in enumerate(talkers[:n_talkers]):
    #for ri, r in enumerate(regions):
    ri = 0
    r = regions[ri]
    wav, fs = stim.read_wav(join(opath, r + t + '.wav'))
    for gi, g in enumerate(genders):
        for li, (l, a) in enumerate(zip(loc, az)):
            # spatialize this gender's channel at azimuth `a`
            wav_loc = stim.convolve_hrtf(wav[gi], fs, a)
            # zero-pad whichever of (accumulator, new wav) is shorter
            lens = [wavs[gi][li].shape[-1], wav_loc.shape[-1]]
            dl = lens[0] - lens[1]
            if dl < 0:
                wavs[gi][li] = np.concatenate((wavs[gi][li],
                                               np.zeros((2, -dl))), -1)
            if dl > 0:
                wav_loc = np.concatenate((wav_loc, np.zeros((2, dl))), -1)
            # sum across talkers to build the masker
            wavs[gi][li] += wav_loc

for gi, g in enumerate(genders):
    for li, (l, a) in enumerate(zip(loc, az)):
        # NOTE(review): `r` here is whatever the loop above last bound
        # (regions[0], since ri is fixed at 0) — confirm that is intended
        fn = join(spath, '%s_%s_%s.wav' % (r, g, l))