# Apply the same trial ordering to every per-trial array
tr_attn = tr_attn[trial_order]
tr_targs = tr_targs[trial_order]
tr_foils = tr_foils[trial_order]
tr_targ_loc = tr_targ_loc[trial_order]
tr_foil_loc = tr_foil_loc[trial_order]
tr_onsets = tr_onsets[trial_order]
tr_onset_samp = tr_onset_samp[trial_order]
tr_stim = tr_stim[trial_order]

# WRITE WAV FILES
if write_wavs:
    print('Writing stimuli to disk')
    for tnum, trial in enumerate(tr_stim):
        fname = 'trial-{}-{}.wav'.format(np.char.mod('%03d', tnum),
                                         ''.join(np.char.array(tr_attn[tnum])))
        stim.write_wav(op.join(stimdir, fname), trial, output_fs,
                       overwrite=True)
    # training stims
    for tnum, trial in enumerate(one_stim):
        fname = 'train-one-{}.wav'.format(np.char.mod('%02d', tnum))
        stim.write_wav(op.join(stimdir, fname), trial, output_fs,
                       overwrite=True)
    for tnum, trial in enumerate(two_stim):
        fname = 'train-two-ab-{}.wav'.format(np.char.mod('%02d', tnum))
        stim.write_wav(op.join(stimdir, fname), trial, output_fs,
                       overwrite=True)
    for tnum, trial in enumerate(four_a_stim):
        fname = 'train-four-a-{}.wav'.format(np.char.mod('%02d', tnum))
        stim.write_wav(op.join(stimdir, fname), trial, output_fs,
                       overwrite=True)
    for tnum, trial in enumerate(four_aa_stim):
        fname = 'train-four-aa-{}.wav'.format(np.char.mod('%02d', tnum))
        stim.write_wav(op.join(stimdir, fname), trial, output_fs,
                       overwrite=True)
Example 2
def test_read_write_wav():
    """Test reading and writing WAV files
    """
    fname = op.join(tempdir, 'temp.wav')
    data = np.r_[np.random.rand(1000), 1, -1]
    fs = 44100

    # Use normal 16-bit precision: not great
    write_wav(fname, data, fs)
    data_read, fs_read = read_wav(fname)
    assert_equal(fs_read, fs)
    assert_array_almost_equal(data[np.newaxis, :], data_read, 4)

    # test our overwrite check
    assert_raises(IOError, write_wav, fname, data, fs)

    # test forcing fs dtype to int
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        write_wav(fname, data, float(fs), overwrite=True)
        assert_equal(len(w), 1)

    # Use 64-bit int: not supported
    assert_raises(RuntimeError, write_wav, fname, data, fs, dtype=np.int64,
                  overwrite=True)

    # Use 32-bit int: better
    write_wav(fname, data, fs, dtype=np.int32, overwrite=True)
    data_read, fs_read = read_wav(fname)
    assert_equal(fs_read, fs)
    assert_array_almost_equal(data[np.newaxis, :], data_read, 7)

    if _has_scipy_version('0.13'):
        # Use 32-bit float: better
        write_wav(fname, data, fs, dtype=np.float32, overwrite=True)
        data_read, fs_read = read_wav(fname)
        assert_equal(fs_read, fs)
        assert_array_almost_equal(data[np.newaxis, :], data_read, 7)

        # Use 64-bit float: perfect
        write_wav(fname, data, fs, dtype=np.float64, overwrite=True)
        data_read, fs_read = read_wav(fname)
        assert_equal(fs_read, fs)
        assert_array_equal(data[np.newaxis, :], data_read)
    else:
        assert_raises(RuntimeError, write_wav, fname, data, fs,
                      dtype=np.float32, overwrite=True)

    # Now try multi-dimensional data
    data = np.tile(data[np.newaxis, :], (2, 1))
    write_wav(fname, data[np.newaxis, :], fs, overwrite=True)
    data_read, fs_read = read_wav(fname)
    assert_equal(fs_read, fs)
    assert_array_almost_equal(data, data_read, 4)

    # Make sure our bound check works
    assert_raises(ValueError, write_wav, fname, data * 2, fs, overwrite=True)
n_talkers = len(talkers[0][0])

# For each talker make a string
for ri, r in enumerate(regions):  # Make pairs out of each region
    for ti in range(n_talkers):  # pairs should be gender-paired
        order = rand.permutation(n_sent)
        string = np.zeros((2, 0))
        save_fn = ''
        for si in order:
            s = sentences[si]
            wavs = []
            for gi, g in enumerate(genders):
                t = talkers[ri][gi][ti]
                fn = file_str % (r, g, t, s)
                w, fs = stim.read_wav(fn, verbose=False)
                wavs += [w[0]]  # single channel, to fit one row of 'string'
                if si == order[0]:
                    save_fn += '%s%s%s_' % (r, g, t)
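            # Zero-pad so both recordings are centered in a common-length
            # window, then append that window to the running 'string' mix.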
            lens = np.array([w.shape[-1] for w in wavs])
            len_max = lens.max()
            shifts = ((len_max - lens) / 2.).astype(int)
            string = np.concatenate((string, np.zeros((2, len_max))), -1)
            string_len = string.shape[-1]
            for gi, start_ind in enumerate(shifts - len_max + string_len):
                string[gi, start_ind:start_ind + lens[gi]] = wavs[gi]
        save_fn = r + str(ti) + '.wav'  # name by region + talker index
        stim.write_wav(os.path.join(save_path, save_fn),
                       string, fs, overwrite=True)
        print('Finished talker %i / %i.' % (ti + 1, n_talkers))
taud_stim = []
nocomp_stim_name = []
control_stim = []

for trial in range(n_tpc):
    for cc in range(n_cond):
        for config, xc in enumerate(space_conditions):
            if config == 0:
                print(-30.0, 0.0)
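                # Mix the two spatialized streams sample-wise (np.nansum
                # treats NaN samples as zero).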
                onetone1_twonoise2 = np.nansum([one_1e_tone, two_2_noise],
                                               axis=0)
                fname = ('onetone1_twonoise2_%s_%s_%s.wav' % (config, cc,
                                                              trial))
                taud_stim.append(fname)
                stim.write_wav(fname, onetone1_twonoise2, fs,
                               overwrite=True, verbose=False)
                # control
                if trial <= 4:
                    c_onetone1_twonoise2 = onetone1_twonoise2
                    fname = ('c_onetone1_twonoise2_%s_%s_%s.wav' % (config, cc,
                                                                    trial))
                    control_stim.append(fname)
                    stim.write_wav(fname, c_onetone1_twonoise2, fs,
                                   overwrite=True, verbose=False)

                onenoise1_twotone2 = np.nansum([one_1e_noise, two_2_tone],
                                               axis=0)
                fname = ('onenoise1_twotone2_%s_%s_%s.wav' % (config, cc,
                                                              trial))
                taud_stim.append(fname)
                stim.write_wav(fname, onenoise1_twotone2, fs,
                               overwrite=True, verbose=False)
Example 6
    names, codes, isis = parse_list(op.join(stim_dir, list_names[0]))
    names_b, codes_b, isis_b = parse_list(op.join(stim_dir, list_names[1]))
    assert set(names) == set(names_b)
    list_name = list_names[0]  # okay, just process one then
    del list_names

    datas = list()
    print('Reading and resampling stimuli...')
    for ii, (name, code) in enumerate(zip(names, codes)):
        data, fs_read = read_wav(op.join(stim_dir, name), verbose=False)
        assert fs == fs_read
        assert (data[0] == data[1]).all()
        data = data[0]  # one channel
        datas.append(resample(data, fs_out, fs, npad='auto'))
        assert np.isclose(datas[-1].shape[-1] / float(fs_out),
                          data.shape[-1] / float(fs), atol=1e-3)  # 1 ms
    assert len(datas) == len(names)
    rmss = [rms(d) for d in datas]
    factor = rms_out / np.mean(rmss)
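    # One global gain so the across-stimulus mean RMS equals rms_out.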
    print('Writing stimuli...')
    for name, data in zip(names, datas):
        data *= factor  # RMS mean across stimuli is now our desired value
        write_wav(op.join(out_dir, name), data, fs_out, verbose=False,
                  overwrite=True)
    rmss = np.array([rms(d) for d in datas])
    assert np.isclose(np.mean(rmss), rms_out)
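    # Convert each stimulus RMS to playback level, given that dB_out
    # corresponds to an RMS of rms_out.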
    play_rmss = dB_out + 20 * np.log10(rmss / rms_out)
    print('Assuming a %s dB SPL set in ExperimentController, stimuli will '
          'play with a long-term RMS of:\n[%0.1f, %0.1f] dB'
          % (dB_out, play_rmss.min(), play_rmss.max()))
# -*- coding: utf-8 -*-
"""
=============================
Script 'DAS-cog-load stimuli'
=============================

This script makes spatially-distributed word streams.
"""
# Author: Dan McCloy <*****@*****.**>
#
# License: BSD (3-clause)

import os.path as op
from glob import glob
from expyfun.stimuli import rms, read_wav, write_wav

indir = 'monotonizedWords'
outdir = 'normalizedWords'
target_rms = 0.01

files = glob(op.join(indir, '*.wav'))

for f in files:
    fname = op.split(f)[-1]
    wav, fs = read_wav(f)
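    # Scale so this file's RMS equals target_rms.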
    new_wav = wav * target_rms / rms(wav)
    write_wav(op.join(outdir, fname), new_wav, fs)
Example 8
regions = ['NW', 'CH']
talkers = [str(i) for i in range(5)]
loc = ['C', 'S']
az = [0, -60]

n_talkers = 3
wavs = [[np.zeros((2, 0)) for _ in range(2)] for _ in range(2)]
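# wavs[gender][location]: running stereo sum of HRTF-spatialized talkers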
for ti, t in enumerate(talkers[:n_talkers]):
    #for ri, r in enumerate(regions):
    ri = 0
    r = regions[ri]
    wav, fs = stim.read_wav(join(opath, r + t + '.wav'))
    for gi, g in enumerate(genders):
        for li, (l, a) in enumerate(zip(loc, az)):
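            # Spatialize this gender's channel at azimuth a, then zero-pad the
            # shorter of the running sum / new signal before accumulating.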
            wav_loc = stim.convolve_hrtf(wav[gi], fs, a)
            lens = [wavs[gi][li].shape[-1], wav_loc.shape[-1]]
            dl = lens[0] - lens[1]
            if dl < 0:
                wavs[gi][li] = np.concatenate((wavs[gi][li],
                                               np.zeros((2, -dl))), -1)
            if dl > 0:
                wav_loc = np.concatenate((wav_loc, np.zeros((2, dl))), -1)
            wavs[gi][li] += wav_loc

for gi, g in enumerate(genders):
    for li, (l, a) in enumerate(zip(loc, az)):
        fn = join(spath, '%s_%s_%s.wav' % (r, g, l))
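        # Scale by sqrt(n_talkers) to keep the summed level comparable to a
        # single talker, then hard-limit to [-1, 1] before writing.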
        data = wavs[gi][li] / np.sqrt(n_talkers)
        data = np.minimum(1, np.maximum(-1, data))
        stim.write_wav(fn, data, fs, overwrite=True)