Example 1
import numpy as np
from numpy.testing import assert_raises  # one plausible source of assert_raises
from expyfun.stimuli import play_sound


def test_play_sound():
    """Test playing a sound."""
    data = np.zeros((2, 100))  # 2-channel, 100-sample silent signal
    play_sound(data).stop()
    play_sound(data[0], norm=False, wait=True)  # mono, unnormalized, blocking
    # 3-D input is invalid and should raise
    assert_raises(ValueError, play_sound, data[:, :, np.newaxis])
Example 2
# (imports as in Example 1)
def test_play_sound():
    """Test playing a sound."""
    data = np.zeros((2, 100))
    play_sound(data).stop()
    play_sound(data[0], norm=False, wait=True)
    assert_raises(ValueError, play_sound, data[:, :, np.newaxis])
    # Make sure Pyglet can handle a lot of sounds
    for _ in range(100):
        snd = play_sound(data)
        # we manually stop and delete here, because we don't want to
        # have to wait for our Timer instances to get around to doing
        # it... this also checks to make sure calling `delete()` more
        # than once is okay (it is).
        snd.stop()
        snd.delete()
Example 3
import numpy as np
import pytest

from expyfun.stimuli import play_sound
# (_check_skip_backend is a helper defined elsewhere in the test module)


def test_play_sound(backend, hide_window):  # only works if windowing works
    """Test playing a sound."""
    _check_skip_backend(backend)
    data = np.zeros((2, 100))
    play_sound(data).stop()
    play_sound(data[0], norm=False, wait=True)
    pytest.raises(ValueError, play_sound, data[:, :, np.newaxis])
    # Make sure each backend can handle a lot of sounds
    for _ in range(10):
        snd = play_sound(data)
        # we manually stop and delete here, because we don't want to
        # have to wait for our Timer instances to get around to doing
        # it... this also checks to make sure calling `delete()` more
        # than once is okay (it is).
        snd.stop()
        snd.delete()
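
This variant takes `backend` and `hide_window` from pytest fixtures. A minimal
sketch of how such a backend fixture could be parametrized in a conftest.py
(the fixture and its parameter values below are illustrative assumptions, not
expyfun's actual test configuration):

import pytest

@pytest.fixture(params=['pyglet', 'rtmixer'])
def backend(request):
    """Provide each sound-backend name to the test in turn."""
    return request.param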
Example 4
"""
Generate more advanced auditory stimuli
=======================================

This shows the methods we provide to facilitate generation of
more advanced stimuli.
"""

import numpy as np

from expyfun.stimuli import convolve_hrtf, play_sound, window_edges

fs = 44100
dur = 0.5
freq = 500.
# make a square wave by thresholding a sinusoid
sig = np.sin(freq * 2 * np.pi * np.arange(dur * fs, dtype=float) / fs)
sig = ((sig > 0) - 0.5) / 5.  # maps to +/-0.1: reasonably quiet for play_sound
sig = window_edges(sig, fs)

play_sound(sig, norm=False, wait=True)

move_sig = np.concatenate([convolve_hrtf(sig, fs, ang)
                           for ang in range(-90, 91, 15)], axis=1)
play_sound(move_sig, norm=False, wait=True)

import matplotlib.pyplot as mpl
mpl.ion()
t = np.arange(move_sig.shape[1]) / float(fs)
mpl.plot(t, move_sig.T)
mpl.xlabel('Time (sec)')
Example 5
    # (excerpt from the body of a generate_stimuli() function: freqs, t, rms,
    # names, rng, num_trials, num_freqs, save_as, and output_dir are defined
    # earlier in that function; write_hdf5, write_wav, op, and plt are
    # imported at the top of that file)
    # generate sinewaves & RMS normalize
    wavs = [np.sin(2 * np.pi * f * t) for f in freqs]
    wavs = [rms / np.sqrt(np.mean(w**2)) * w for w in wavs]

    # collect into dictionary & save
    wav_dict = {n: w for (n, w) in zip(names, wavs)}
    if save_as == 'hdf5':
        num_reps = num_trials // num_freqs + 1
        trials = np.tile(range(num_freqs), num_reps)
        trial_order = rng.permutation(trials[0:num_trials])
        wav_dict.update({
            'trial_order': trial_order,
            'freqs': freqs,
            'fs': fs,
            'rms': rms
        })
        write_hdf5(op.join(output_dir, 'equally_spaced_sinewaves.hdf5'),
                   wav_dict,
                   overwrite=True)
    elif save_as == 'wav':
        for n in names:
            write_wav(op.join(output_dir, n + '.wav'), wav_dict[n], int(fs))
    return wav_dict


if __name__ == '__main__':
    wav_dict = generate_stimuli()
    plt.plot(wav_dict['stim_0_500'][:1000])
    play_sound(wav_dict['stim_0_500'])
    plt.show()
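
The HDF5 branch above stores the waveforms together with the trial metadata.
A short sketch of loading them back, assuming a read_hdf5 counterpart to
write_hdf5 in expyfun.io (adjust the import if your version differs):

import os.path as op
from expyfun.io import read_hdf5

stims = read_hdf5(op.join('.', 'equally_spaced_sinewaves.hdf5'))
print(stims['trial_order'], stims['fs'])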
Example 6
# (excerpt: `data`, `fs`, and `t` are a waveform, its sample rate, and a time
# vector defined earlier; `vocode` and `rms` come from expyfun.stimuli, and
# `mpl` is matplotlib.pyplot)
# noise vocoder
data_noise = vocode(data, fs, mode='noise')
data_noise = data_noise * 0.01 / rms(data_noise)
# sinewave vocoder
data_tone = vocode(data, fs, mode='tone')
data_tone = data_tone * 0.01 / rms(data_tone)
# poisson vocoder
data_click = vocode(data, fs, mode='poisson', rate=400)
data_click = data_click * 0.01 / rms(data_click)
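
# The three blocks above repeat the same pattern: scale to a target RMS of
# 0.01. The pattern could be factored into a helper like this (normalize_rms
# is a hypothetical refactoring, not part of expyfun):
def normalize_rms(x, target=0.01):
    """Scale x so its root-mean-square amplitude equals target."""
    return x * target / rms(x)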

# combine all three
cutoff = data.shape[-1] // 3
data_allthree = data_noise.copy()
data_allthree[cutoff:2 * cutoff] = data_tone[cutoff:2 * cutoff]
data_allthree[2 * cutoff:] = data_click[2 * cutoff:]
snd = play_sound(data_allthree, fs, norm=False, wait=False)

# Uncomment this to play the original, too:
# snd = play_sound(data, fs, norm=False, wait=False)

mpl.ion()
ax1 = mpl.subplot(3, 1, 1)
ax1.plot(t, data)
ax1.set_title('Original')
ax1.set_ylabel('Amplitude')
ax2 = mpl.subplot(3, 1, 2, sharex=ax1, sharey=ax1)
ax2.plot(t, data_noise)
ax2.set_title('Vocoded')
ax3 = mpl.subplot(3, 1, 3, sharex=ax1)
ax3.set_title('Spectrogram')  # (fixed: was set on ax2, clobbering its title)
ax3.set_ylabel('Frequency (Hz)')
ax3.specgram(data_noise, Fs=fs)  # (present in the newer copy of this example)
Example 7
"""
Generate more advanced auditory stimuli
=======================================

This shows the methods we provide to facilitate generation of
more advanced stimuli.
"""

import numpy as np
import matplotlib.pyplot as plt

from expyfun import building_doc
from expyfun.stimuli import convolve_hrtf, play_sound, window_edges

fs = 24414
dur = 0.5
freq = 500.
# let's make a square wave
sig = np.sin(freq * 2 * np.pi * np.arange(dur * fs, dtype=float) / fs)
sig = ((sig > 0) - 0.5) / 5.  # make it reasonably quiet for play_sound
sig = window_edges(sig, fs)

play_sound(sig, fs, norm=False, wait=True)

move_sig = np.concatenate(
    [convolve_hrtf(sig, fs, ang) for ang in range(-90, 91, 15)], axis=1)
if not building_doc:
    play_sound(move_sig, fs, norm=False, wait=True)

t = np.arange(move_sig.shape[1]) / float(fs)
plt.plot(t, move_sig.T)
plt.xlabel('Time (sec)')
plt.show()
Example 8
    # (excerpt from the same generate_stimuli() function as Example 5, from a
    # newer version of expyfun)
    # strings for the filenames / dictionary keys
    freq_names = [str(int(f)) for f in freqs]
    names = ['stim_%s_%s' % (n, f) for n, f in enumerate(freq_names)]

    # generate sinewaves & RMS normalize
    wavs = [np.sin(2 * np.pi * f * t) for f in freqs]
    wavs = [rms / np.sqrt(np.mean(w ** 2)) * w for w in wavs]

    # collect into dictionary & save
    wav_dict = {n: w for (n, w) in zip(names, wavs)}
    if save_as == 'hdf5':
        num_reps = num_trials // num_freqs + 1
        trials = np.tile(range(num_freqs), num_reps)
        trial_order = rng.permutation(trials[0:num_trials])
        wav_dict.update({'trial_order': trial_order, 'freqs': freqs, 'fs': fs,
                         'rms': rms})
        write_hdf5(op.join(output_dir, 'equally_spaced_sinewaves.hdf5'),
                   wav_dict, overwrite=True)
    elif save_as == 'wav':
        for n in names:
            write_wav(op.join(output_dir, n + '.wav'), wav_dict[n], int(fs))
    return wav_dict


if __name__ == '__main__':
    wav_dict = generate_stimuli(save_as=None)
    plt.plot(wav_dict['stim_0_500'][:1000])
    play_sound(wav_dict['stim_0_500'])
    plt.show()
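
The trial-order logic above tiles the frequency indices until at least
num_trials are available, then shuffles a truncated copy. A tiny standalone
illustration with made-up sizes:

import numpy as np

rng = np.random.RandomState(0)
num_freqs, num_trials = 4, 10
num_reps = num_trials // num_freqs + 1          # 3 repetitions cover 10 trials
trials = np.tile(range(num_freqs), num_reps)    # [0 1 2 3 0 1 2 3 0 1 2 3]
trial_order = rng.permutation(trials[:num_trials])  # shuffled, length 10
print(trial_order)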
Example 9
# (excerpt: `data`, `fs`, and `t` are defined earlier, as in Example 6;
# `vocode` and `rms` come from expyfun.stimuli, `plt` is matplotlib.pyplot)
# noise vocoder
data_noise = vocode(data, fs, mode='noise')
data_noise = data_noise * 0.01 / rms(data_noise)
# sinewave vocoder
data_tone = vocode(data, fs, mode='tone')
data_tone = data_tone * 0.01 / rms(data_tone)
# poisson vocoder
data_click = vocode(data, fs, mode='poisson', rate=400)
data_click = data_click * 0.01 / rms(data_click)

# combine all three
cutoff = data.shape[-1] // 3
data_allthree = data_noise.copy()
data_allthree[cutoff:2 * cutoff] = data_tone[cutoff:2 * cutoff]
data_allthree[2 * cutoff:] = data_click[2 * cutoff:]
snd = play_sound(data_allthree, fs, norm=False, wait=False)

# Uncomment this to play the original, too:
# snd = play_sound(data, fs, norm=False, wait=False)

ax1 = plt.subplot(3, 1, 1)
ax1.plot(t, data)
ax1.set_title('Original')
ax1.set_ylabel('Amplitude')
ax2 = plt.subplot(3, 1, 2, sharex=ax1, sharey=ax1)
ax2.plot(t, data_noise)
ax2.set_title('Vocoded')
ax3 = plt.subplot(3, 1, 3, sharex=ax1)
ax3.set_title('Spectrogram')  # (fixed: was set on ax2, clobbering its title)
ax3.set_ylabel('Frequency (Hz)')
ax3.specgram(data_noise, Fs=fs)
Example 10
"""
This shows how to generate texture coherence stimuli.
"""

import numpy as np
import matplotlib.pyplot as plt

from expyfun.stimuli import texture_ERB, play_sound

fs = 24414
n_freqs = 20
n_coh = 18  # very coherent example

# let's make a textured stimulus and play it
sig = texture_ERB(n_freqs, n_coh, fs=fs, seq=('inc', 'nb', 'sam'))
play_sound(sig, fs, norm=True, wait=True)

###############################################################################
# Let's look at the time course
t = np.arange(len(sig)) / float(fs)
fig, ax = plt.subplots(1)
ax.plot(t, sig.T, color='k')
ax.set(xlabel='Time (sec)', ylabel='Amplitude (normalized)', xlim=t[[0, -1]])
fig.tight_layout()

###############################################################################
# And now the spectrogram:
fig, ax = plt.subplots(1, figsize=(8, 2))
img = ax.specgram(sig, NFFT=1024, Fs=fs, noverlap=800)[3]  # [3]: the AxesImage
img.set_clim([img.get_clim()[1] - 50, img.get_clim()[1]])  # show top 50 dB
ax.set(xlim=t[[0, -1]], ylim=[0, 10000], xlabel='Time (sec)',
       ylabel='Frequency (Hz)')  # (ylabel reconstructed; line was truncated)
Example 11
"""
This shows how to make simple vocoded stimuli.

@author: larsoner
"""

import numpy as np

from expyfun.stimuli import vocode_ci, play_sound, window_edges, read_wav
from expyfun import fetch_data_file

data, fs = read_wav(fetch_data_file('audio/dream.wav'))
data = window_edges(data[0], fs)
t = np.arange(data.size) / float(fs)
data_ci = vocode_ci(data, fs, mode='noise', order=4, verbose=True)

# Uncomment this to play the original, too:
# snd = play_sound(data, fs, norm=False, wait=False)
snd = play_sound(data_ci, fs, norm=False, wait=False)

import matplotlib.pyplot as mpl
mpl.ion()
ax1 = mpl.subplot(2, 1, 1)
ax1.plot(t, data)
ax1.set_title('Original')
ax2 = mpl.subplot(2, 1, 2, sharex=ax1, sharey=ax1)
ax2.plot(t, data_ci)
ax2.set_title('Vocoded')
ax2.set_xlabel('Time (sec)')