def log_amp_test():
    """Tests logarithmic scaling of amplitude factors.

    Layers two differently-panned copies of the same file at very
    different amplitude factors (1.0 vs. 0.125), mixes down with a
    raised amplitude ceiling (0.5) and plays the result.
    """
    signal = WAV(african) * Repan(0, None)  # switch L/R
    signal += 0.125 * WAV(african) * Repan(None, 1)
    audio = signal.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.5)
    play_Audio(audio, is_wait=True)
def test_transform_chain():
    """Composes several transforms into one chain and applies it to an excerpt."""
    excerpt = WAV(african)[10e3:20e3]
    chain = MovingAverage(5) * Fade(duration=0.5e3)
    chain *= Gain(Line(0, -10, 3e3) | Line(-10, 0, 5e3))  # dip then recover
    excerpt *= chain
    audio = excerpt.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    play_Audio(audio)
def test_amplitude_param():
    """Drives Amplitude with two control curves (hold+ramp and a sine LFO)."""
    excerpt = WAV(african)[15e3:25e3]
    hold_then_fade = Constant(1, 3e3) | Line(1, 0.01, duration=7e3)
    wobble = SineCurve(frequency=3, depth=0.3, baseline=0.7, duration=10e3)
    excerpt *= Amplitude(hold_then_fade, wobble)
    audio = excerpt.mixdown(sample_rate=24000, byte_width=2, max_amplitude=0.2)
    play_Audio(audio)
def test_reverse_channels():
    """Mixes attenuated stepped slices into each channel, then flips channel order."""
    mix = WAV(african)[15e3:25e3]
    mix[0] += (WAV(african)[15e3:35e3:2] * Gain(-6))[1]
    mix[1] += (WAV(african)[15e3:45e3:3] * Gain(-6))[0]
    mix = mix[1::-1]  # reverse the channel order
    audio = mix.mixdown(sample_rate=24000, byte_width=2, max_amplitude=0.2)
    play_Audio(audio)
def test_to_mono():
    """Collapses a stereo excerpt to mono by summing its two channels."""
    sig = WAV(african)[15e3:25e3]
    ### sig = sum(sig) # raises error - we do not know how many channels there are
    sig = sig[0] + sig[1]  # use this
    #sig *= Mono() # or this
    audio = sig.mixdown(sample_rate=24000, byte_width=2, max_amplitude=0.2)
    play_Audio(audio)
def test_gain_param():
    """Applies Gain driven by two line curves, then extra gain on part of channel 1."""
    sig = WAV(african)[15e3:28e3]
    long_ramp = Line(-80, 0, duration=8e3)
    short_ramp = Line(-40, -6, duration=4e3)
    sig *= Gain(long_ramp, short_ramp)
    sig[1, 4e3:] *= Gain(-6)  # attenuate channel 1 from 4s onward
    audio = sig.mixdown(sample_rate=24000, byte_width=2, max_amplitude=0.2)
    play_Audio(audio)
def cancellation_test():
    """Subtracts a channel-repanned copy from the original to test phase cancellation."""
    #signal = Sine(duration=5000) - 0.999*Sine(duration=5000)
    #signal += 0.01*Sine(frequency=130,duration=1000)
    signal = WAV(african) - WAV(african) * Repan(1, 0)
    #signal += 0.5*WAV(african) # basically neutralizes the center "channel"
    #signal += 5.0*WAV(african) # strengthens center
    audio = signal.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    play_Audio(audio, is_wait=True)
def concat_overload_test():
    """Concatenates slices with |, including a reversed slice summed with a sine."""
    #s = Sine(frequency=250, duration=2e3) | Triangle(frequency=300, duration=3e3)
    src = WAV(african)
    # `*` and `+` bind tighter than `|`, so this whole sum is one concat segment
    middle = src[11e3:13e3] * Reverse() + Sine(frequency=300, duration=2e3)
    src = src[5e3:5.5e3] | src[6e3:6.8e3] | middle | src[9e3:10.8e3]
    audio = src.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    play_Audio(audio)
def IIR_one_pole_filters_test():
    """Low-passes the first half and high-passes the second half of an excerpt."""
    sig = WAV(african)[10e3:20e3]
    sig[:5e3] *= IIR_OnePole_LowPass(880)
    sig[5e3:] *= IIR_OnePole_HighPass(440)
    audio = sig.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    #play_Audio(audio)
    export_test(audio, IIR_one_pole_filters_test)
def IIR_one_pole_test():
    """Applies a hand-computed one-pole low-pass to the first 5 seconds."""
    sig = WAV(african)[10e3:20e3]
    normalized_cutoff = 880 / 44100  # cutoff as a fraction of the sample rate
    pole = np.e ** (-2 * np.pi * normalized_cutoff)
    sig[:, :5e3] *= IIR_OnePole(1 - pole, pole)
    audio = sig.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    #play_Audio(audio)
    export_test(audio, IIR_one_pole_test)
def averagesample_test(filename):
    """Runs a 9-tap all-ones FIR (a crude moving average) over a file and plays it."""
    wav = WAV(filename)
    # alternative kernels kept for experimentation:
    #wav *= FIR(5,4,3,2,1,2,3,4,5)
    wav *= FIR(1, 1, 1, 1, 1, 1, 1, 1, 1)
    #wav *= FIR(25,16,9,4,1,4,9,16,25)
    #wav *= FIR(1,0,0,0,0,0,0,0,1)
    #wav *= FIR(1,-1,1,-1,1,-1,1,-1,1)
    #wav *= FIR(-1,-1,-1,-1,10,-1,-1,-1,-1) # high pass!
    audio = wav.mixdown(sample_rate=44100, byte_width=2)
    play_Audio(audio, is_wait=True)
def slice_test():
    """Repeats a short smoothed slice and a longer part, layered over the track."""
    hihat = WAV(african)[:1061] * MovingAverage(5)
    hihat **= 30  # repeat 30 times
    part = WAV(african)[5 * 1061:5 * 1061 + 3 * 1061]
    part **= 20
    track = WAV(african) + part * Shift(4 * 1061) * Gain(-6) - hihat
    #track = hihat
    audio = track.mixdown(sample_rate=32000, byte_width=2, max_amplitude=0.2)
    #play_Audio(audio)
    export_test(audio, slice_test)
def WAV_test(filename=""):
    """Layers tremolo, faded noise and a faint triangle tone over a WAV file.

    NOTE(review): the mixdown result is neither played nor exported here —
    confirm whether that is intentional.
    """
    wav = WAV(filename)
    wav *= SineAM(frequency=0.06, size=0.3)
    wav *= Fade(is_in=True, duration=10)
    wav += 0.03 * WhiteNoise(duration=20 * 1000) * SineAM(frequency=0.03, size=0.2)
    wav += (0.06 * 0.7) * Triangle(frequency=230, duration=30) * Fade(is_in=True, duration=3 * 1000)[0:2]
    audio = wav.mixdown(sample_rate=44100, byte_width=2)
def dummy_reverb_test():
    """Fakes a reverb from shifted, smoothed copies plus a downsampled layer."""
    #amp = lambda x:
    #wav = WAV(african) + WAV(african)*Amplitude(amp)*Shift(duration=500)
    #wav = WAV(african)*SineAM(frequency=0.12, size=0.25)
    #wav += WAV(african)*SineAM(frequency=0.12, size=0.25, phase=np.pi)*Shift(duration=500)*FIR(1,1,1,1,1,1,1,1,1)
    echoes = [(1 - 8 / 10) * WAV(african) * Shift(duration=100 * tap) * MovingAverage(2 * tap + 1)
              for tap in range(5)]
    wav = sum(echoes)
    wav += 0.6 * WAV(african) * Downsample(factor=5) * MovingAverage(5)
    audio = wav.mixdown(sample_rate=44100, byte_width=2)
    play_Audio(audio, is_wait=True)
def reverse_phase_test():
    """Alternates polarity on 1-second slices in L while R keeps the original."""
    src = WAV(african)
    left = (src[10e3:11e3] | -src[11e3:12e3] | src[12e3:13e3] | -src[13e3:14e3]
            | src[14e3:15e3] | -src[15e3:16e3] | src[16e3:17e3])
    right = src[10e3:17e3]
    combined = left * Repan(0, None) + right * Repan(None, 1)
    audio = combined.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    #play_Audio(audio)
    export_test(audio, reverse_phase_test)
def slice_set_test():
    """Overwrites a time slice with a channel-swapped sum of itself."""
    sig = WAV(african)
    # careful with 5e3, creates float slices
    #sig[5e3:18e3] = sig[5e3:18e3]*Repan() + sig[5e3:18e3]*Downsample(5)*Gain(-3)
    #sig[5e3:18e3] *= Repan(1,0)
    sig[5e3:18e3] = sig[5e3:18e3] * Repan(1, None) + sig[5e3:18e3] * Repan(None, 0)
    # TODO found a bug here? does it really keep both copies of the slice separate?
    # also test sig = sig[0:50] & sine() & sig[50:100]
    audio = sig.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    play_Audio(audio)
def repan_reverb_test():
    """Reverb-like layering with the pan position alternating per echo."""
    # TODO these aren't relevant for new pan
    echoes = []
    for tap in range(5):
        # even taps pan (1, 0.3); odd taps use the mirrored (0.3, 1)
        pan_args = (1, 0.3) if tap % 2 == 0 else (0.3, 1)
        echoes.append((1 - 8 / 10) * WAV(african) * Shift(duration=100 * tap)
                      * MovingAverage(2 * tap + 1) * Pan(*pan_args))
    wav = sum(echoes)
    wav += 0.6 * WAV(african) * Pan(0, None) * Downsample(factor=5) * MovingAverage(5)
    wav += 0.6 * WAV(african) * Pan(None, 1)
    audio = wav.mixdown(sample_rate=44100, byte_width=2)
    play_Audio(audio, is_wait=True)
def concat_scalar_test():
    """Concatenates slices with small scalar gaps, laid out differently per channel."""
    src = WAV(african)
    gap = 0.03
    left = (src[10e3:11e3] | gap | src[11e3:12e3] | gap | src[12e3:13e3] | gap
            | src[13e3:14e3] | gap | src[14e3:15e3] | gap | src[15e3:16e3] | gap
            | src[16e3:24e3])
    right = (src[10e3:18e3] | gap | src[18e3:19e3] | gap | src[19e3:20e3] | gap
             | src[20e3:21e3] | gap | src[21e3:24e3])
    combined = left * Repan(0, None) + right * Repan(None, 1)
    audio = combined.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    play_Audio(audio)
def messy_random_concat_test():
    """Randomly concatenates slices into L/R tracks, then applies a fake reverb."""
    src = WAV(african)
    max_length = 20e3

    def messy_track():
        # 0 serves as the initial empty operand for |= concatenation
        track = 0
        elapsed = 0
        while elapsed < max_length:
            duration = 400 + np.random.random() * 3e3
            elapsed += duration
            start = 4e3 + (30 - 4) * np.random.random() * 1e3
            track |= src[start:start + duration]
        return track

    left = messy_track() + messy_track()
    right = messy_track() + messy_track()
    combined = left * Repan(0, None) + right * Repan(None, 1)
    out = sum([(1 - 8 / 10) * combined * Shift(duration=100 * tap)
               * MovingAverage(width=2 * tap + 1) for tap in range(5)])
    out += 0.6 * combined * Downsample(factor=5) * MovingAverage(width=5)
    audio = out.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    play_Audio(audio)
def Butterworth_experiment():
    """Filters channel 0 with Butterworth while channel 1 is panned along a line."""
    excerpt = WAV(african)[10e3:25e3]
    filtered = excerpt[0] * Butterworth(cutoff=880)
    sweep = Line(-100, 100, 13e3)
    panned = excerpt[1] * Pan(sweep)
    out = filtered[0:2] + panned
    audio = out.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    play_Audio(audio)
def after_test_2():
    """Tests ** as the repeat operator on a concatenated four-note phrase."""
    phrase = Signal.concat(Sine(midC(-7 + 12), 1e3), Sine(midC(-3), 1e3),
                           Sine(midC(-8 + 12), 1e3), Sine(midC(-1), 1e3))
    phrase **= 5  # repeat the phrase five times
    phrase += WAV(african)
    print(phrase)
    audio = phrase.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    play_Audio(audio, is_wait=True)
def pan_stereo_test():
    """Pans each source channel along its own trajectory under a -3 dB pan law."""
    excerpt = WAV(african)[10e3:30e3]
    moved = excerpt[0] * Pan(Line(-100, 50, 20e3)) + excerpt[1] * Pan(Line(0, 100, 20e3))
    # stereo signal panned in space from (-100,0) to (50,100):
    # the stereo field moves right gradually as well as expanding
    # from an opening of 90 degrees to 45 degrees (with headphones)
    Pan.panLaw = -3  # class-level setting — presumably read at mixdown; TODO confirm
    audio = moved.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    play_Audio(audio)
def __init__(self, response): assert isinstance( response, (Audio, np.ndarray, str)) # Audio, direct buffer, or filename # TODO: accept Signal as response? assert "scipy" in _supported, "Convolution: SciPy not supported" if isinstance(response, np.ndarray): self.response = response # TODO cache this using hash elif isinstance(response, Audio): self.response = response.audio else: from gensound.signals import WAV self.response = WAV(response).audio # load file directly to Audio # TODO consider converting self.response to Audio if len( self.response.shape ) == 1: # ensure dimensionality. TODO too similar to Audio.ensure_2d! self.response.resize((1, self.response.shape[0]))
def channel_slice_test():
    """Assigns a channel/time slice of one signal into a channel of another."""
    # series of earlier experiments, kept for reference:
    #s = WAV(african)[5e3:15e3]
    # t = s[0]
    # t = s[1,1e3:7e3]
    # t = s[1e3:7e3]
    #s[1] = 0.132*WhiteNoise(duration=10e3)#*Gain(-20)
    #s[0,3e3:7e3] = s[1,2e3:6e3]
    #s[0,1e3:6e3] += 0.13*Sine(frequency=midC(8))
    #s[0,1e3:5e3] *= Reverse()
    #s[1] = s[0]*Gain(-6)
    # etc...
    ##########
    sig = 0.1 * Sine()
    sig[1] = WAV(african)[0, 5e3:15e3]
    audio = sig.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    play_Audio(audio)
def one_impulse_reverb_test():
    """Applies the OneImpulseReverb effect to an excerpt, plays and exports it."""
    from gensound.effects import OneImpulseReverb
    sig = WAV(african)[10e3:20e3] * OneImpulseReverb(mix=1.2, num=2000, curve="steep")
    sig.play(44100, max_amplitude=0.2)
    export_test(sig.mixdown(44100), one_impulse_reverb_test)
def guitar_amp_test():
    """Pushes a clean guitar take through heavy gain into the amp-sim transform."""
    sig = WAV(gtrcln) * Gain(20) * GuitarAmp_Test(harshness=10, cutoff=4000)
    audio = sig.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    export_test(audio, guitar_amp_test)
def crossfade_bitransform_syntax_test():
    """Exercises the `head | CrossFade(...) | tail` syntax.

    NOTE(review): the mixdown result is discarded — nothing is played or
    exported; confirm whether that is intentional.
    """
    sig = WAV(african)[10e3:20e3]
    sig = sig[:5e3] | CrossFade(duration=0.5e3) | sig[5e3:]
    audio = sig.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
def test_negative_shift_combine():
    """Shifts the tail of an excerpt in place by 1 second, then exports the result."""
    sig = WAV(african)[10e3:20e3]
    sig[5e3:] = sig[5e3:] * Shift(1e3)
    audio = sig.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    export_test(audio, test_negative_shift_combine)
def test_negative_shift():
    """Concatenates the head with a tail shifted backwards by 2.5 seconds."""
    sig = WAV(african)[10e3:20e3]
    sig = sig[:5e3] | sig[5e3:] * Shift(-2.5e3)
    audio = sig.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    #export_test(audio, test_negative_shift)
    play_Audio(audio)
def IIR_general_test():
    """Applies a general IIR filter (explicit coefficient lists) from 3s onward."""
    sig = WAV(african)[10e3:20e3]
    sig[3e3:] *= IIR_general([0, -0.5, 0, 0], [0.25, 0.15, 0.07, 0.03])
    audio = sig.mixdown(sample_rate=44100, byte_width=2, max_amplitude=0.2)
    # play_Audio(audio)
    export_test(audio, IIR_general_test)