def test_merge_frames(self):
    """Merging overlapping LPC frames should reconstruct a single audio signal.

    Builds padded, overlapping frames from a 0..15 ramp and checks that
    Synthesizer._merge_frames cross-fades them into the expected array.
    """
    frames = segment_axis(np.arange(0, 16), 6, 2, end="pad")
    expected = np.array(
        [0, 1, 2, 3, 3.4, 4.25, 6, 7, 6.8, 7.65,
         10, 11, 10.2, 11.05, 14, 15, 0, 0])
    synth = Synthesizer(LPCFrameArray(1, 2, frames))
    merged = synth._merge_frames(frames)
    assert_array_almost_equal(expected, merged)
def main():
    """Command-line entry point: analyze an audio file and resynthesize it.

    Reads the input filename from argv[1] (resolved under ../audio/), runs
    FFT/STFT modal analysis, writes the modal model to ../build/<title>.ro,
    and writes a resynthesized wav via the Synthesizer.
    """
    fname = sys.argv[1]
    title = parse_fname(fname)

    in_path = "../audio/" + fname
    out_path = "../build/" + title + ".ro"

    analyzer = FFT_Analyzer(in_path)
    analyzer.perform_analysis()
    analyzer.stft(20)
    analyzer.get_modal_data(30)

    # Persist the modal model, then render audio from the same analysis.
    writeRObU(out_path, analyzer.modal_model).write()
    Synthesizer(analyzer, title).write_wav()
def record_and_play_audio(): record_seconds = 5 fs = 44100 p = pyaudio.PyAudio() odata = np.zeros(fs * record_seconds) print "Recording audio for the next {0} seconds".format(record_seconds) record_audio(odata, p, fs, record_seconds) wavio.write("before.wav", 44100, odata) print "Audio has recorded, stand by for voice" play_audio(odata, p, fs) print "Encoding and decoding voice through vocoder" analyzer = Analyzer(odata, 10e-3) lpc_frame_array = analyzer.encode() synthesizer = Synthesizer(lpc_frame_array) reconstructed_signal = synthesizer.decode() wavio.write("test.wav", 441000, reconstructed_signal) print "Playing reconstructed audio" play_audio(reconstructed_signal, p, fs) p.terminate()