def demo_song():
    """Play a short tune, synthesizing each note sample on the fly."""
    synth = WaveSynth()
    # Map note names to frequencies, starting at piano key 49 (A4 = 440 Hz).
    note_names = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#']
    notes = {name: key_freq(49 + i) for i, name in enumerate(note_names)}
    tempo = 0.3

    def synth_sample(freq, duration):
        # Simple additive "instrument": a few harmonics plus an ADSR envelope.
        harmonics = [(1, 1), (2, 1/2), (4, 1/4), (6, 1/6)]
        tone = synth.harmonics(freq, duration, harmonics)
        return tone.envelope(0.05, 0.2, 0.8, 0.5)

    silence = Sample.from_array([0] * int(synth.samplerate * tempo * 2),
                                synth.samplerate, numchannels=1)
    # Notation: '.' doubles and '..' quadruples a note's duration; ';' is a rest.
    song = "A A B. A D. C#.. ; A A B. A E. D.. ; A A A. F#.. D C#.. B ; G G F#.. D E D ; ; "\
          "A A B. A D C#.. ; A A B. A E D. ; A A A. F#.. D C#.. B ; G G F#.. D E D ; ; "
    with Output(synth.samplerate, synth.samplewidth, 1, mixing="sequential", queue_size=50) as out:
        for note in song.split():
            if note == ";":
                print()
                out.play_sample(silence)
                continue
            print(note, end=" ", flush=True)
            if note.endswith(".."):
                sample = synth_sample(notes[note[:-2]], tempo * 4)
            elif note.endswith("."):
                sample = synth_sample(notes[note[:-1]], tempo * 2)
            else:
                sample = synth_sample(notes[note], tempo)
            out.play_sample(sample)
        print()
        out.wait_all_played()
def generate_sample(self, oscillator, duration, use_fade=False):
    """Render *duration* seconds of *oscillator* output into a mono Sample.

    Returns the Sample, or None if the oscillator signals exhaustion
    via StopIteration.
    """
    # Scale the oscillator's normalized floats to the full integer range
    # of the configured sample width.
    scale = 2 ** (8 * self.synth.samplewidth - 1)
    num_frames = int(self.synth.samplerate * duration)
    try:
        # NOTE(review): islice normally swallows StopIteration from the
        # underlying iterator, so this handler looks defensive — confirm
        # whether any oscillator can actually raise here.
        frames = [int(v * scale) for v in itertools.islice(oscillator, num_frames)]
    except StopIteration:
        return None
    sample = Sample.from_array(frames, self.synth.samplerate, 1)
    if use_fade:
        # Short fade in/out avoids audible clicks at the sample boundaries.
        sample.fadein(0.05).fadeout(0.1)
    return sample
def chunked_frame_data(self, chunksize, repeat=False, stopcondition=lambda: False):
    """Stream the oscillator as successive chunks of raw frame data.

    Yields view_frame_data() bytes for chunks of *chunksize* bytes until
    max_play_duration has been produced or the oscillator runs out.
    NOTE(review): the repeat/stopcondition parameters are accepted for
    interface compatibility but are not used in this implementation.
    """
    frames_per_chunk = chunksize // self.samplewidth // self.nchannels
    scale = 2 ** (8 * self.samplewidth - 1)
    played = 0.0
    while played < self.max_play_duration:
        try:
            chunk = [int(v * scale) for v in itertools.islice(self.oscillator, frames_per_chunk)]
        except StopIteration:
            break
        yield Sample.from_array(chunk, self.samplerate, 1).view_frame_data()
        played += frames_per_chunk / self.samplerate
def generate_sample(self, oscillator: Oscillator, duration: float, use_fade: bool = False) -> Optional[Sample]:
    """Render *duration* seconds of *oscillator* into a mono Sample.

    Pulls whole blocks from the oscillator's block stream; returns None
    when the stream ends before the requested duration has been rendered.
    """
    scale = 2 ** (8 * self.synth.samplewidth - 1)
    blocks = oscillator.blocks()
    num_blocks = int(self.synth.samplerate * duration / params.norm_osc_blocksize)
    # BUG FIX: the previous code used next(blocks) inside a generator
    # expression. Under PEP 479 (Python 3.7+) an exhausted iterator there
    # raises RuntimeError instead of StopIteration, so the handler below
    # could never fire and the call crashed instead of returning None.
    # An explicit loop lets StopIteration propagate directly.
    sample_blocks = []
    try:
        for _ in range(num_blocks):
            sample_blocks.append(next(blocks))
    except StopIteration:
        return None
    # Flatten the list of float blocks, then scale to integer frames.
    float_frames = sum(sample_blocks, [])
    frames = [int(v * scale) for v in float_frames]
    sample = Sample.from_array(frames, self.synth.samplerate, 1)
    if use_fade:
        # Short fade in/out avoids audible clicks at the sample boundaries.
        sample.fadein(0.05).fadeout(0.1)
    return sample
def echo_lfo():
    """Demo: an enveloped sine with echo, clipped, plotted, then played."""
    synth = WaveSynth(22050)
    # Build the oscillator chain: sine -> envelope -> echo -> clipper.
    osc = Sine(440, amplitude=25000, samplerate=synth.samplerate)
    osc = EnvelopeFilter(osc, .2, .2, 0, 0, 1.5, stop_at_end=True)
    osc = EchoFilter(osc, .15, 5, 0.3, 0.6)
    osc = ClipFilter(osc, -32000, 32000)
    frames = [int(v) for v in osc]
    # Imported lazily so the demo only needs matplotlib when actually run.
    import matplotlib.pyplot as plot
    plot.plot(frames)
    plot.show()
    samp = Sample.from_array(frames, synth.samplerate, 1)
    with Output.for_sample(samp) as out:
        out.play_sample(samp)
        out.wait_all_played()
def chunked_frame_data(self, chunksize, repeat=False, stopcondition=lambda: False):
    """Stream block-based oscillator output as chunks of raw frame data.

    The chunk size must correspond exactly to the oscillator block size.
    NOTE(review): the repeat/stopcondition parameters are accepted for
    interface compatibility but are not used in this implementation.
    """
    frames_per_chunk = chunksize // self.samplewidth // self.nchannels
    if frames_per_chunk != params.norm_osc_blocksize:
        raise ValueError("streaming osc num_frames must be equal to the oscillator blocksize")
    scale = 2 ** (8 * self.samplewidth - 1)
    played = 0.0
    while played < self.max_play_duration:
        try:
            block = next(self.blocks)
        except StopIteration:
            break
        frames = [int(v * scale) for v in block]
        yield Sample.from_array(frames, self.samplerate, 1).view_frame_data()
        played += frames_per_chunk / self.samplerate
def demo_song(profiling=False):
    """Pre-synthesize every note at three durations, then play the tune.

    With profiling=True, only the synthesis time is printed and nothing
    is played.
    """
    synth = WaveSynth()
    # Map note names to frequencies, starting at piano key 49 (A4 = 440 Hz).
    note_names = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#']
    notes = {name: key_freq(49 + i) for i, name in enumerate(note_names)}
    tempo = 0.3

    def instrument(freq, duration):
        # Simple additive "instrument": a few harmonics plus an ADSR envelope.
        harmonics = [(1, 1), (2, 1/2), (4, 1/4), (6, 1/6)]
        tone = synth.harmonics(freq, duration, harmonics)
        return tone.envelope(0.05, 0.2, 0.8, 0.5)

    print("Synthesizing tones...")
    perf_c = time.perf_counter()
    # Pre-render each note at quarter, half and full durations.
    quarter_notes = {name: instrument(notes[name], tempo) for name in notes}
    half_notes = {name: instrument(notes[name], tempo * 2) for name in notes}
    full_notes = {name: instrument(notes[name], tempo * 4) for name in notes}
    silence = Sample.from_array([0] * int(synth.samplerate * tempo * 2),
                                synth.samplerate, numchannels=1)
    if profiling:
        print(time.perf_counter() - perf_c)
        return
    # Notation: '.' doubles and '..' quadruples a note's duration; ';' is a rest.
    song = "A A B. A D. C#.. ; A A B. A E. D.. ; A A A. F#.. D C#.. B ; G G F#.. D E D ; ; "\
          "A A B. A D C#.. ; A A B. A E D. ; A A A. F#.. D C#.. B ; G G F#.. D E D ; ; "
    with Output(synth.samplerate, synth.samplewidth, 1, mixing="sequential") as out:
        for note in song.split():
            if note == ";":
                print()
                out.play_sample(silence)
                continue
            print(note, end=" ", flush=True)
            if note.endswith(".."):
                sample = full_notes[note[:-2]]
            elif note.endswith("."):
                sample = half_notes[note[:-1]]
            else:
                sample = quarter_notes[note]
            out.play_sample(sample)
        print()
        out.wait_all_played()