def play_excerpt():
    global repeat_flag
    current_playing_index = -1
    start = 0
    frame_length = 16384
    samples = []
    while True:
        if current_playing_index == -1:
            current_playing_index = current_excerpt_index
            samples = read_samples()
            if samples is None:
                break
            samples *= pow(2, 14)
            start = 0
            end = min(start + frame_length, len(samples))
            frame = samples[start:end]
            stream.write(raw_audio_string(frame))
            start += frame_length
            repeat_flag = False
        elif current_playing_index != current_excerpt_index:
            end = min(start + frame_length, len(samples))
            frame = samples[start:end]
            for i in xrange(len(frame)):
                frame[i] *= (1.0 - float(i) / len(frame))
            stream.write(raw_audio_string(frame))
            start = 0
            samples = read_samples()
            if samples is None:
                break
            samples *= pow(2, 14)
            current_playing_index = current_excerpt_index
            repeat_flag = False
        else:
            end = min(start + frame_length, len(samples))
            frame = samples[start:end]
            # check whether it is the last frame of the excerpt
            if start + frame_length >= len(samples):
                enable_button()
                for i in xrange(len(frame)):
                    frame[i] *= (1.0 - float(i) / len(frame))
                stream.write(raw_audio_string(frame))
                if repeat_flag:
                    start = 0
                    repeat_flag = False
                else:
                    start += frame_length
                sleep(1)
            else:
                stream.write(raw_audio_string(frame))
                start += frame_length

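# The per-sample loop above applies a linear fade-out, frame[i] *= 1 - i/len.
# A sketch of the same ramp vectorized with numpy (the helper name is
# illustrative, not from the original):
import numpy

def apply_fade_out(frame):
    # linspace(1.0, 0.0, n, endpoint=False) yields exactly 1 - i/n for i in 0..n-1
    return frame * numpy.linspace(1.0, 0.0, num=len(frame), endpoint=False)
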
def sleep(self, seconds):
    if hasattr(self, 'wav'):
        samples = fs.raw_audio_string(self.fsynth.get_samples(
            int(seconds * 44100)))
        self.wav.writeframes(''.join(samples))
    else:
        time.sleep(seconds)

def play(s):
    if type(s) is list:
        s = numpy.array(s)
    if type(s) is numpy.ndarray:
        s = raw_audio_string(s)
    elif type(s) is not str:
        s = s.getvalue()
    strmQueue.put(s)

def play_sound(self):
    self.sound_arr = []
    self.fl = fluidsynth.Synth()
    self.sfid = self.fl.sfload("sound/KawaiStereoGrand.sf2")
    self.pa = pyaudio.PyAudio()
    self.strm = self.pa.open(
        format=pyaudio.paInt16,
        channels=2,
        rate=44100,
        output=True)
    self.fl.program_select(0, self.sfid, 0, 0)
    self.fl.noteon(self.track, self.sound_note, 127)
    # Note is held for 1 second
    self.sound_arr = numpy.append(self.sound_arr,
                                  self.fl.get_samples(44100 * 1))
    self.fl.noteoff(self.track, self.sound_note)
    # Decay of chord is held for 1 second
    # self.sound_arr = numpy.append(self.sound_arr, self.fl.get_samples(44100 * 1))
    self.fl.delete()
    self.samps = fluidsynth.raw_audio_string(self.sound_arr)
    self.strm.write(self.samps)
    self.strm.close()
    self.pa.terminate()

def play_excerpt(path):
    global play_status, check_buttons, radio_buttons
    for check_button in check_buttons:
        check_button['state'] = tk.DISABLED
    for radio_button in radio_buttons:
        radio_button['state'] = tk.DISABLED
    samples = read_samples(path)
    start = 0
    frame_length = 16384
    audio = PyAudio()
    stream = audio.open(format=paInt16, channels=1, rate=44100, output=True)
    while True:
        end = min(len(samples), start + frame_length)
        frame = samples[start:end]
        stream.write(raw_audio_string(frame))
        start += frame_length
        if start >= len(samples):
            break
    play_status = False
    stream.stop_stream()
    stream.close()
    audio.terminate()
    for check_button in check_buttons:
        check_button['state'] = tk.NORMAL
    for radio_button in radio_buttons:
        radio_button['state'] = tk.NORMAL

def main(self):
    while True:
        if self.eventnum < len(self.events) and self._playing:
            event = self.events[self.eventnum]
            self.eventnum += 1
            delta = event.tick - self.time
            while delta > 0:
                bdelta = delta
                if delta > MAXDELTA:
                    bdelta = MAXDELTA
                self.time += bdelta
                # convert bdelta ticks to a sample count:
                # FREQ samples/sec * (ticks / resolution) * (60 / tempo) sec
                n = int(FREQ * bdelta / self.resolution * 60 / self.tempo)
                s = self.fs.get_samples(n)
                samps = fluidsynth.raw_audio_string(s)
                self.strm.write(samps)
                delta -= bdelta
                self.sendUpdate()
                if not self._playing:
                    break
            self.do_event(event)
            self.sendUpdate()
        else:
            time.sleep(0.01)
        if self._abort:
            return

def sleep(self, seconds):
    if hasattr(self, 'wav'):
        samples = fs.raw_audio_string(
            self.fs.get_samples(int(seconds * 44100)))
        self.wav.writeframes(''.join(samples))
    else:
        time.sleep(seconds)

def play_notes(*notes, velocity=100, last=False):
    samples = []
    notes_temp = []
    notes = list(notes)
    notes.sort(key=lambda n: n.rhythm.value)
    for note in notes:
        midi_num = note.hard_pitch + 12
        fl.noteon(0, midi_num, velocity)
        notes_temp.append((midi_num, note.rhythm.value))
    frames = round(SAMPLE_RATE * (60 / TEMPO) * (notes[0].rhythm.value / 128))
    samples = np.append(samples, fl.get_samples(frames))
    previous = None
    for midi_val, rhythm_val in notes_temp:
        if previous:
            if previous < rhythm_val:
                rhythm_len = (60 / TEMPO) * (note.rhythm.value / 128)
                proportion_to_prev = (rhythm_len -
                                      ((60 / TEMPO) * (previous / 128))) / rhythm_len
                TurnOffLater(midi_val,
                             (60 / TEMPO) * (note.rhythm.value / 128) * 0.9).start()
            else:
                fl.noteoff(0, midi_val)
        else:
            fl.noteoff(0, midi_val)
        previous = rhythm_val
    samples = np.append(samples,
                        fl.get_samples(round(SAMPLE_RATE * (0.02 if not last else 1))))
    strm.write(fluidsynth.raw_audio_string(samples))

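# TurnOffLater is not defined in this snippet; a plausible sketch, assuming it
# releases the note on the module-level synth `fl` after a delay (the
# implementation below is an assumption, not from the original):
import threading
import time

class TurnOffLater(threading.Thread):
    def __init__(self, midi_num, delay):
        threading.Thread.__init__(self)
        self.midi_num = midi_num
        self.delay = delay

    def run(self):
        # wait, then send the deferred noteoff while playback continues
        time.sleep(self.delay)
        fl.noteoff(0, self.midi_num)
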
def play_note(self, note, duration):
    # self.mixer.setvolume(100)
    s = []
    self.synth.noteon(0, self._note_freqs.get_note_midi(note), 127)
    s = numpy.append(s, self.synth.get_samples(int(44100 * duration)))
    self.synth.noteoff(0, self._note_freqs.get_note_midi(note))
    s = numpy.append(s, self.synth.get_samples(1))
    self.pcm.write(fluidsynth.raw_audio_string(s))

def _genTunes(self):
    """
    Fudging audio creation for now by just having sounds made
    that all belong to one private list
    """
    self._tunes = []
    for i in xrange(10, 0, -1):
        s = []
        self._fl.noteon(0, 60 + 7 * i, 120)
        s = numpy.append(s, self._fl.get_samples(int(44100 * 0.3)))
        self._fl.noteoff(0, 60 + 7 * i)
        self._tunes.append(fluidsynth.raw_audio_string(s))
    # s = []
    # s = numpy.append(s, self._fl.get_samples(int(44100 * 0.1)))
    # self._fl.noteon(0, 60, 120)
    # s = numpy.append(s, self._fl.get_samples(int(44100 * 0.3)))
    # self._fl.noteoff(0, 60)
    # s = numpy.append(s, self._fl.get_samples(int(44100 * 0.1)))
    # self._tunes.append(fluidsynth.raw_audio_string(s))
    # s = []
    # self._fl.noteon(0, 67, 120)
    # s = numpy.append(s, self._fl.get_samples(int(44100 * 0.3)))
    # self._fl.noteoff(0, 67)
    # s = numpy.append(s, self._fl.get_samples(int(44100 * 0.1)))
    # self._tunes.append(fluidsynth.raw_audio_string(s))
    # s = []
    # s = numpy.append(s, self._fl.get_samples(int(44100 * 0.1)))
    # self._fl.noteon(0, 76, 120)
    # s = numpy.append(s, self._fl.get_samples(int(44100 * 0.3)))
    # self._fl.noteoff(0, 76)
    # s = numpy.append(s, self._fl.get_samples(int(44100 * 0.1)))
    # self._tunes.append(fluidsynth.raw_audio_string(s))
    # s = []
    # s = numpy.append(s, self._fl.get_samples(int(44100 * 0.1)))
    # for i in xrange(0, 32, 4):
    #     self._fl.noteon(0, 60 + i, 120)
    #     s = numpy.append(s, self._fl.get_samples(int(44100 * 0.2)))
    #     self._fl.noteoff(0, i)
    # for i in xrange(32, 0, -4):
    #     self._fl.noteon(0, 60 + i, 120)
    #     s = numpy.append(s, self._fl.get_samples(int(44100 * 0.2)))
    #     self._fl.noteoff(0, i)
    # self._tunes.append(fluidsynth.raw_audio_string(s))
    return

def main():
    path_dir = '/Users/hongyu/Desktop/Test/excerpts/'
    output_dir = '/Users/hongyu/Desktop/Test/excerpts-refined/'
    filenames = listdir(path_dir)
    for name in filenames:
        if name.endswith('.wav'):
            samples = read_samples(path_dir + name)
            samples = add_fade_in_fade_out(samples)
            fout = waveopen(output_dir + name, 'w')
            fout.setframerate(44100)
            fout.setnchannels(2)
            fout.setsampwidth(2)
            fout.writeframes(raw_audio_string(samples))
            fout.close()
    return

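# read_samples and add_fade_in_fade_out are not shown; a minimal sketch of the
# fade helper, assuming a 1-D numpy array of samples (the name follows the call
# above, the implementation and window length are assumptions):
import numpy

def add_fade_in_fade_out(samples, fade_len=4410):
    # 4410 samples is roughly 0.05 s of interleaved stereo at 44100 Hz
    samples = samples.astype(numpy.float64)
    n = min(fade_len, len(samples))
    ramp = numpy.linspace(0.0, 1.0, num=n)
    samples[:n] *= ramp           # fade in
    samples[-n:] *= ramp[::-1]    # fade out
    return samples
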
def play_notes():
    """
    Pulls in note positions from javascript, creates a random filename,
    and writes a .wav file representing those notes.

    Inputs: no direct arguments, but pulls in JSON of note positions
        via flask
    Outputs: filename prefix (sends to javascript)
    """
    data = flask.request.json
    if data["notes"] == "":
        return flask.jsonify(data)
    rand_id = str(uuid.uuid4().hex)[:6]
    note_data, _ = notei.make_note_stack(data["notes"])
    s = []
    fs = fluidsynth.Synth()
    sfid = fs.sfload(absolute_path + "FluidR3_GM.sf2")
    fs.program_select(0, sfid, 0, 0)
    for tick, notes in note_data:
        if notes != ["x"]:
            for val in notes:
                fs.noteon(0, int(val), 100)
            s = np.append(s, fs.get_samples(int(44100 * 0.2)))
            for val in notes:
                fs.noteoff(0, int(val))
        else:
            s = np.append(s, fs.get_samples(int(44100 * 0.2)))
    fs.delete()
    # raw_audio_string already returns a bytes object, so write it directly
    # (b"".join over it would iterate byte-by-byte and fail on Python 3)
    samps = fluidsynth.raw_audio_string(s)
    wf = wave.open(absolute_path + "static/" + rand_id + ".wav", "wb")
    wf.setnchannels(2)
    wf.setframerate(44100)
    wf.setsampwidth(2)
    wf.writeframes(samps)
    wf.close()
    return flask.jsonify({"id": rand_id})

def get_audio_stream_for_note(self, note):
    """ noteon -> channel, key, velocity """
    self.initialize()
    stream = []
    self.fs.noteon(0, note.value, note.velocity)
    # note duration is in seconds; get_samples expects an integer frame count
    stream = np.append(stream,
                       self.fs.get_samples(int(SAMPLE_RATE * note.duration)))
    self.fs.noteoff(0, note.value)
    # 1 sec decay of the note
    stream = np.append(stream, self.fs.get_samples(SAMPLE_RATE * 1))
    self.finish()
    return fluidsynth.raw_audio_string(stream)

def test_returning_data():
    fs = fluidsynth.Synth()
    sfid = fs.sfload("FluidR3_GM2-2.SF2")
    fs.program_select(0, sfid, 0, 0)
    fs.noteon(0, 60, 30)
    fs.noteon(0, 67, 30)
    fs.noteon(0, 76, 30)
    time.sleep(1.0)
    fs.noteoff(0, 60)
    fs.noteoff(0, 67)
    fs.noteoff(0, 76)
    time.sleep(1.0)
    samples = fs.get_samples(1024)
    fs.delete()
    return fluidsynth.raw_audio_string(samples)

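# The bytes returned above can be written straight to a PyAudio output stream;
# a minimal playback sketch using the same stream parameters as the other
# examples in this listing:
import pyaudio

pa = pyaudio.PyAudio()
strm = pa.open(format=pyaudio.paInt16, channels=2, rate=44100, output=True)
strm.write(test_returning_data())
strm.close()
pa.terminate()
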
def streamcopy(stream):
    s = StringIO()
    c = 0
    if hasattr(stream, "read"):
        while True:
            buf = stream.read(2048)
            if len(buf) == 0:
                break
            s.write(buf)
            c += len(buf)
    else:
        for data in stream:
            if type(data) is numpy.ndarray:
                buf = raw_audio_string(data)
            elif type(data) is str:
                buf = str(data)
            else:
                assert False, "cannot handle " + repr(data)
            s.write(buf)
            c += len(buf)
    s.seek(0)
    return s

def convert_pattern_to_samples(self, pattern, instruments, unit,
                               dynamic_offset=0):
    while self.status == False:
        sleep(0.01)
    resolution = pattern.resolution
    tempo = pattern[0][0].get_bpm()
    sampling_rate = 44100.0
    array_length = int(unit * 60.0 / tempo * sampling_rate)
    digital_filter = self.digital_filter
    if len(instruments) < 3:
        print 'error in loading instruments'
    num_bars_trans = 4
    if len(self.last_instruments) != 0 and \
            ((self.last_instruments[0]['ID'] != instruments[0]['ID']) or
             (self.last_instruments[1]['ID'] != instruments[1]['ID']) or
             (self.last_instruments[2]['ID'] != instruments[2]['ID'])):
        instruments.append(self.last_instruments[0])
        instruments.append(self.last_instruments[1])
        instruments.append(self.last_instruments[2])
        pattern.append(MIDI.copy_track(pattern[0], channel_offset=3))
        pattern.append(MIDI.copy_track(pattern[1], channel_offset=3))
        pattern.append(MIDI.copy_track(pattern[2], channel_offset=3))
        # whether in transition state or not, 0 is not a transition state
        if self.trans == 0:
            self.trans = num_bars_trans
    if self.trans > 0:
        self.trans -= 1
    if self.trans <= 0:
        if len(self.last_instruments) == 0:
            self.last_instruments.append(instruments[0])
            self.last_instruments.append(instruments[1])
            self.last_instruments.append(instruments[2])
        else:
            self.last_instruments[0] = instruments[0]
            self.last_instruments[1] = instruments[1]
            self.last_instruments[2] = instruments[2]
        print 'Melody: ' + instruments[0]['Name']
        print 'Harmony: ' + instruments[1]['Name'] + ', ' + instruments[2]['Name']
        print ''
    for i in range(min(len(instruments), len(self.synths))):
        self.synths[i].program_select(i, self.sfids[i], 0, instruments[i]['ID'])
    # generate samples
    samples = []
    for i in range(min(len(pattern), len(self.synths))):
        sample = []
        track = pattern[i]
        for event in track:
            if event.tick != 0:
                length = int(event.tick / float(resolution) * 60.0 / tempo * 44100)
                sample = append(sample, self.synths[i].get_samples(length))
            if type(event) is NoteOnEvent:
                self.synths[i].noteon(event.channel, event.pitch, event.velocity)
            if type(event) is NoteOffEvent:
                self.synths[i].noteoff(event.channel, event.pitch)
        if len(sample) < array_length * 2:
            sample = append(
                sample,
                self.synths[i].get_samples(array_length - len(sample) / 2))
        samples.append(sample)
    # combine different instruments
    len_trans = float(array_length * 2)
    len_smooth_trans = 16.0
    smooth_dec = [(cos(i * pi / len_smooth_trans) + 1) / 2.0
                  for i in xrange(int(len_smooth_trans))]
    smooth_inc = [(-cos(i * pi / len_smooth_trans) + 1) / 2.0
                  for i in xrange(int(len_smooth_trans))]
    if len(instruments) == 6:
        coeff_fade_in = array(xrange(int(len_trans)))
        coeff_fade_in = coeff_fade_in / (len_trans * num_bars_trans) + (
            num_bars_trans - 1 - self.trans) / 4.0
        coeff_fade_out = array(xrange(int(len_trans), 0, -1))
        coeff_fade_out = coeff_fade_out / (
            len_trans * num_bars_trans) + self.trans / 4.0
        # avoid the signal changing suddenly
        if self.trans == num_bars_trans - 1:
            coeff_fade_out[:len(smooth_inc)] = smooth_inc
            coeff_fade_in[:len(smooth_dec)] = smooth_dec
        samples[0] *= coeff_fade_in
        samples[1] *= coeff_fade_in
        samples[2] *= coeff_fade_in
        samples[3] *= coeff_fade_out
        samples[4] *= coeff_fade_out
        samples[5] *= coeff_fade_out
        combined_samples = (samples[0] + samples[1] + samples[2] +
                            samples[3] + samples[4] + samples[5])
    else:
        combined_samples = samples[0] + samples[1] + samples[2]
    if self.trans <= 0:
        self.synths[3].system_reset()
        self.synths[4].system_reset()
        self.synths[5].system_reset()
    # filter
    num_samples_mono = len(combined_samples) / 2
    num_samples_tail = int(sampling_rate * 0.5)
    reshaped_samples = combined_samples.reshape(num_samples_mono, 2)
    left_channel = array(
        list(reshaped_samples[:, 0]) + [0] * num_samples_tail)
    right_channel = array(
        list(reshaped_samples[:, 1]) + [0] * num_samples_tail)
    if self.overdriven_coeff < 1:
        max_amp = max(max(abs(left_channel)), max(abs(right_channel)))
        left_channel = minimum(left_channel, max_amp * self.overdriven_coeff)
        right_channel = minimum(right_channel, max_amp * self.overdriven_coeff)
    left_channel = lfilter(digital_filter['b'], digital_filter['a'],
                           left_channel)
    right_channel = lfilter(digital_filter['b'], digital_filter['a'],
                            right_channel)
    num_samples = array_length + num_samples_tail
    reverb_delay_samples = int(self.reverb_delay_time * sampling_rate)
    current_reverb_coeff = self.reverb_amount
    for i in xrange(5):
        if reverb_delay_samples >= num_samples:
            break
        left_channel[reverb_delay_samples:] += \
            (left_channel[:num_samples - reverb_delay_samples] *
             current_reverb_coeff)
        right_channel[reverb_delay_samples:] += \
            (right_channel[:num_samples - reverb_delay_samples] *
             current_reverb_coeff)
        reverb_delay_samples *= 2
        current_reverb_coeff *= current_reverb_coeff
    # calculate the perceptual volume
    frame_length = 1024
    c = exp(-1.0 / frame_length)
    (ear_b, ear_a) = (Synthesizer.ear_b, Synthesizer.ear_a)
    ear_filtered_left_square = lfilter(ear_b, ear_a, left_channel)**2
    ear_filtered_right_square = lfilter(ear_b, ear_a, right_channel)**2
    vms_left = lfilter([1 - c], [1, -c],
                       ear_filtered_left_square[:num_samples_mono])
    vms_right = lfilter([1 - c], [1, -c],
                        ear_filtered_right_square[:num_samples_mono])
    # average the top 20% and calculate the normalization factor
    iterator = xrange(frame_length - 1, num_samples_mono, frame_length)
    vdB = [10.0 * log10(max(vms_left[i], vms_right[i])) for i in iterator]
    vdB.sort(reverse=True)
    original_volume = mean(vdB[0:len(vdB) / 5])
    desired_volume = (60 + dynamic_offset * 0.2) if dynamic_offset > -50 else 0
    ratio = sqrt(pow(10, (desired_volume - original_volume) / 10.0))
    max_ratio = (pow(2.0, 15) - 1) / max(max(abs(left_channel)),
                                         max(abs(right_channel)))
    ratio = min(ratio, max_ratio)
    # normalize the left channel and the right channel
    trans_ratio = [
        ratio * smooth_inc[i] + self.last_ratio * smooth_dec[i]
        for i in xrange(int(len_smooth_trans))
    ]
    left_channel[:int(len_smooth_trans)] *= trans_ratio
    right_channel[:int(len_smooth_trans)] *= trans_ratio
    # clamp the smoothed region of both channels to the int16 range
    left_channel[:int(len_smooth_trans)] = maximum(
        minimum(left_channel[:int(len_smooth_trans)], pow(2.0, 15) - 1),
        -pow(2.0, 15) + 1)
    right_channel[:int(len_smooth_trans)] = maximum(
        minimum(right_channel[:int(len_smooth_trans)], pow(2.0, 15) - 1),
        -pow(2.0, 15) + 1)
    left_channel[int(len_smooth_trans):] *= ratio
    right_channel[int(len_smooth_trans):] *= ratio
    self.last_ratio = ratio
    # add the previous filter tail in order to make the transition smooth
    if self.left_channel_tail is not None and self.right_channel_tail is not None:
        left_channel[:num_samples_tail] += self.left_channel_tail
        right_channel[:num_samples_tail] += self.right_channel_tail
    combined_samples = ndarray([num_samples_mono, 2])
    combined_samples[:, 0] = left_channel[:num_samples_mono]
    combined_samples[:, 1] = right_channel[:num_samples_mono]
    combined_samples = combined_samples.flatten()
    # keep the filter tail
    self.left_channel_tail = array(left_channel[-num_samples_tail:])
    self.right_channel_tail = array(right_channel[-num_samples_tail:])
    return raw_audio_string(combined_samples)

def miditowav(inst, mid, sf):
    # mid = pretty_midi.PrettyMIDI('./Music/kkhouse.mid')  #1
    pa = pyaudio.PyAudio()
    sd.query_devices()
    strm = pa.open(format=pyaudio.paInt16, channels=2, rate=44100, output=True)
    s = []
    # result_array = mid2arry(mid)
    # selecting soundfont
    fl = fluidsynth.Synth()
    # Initial silence is 1 second
    # s = numpy.append(s, fl.get_samples(44100 * 1))
    # fl.start('dsound')
    sfid = fl.sfload(r'C:\Users\User\Desktop\FluidR3_GM\yk.sf2')
    sfid = fl.sfload(sf)
    # selecting instrument
    fl.program_select(0, sfid, 0, 0)
    startdict = snotetodict(mid, inst)
    enddict = enotetodict(mid, inst)
    # notedict = startdict.copy()
    # notedict.update(enddict)
    notedict = nnotetodict(mid, inst)
    instrument = mid.instruments[inst]
    startarr = []
    endarr = []
    for note in instrument.notes:
        startarr.append(note.start)
        endarr.append(note.end)
    startkey = startdict.keys()
    startkey.sort()
    endkey = enddict.keys()
    endkey.sort()
    # delete duplicate notes in notekey
    notekey = startkey + endkey
    notekey = set(notekey)
    notekey = list(notekey)
    notekey.sort()
    # print notekey
    print len(startarr), len(endarr)
    fl.noteon(0, 0, 0)
    fl.noteon(0, 30, 98)
    s = numpy.append(s, fl.get_samples(int(44100 * 1 / 2)))
    s = numpy.append(s, fl.get_samples(int(44100 * notekey[0] / 2)))
    playtime = {}
    # print notedict
    print mid.instruments[inst]
    for note in instrument.notes:
        fl.noteon(0, note.pitch, 98)
        s = numpy.append(s, fl.get_samples(int(44100 * 1 / 2)))
    # fl.noteoff(0, 0)
    fl.delete()
    samps = fluidsynth.raw_audio_string(s)
    print(len(s))
    print('Starting playback')
    # strm.write(samps)
    scaled = numpy.int16(s / numpy.max(numpy.abs(s)) * 32767)
    name = './Out/inst' + str(inst) + '.wav'
    write(name, 44100, scaled)

def play_sample(self, audio):
    # repeat each frame k times; this only resamples cleanly when
    # speaker_sample_rate is an integer multiple of audio_sample_rate
    upsampled = audio.repeat(
        self.speaker_sample_rate // self.audio_sample_rate, axis=0)
    samps = fluidsynth.raw_audio_string(upsampled)
    print('Starting playback')
    self.strm.write(samps)

synth.program_select(0, sfid, 0, 0)
sys.stderr.write('\nUnpacking Soundfonts to .wav files.......')
for note in notes:
    config.write('\n%d ' % note)
    sys.stderr.write('\n%d ' % note)
    for velocity in velocities:
        samples = []
        synth.noteon(0, note, velocity)
        samples = numpy.append(samples, synth.get_samples(duration * fs))
        synth.noteoff(0, note)
        samples = numpy.append(samples, synth.get_samples(decay * fs))
        s = fluidsynth.raw_audio_string(samples)
        writer = wave.open('%s/%d_%d.wav' % (pack, note, velocity), 'wb')
        writer.setparams(wav_parameters)
        writer.writeframesraw(s)
        writer.close()
        config.write('%d ' % velocity)
        sys.stderr.write('%d ' % velocity)
synth.delete()
config.write('\n')
sys.stderr.write('\n\n')

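# The fragment above assumes its context is set up elsewhere; one plausible
# arrangement (every value below is an assumption, not part of the original):
import sys
import wave
import numpy
import fluidsynth

fs = 44100                              # sample rate in Hz
duration, decay = 2, 1                  # seconds of sustain and release
notes = range(21, 109)                  # MIDI piano range
velocities = (32, 64, 96, 127)
pack = 'pack'                           # output directory (must exist)
# (nchannels, sampwidth, framerate, nframes, comptype, compname)
wav_parameters = (2, 2, fs, 0, 'NONE', 'not compressed')
config = open('%s/config.txt' % pack, 'w')
synth = fluidsynth.Synth()
sfid = synth.sfload('FluidR3_GM.sf2')
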
def miditowav(self, inst, mid, sf, inst_index=0):
    # mid = pretty_midi.PrettyMIDI('./Music/kkhouse.mid')  #1
    pa = pyaudio.PyAudio()
    sd.query_devices()
    strm = pa.open(format=pyaudio.paInt16, channels=2, rate=44100, output=True)
    s = []
    # selecting soundfont
    fl = fluidsynth.Synth()
    # Initial silence is 1 second
    # s = numpy.append(s, fl.get_samples(44100 * 1))
    # fl.start('dsound')
    # sfid = fl.sfload(r'C:\Users\User\Desktop\FluidR3_GM\yk.sf2')
    sfid = fl.sfload(sf)
    # selecting instrument
    fl.program_select(0, sfid, 0, inst_index)
    startdict = snotetodict(mid, inst)
    enddict = enotetodict(mid, inst)
    # notedict = startdict.copy()
    # notedict.update(enddict)
    notedict = nnotetodict(mid, inst)
    instrument = mid.instruments[inst]
    print instrument.is_drum
    '''
    if instrument.is_drum == True:
        sfid = fl.sfload('C:\Users\User\Desktop\FluidR3_GM\FluidR3_GM.sf2')
        fl.program_select(10, sfid, 0, 35)
    '''
    startarr = []
    endarr = []
    for note in instrument.notes:
        startarr.append(note.start)
        endarr.append(note.end)
    startkey = startdict.keys()
    startkey.sort()
    endkey = enddict.keys()
    endkey.sort()
    # delete duplicate notes in notekey
    notekey = startkey + endkey
    notekey = set(notekey)
    notekey = list(notekey)
    notekey.sort()
    print inst, len(startarr), len(endarr)
    fl.noteon(0, 0, 0)
    s = numpy.append(s, fl.get_samples(int(44100 * notekey[0] / 2)))
    playtime = {}
    notekey.append(notekey[len(notekey) - 1] + 1)
    for i in range(len(notekey) - 1):
        term = 0
        pl = 0
        '''
        for note in notedict[notekey[i]]:
            if notekey[i] == note.start:
                fl.noteon(0, note.pitch, note.velocity)
                playtime = note.end - note.start
                print notekey[i], note.pitch, 'start'
            elif notekey[i] == note.end:
                s = numpy.append(s, fl.get_samples(int(44100 * playtime / 2)))
                fl.noteoff(0, note.pitch)
                print notekey[i], note.pitch, 'end'
        '''
        # print notekey[i], notedict[notekey[i]]
        for j in range(len(notedict[notekey[i]])):
            note = notedict[notekey[i]][j]
            # print "i:", i, "inst:", inst, note
            if notekey[i] == note.start:
                # because fluidsynth can't play notes whose pitch is above 88
                # (when the soundfont is koto)
                if note.pitch > 120:
                    fl.noteon(0, note.pitch - 12, note.velocity)
                # because fluidsynth can't play notes whose pitch is below 48
                # (when the soundfont is koto)
                # elif note.pitch < 48:
                #     fl.noteon(0, note.pitch + 12, note.velocity)
                else:
                    fl.noteon(0, note.pitch, note.velocity)
            elif notekey[i] == note.end:
                fl.noteoff(0, note.pitch)
        p = 0
        term = notekey[i + 1] - notekey[i]
        s = numpy.append(s, fl.get_samples(int(44100 * term / 2)))
    fl.delete()
    samps = fluidsynth.raw_audio_string(s)
    print(len(s))
    print('Starting playback')
    # strm.write(samps)
    # scaled = numpy.int16(s / numpy.max(numpy.abs(s)) * 32767)
    scaled = numpy.int16(s * 0.8 / numpy.max(numpy.abs(s)) * 32767)
    name = './Out/inst' + str(inst) + '.wav'
    write(name, 44100, scaled)
    # playsound(name)
    if self.maxendtime < notekey[len(notekey) - 1]:
        self.maxendtime = max(notekey[len(notekey) - 1], self.maxendtime)
        self.maxendindex = inst

s = []
# Initial silence is 1 second
s = numpy.append(s, fl.get_samples(44100 * 1))
fl.program_select(0, sfid, 0, i - 1)
fl.noteon(0, 60, 127)
fl.noteon(0, 67, 127)
fl.noteon(0, 76, 127)
# Chord is held for 2 seconds
s = numpy.append(s, fl.get_samples(int(44100 * 2)))
fl.noteoff(0, 60)
fl.noteoff(0, 67)
fl.noteoff(0, 76)
# Decay of chord is held for 1 second
s = numpy.append(s, fl.get_samples(int(44100 * 1)))
samps = fluidsynth.raw_audio_string(s)
print len(samps)
print 'Starting playback'
pcm.write(samps)
fl.delete()

def write_wav(self, seconds):
    """Forces a write of a 'seconds' long wav."""
    if hasattr(self, 'wav'):
        samples = self.fsynth.get_samples(int(seconds * 44100))
        audio = fs.raw_audio_string(samples)
        self.wav.writeframes(''.join(audio))

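# self.wav is assumed to be opened elsewhere; a plausible initialization for
# 16-bit stereo at 44100 Hz, matching raw_audio_string output (the method name
# and path are hypothetical):
import wave

def open_wav(self, path):
    self.wav = wave.open(path, 'wb')
    self.wav.setnchannels(2)   # stereo
    self.wav.setsampwidth(2)   # 16-bit samples
    self.wav.setframerate(44100)
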
# settings = new_fluid_settings()
# synth = new_fluid_synth(settings)
# sfid = fl.sfload("/Users/yuhaomao/Desktop/pyfluidsynth/test/example.sf2", 7)
sfid = fl.sfload("/Users/yuhaomao/Downloads/Steinway B-JNv2.0.sf2")
fl.program_select(0, sfid, 0, 0)  # (channel, soundfont id, bank, preset)
fl.noteon(0, 60, 100)
# fl.noteon(0, 67, 30)
# fl.noteon(0, 76, 30)
# fluidsynth.fluid_synth_noteon("", 0, 60, 100)
# Note is held for 5 seconds
s = numpy.append(s, fl.get_samples(44100 * 5))
print("333")
print(s)
print("444")
fl.noteoff(0, 60)
# fl.noteoff(0, 67)
# fl.noteoff(0, 76)
# Decay of chord is held for 1 second
# s = numpy.append(s, fl.get_samples(44100 * 1))
fl.delete()
samps = fluidsynth.raw_audio_string(s)
# print(len(samps))
# print('Starting playback')
strm.write(samps)

def audio(self):
    return fluidsynth.raw_audio_string(self.numpy_array)