def get_rhythm_phrase(self, file_name, genre):
    mid = MidiFile(file_name)
    midi_temp = "temp.mid"
    wav_temp = "temp.wav"

    # Locate the genre-specific soundfont next to the MIDI file.
    sf2_path = file_name.split(genre)[0] + genre + '/' + genre + '.sf2'
    print("loading sound font from %s" % sf2_path)
    fs = FluidSynth(sf2_path, sample_rate=self.sample_rate)

    # Rescale note times from the file's original tempo to the target tempo.
    midi_original_tempo = 1000000 * 60 / mid.tracks[0][5].tempo
    target_tempo = self.tempo
    for note in mid.tracks[0]:
        note.time = int(note.time * midi_original_tempo / target_tempo)
    mid.save(midi_temp)

    fs.midi_to_audio(midi_temp, wav_temp)
    # os.system("fluidsynth -ni %s %s -F %s -r %s" % (sf2_path, midi_temp, wav_temp, self.sample_rate))

    sr, loop_data = wavfile.read(wav_temp)
    # loop_data = np.memmap(wav_temp, np.float32, offset=40)
    print(loop_data.shape)

    # Trim to the length of the first layer, take every other sample, and split into frame-sized buffers.
    loop_data = loop_data[:len(np.array(self.layers[0].data).flatten())][::2].reshape(-1, self.frames_per_buffer)
    print(loop_data.shape)

    rhythm_data = list(loop_data)
    rhythm_layer = Queue()
    rhythm_layer.data = rhythm_data
    return rhythm_layer
def melody_wav(tempo, key, instrument, chords):
    melody = melody_generator(key, chords)
    print(melody)

    track = 0
    channel = 0
    time = 0
    duration = 1
    volume = 127

    midi = MIDIFile(1)
    midi.addTempo(track, time, tempo)

    for note in melody:
        print(note)
        if len(note) == 1:
            # Single note: one full beat.
            midi.addNote(track, channel, note[0], time, duration, volume)
            time += 1
        else:
            # Two notes: split the beat into two half-beat notes.
            print(note[0], note[1])
            midi.addNote(track, channel, note[0], time, duration / 2, volume)
            time += 0.5
            midi.addNote(track, channel, note[1], time, duration / 2, volume)
            time += 0.5

    with open('midi/melody.mid', 'wb') as output_file:
        midi.writeFile(output_file)

    fs = FluidSynth(instrument)
    fs.midi_to_audio('midi/melody.mid', 'wav/melody.wav')
    lowpass('wav/melody.wav', 'wav/processed_melody.wav', 1200)
def synthMidi(self, midiFile):
    # Check if the file exists
    if not os.path.exists(self.storage_path + '/midifiles/' + midiFile):
        if self.debug:
            print("Trying to synthesize non-existing file " + midiFile)
        return

    # Is this file already synthesized (and do we not want to force a resynth)?
    if self.isSynthesized(midiFile) and not self.force:
        if self.debug:
            print(midiFile + " has already been synthesized.")
        return

    # Split the MIDI filename into parts
    name, ext = midiFile.rsplit('.', 1)

    # Is this a MIDI file?
    if ext != 'mid':
        return

    # We need FluidSynth
    fs = FluidSynth()

    # Synthesize MIDI file
    fs.midi_to_audio(self.storage_path + '/midifiles/' + midiFile,
                     self.storage_path + '/midisynth/' + name + '.wav')

    # Print output if necessary
    if self.debug:
        print(midiFile + " has been synthesized.")
def createWAV(self, MIDI):
    WAV = tempfile.NamedTemporaryFile()
    fs = FluidSynth()
    fs.midi_to_audio(MIDI.name, WAV.name)
    WAV.seek(0)
    MIDI.close()
    return WAV
def _mid2mp3(self):
    # mid2wav
    fs = FluidSynth()
    fs.midi_to_audio(self.track_name, self.wavpath)
    # wav2mp3
    mp3 = AudioSegment.from_wav(self.wavpath).export(self.mp3path, format="mp3")
    os.remove(self.wavpath)
def music_encrypt(cipher):
    degrees = [60, 62, 64, 65, 67, 69, 71, 72]  # MIDI note numbers for the scale below
    scale = ['C', 'D', 'E', 'F', 'G', 'A', 'B', 'C`']
    track = 0
    channel = 0
    time = 0          # In beats
    duration = 1.00   # In beats
    tempo = 120       # In BPM
    volume = 120      # 0-127, as per the MIDI standard

    cipher_midi = MIDIFile(1)
    cipher_midi.addTempo(track, time, tempo)

    # Map each character of the cipher text onto a scale degree and duration.
    for i, x in enumerate(cipher):
        l = note_return(x)
        cipher_midi.addNote(track, channel, degrees[l[1]], time + i,
                            duration / pow(2, l[0]), volume)

    midfile = input('enter name of music file: ') + ".mid"
    with open(midfile, "wb") as output_file:
        cipher_midi.writeFile(output_file)

    fs = FluidSynth()
    output_wav = input('enter name of wav file: ') + ".wav"
    fs.midi_to_audio(midfile, output_wav)
def main():
    args = parser()
    pypianoroll_file = abspath(args.input_file)
    loaded = pypianoroll.load(pypianoroll_file)

    if args.savepath is None:
        savepath = os.getcwd()
    else:
        savepath = args.savepath
    os.makedirs(savepath, exist_ok=True)

    # Save the plot
    if args.output is None:
        output_filename = os.path.basename(pypianoroll_file).split(".")[0]
    else:
        output_filename = args.output
    output_filename = join(savepath, output_filename)
    print(output_filename)
    plot_multitrack(loaded, filename=output_filename + ".svg", preset="frame")

    # Save to WAV
    pypianoroll.write(loaded, output_filename + ".mid")
    fs = FluidSynth('FluidR3_GM.sf2')
    fs.midi_to_audio(output_filename + ".mid", output_filename + ".wav")
def to_midi_wav(melody):
    # convert to midi
    filename = melody['raw_filename']
    # Scale factor from relative note lengths to beats.
    k = melody['duration'] * melody['bpm'] / (np.array(melody['lengths']).sum() * 60)
    time = 0
    track = 0
    channel = 0
    volume = 100

    my_midi = MIDIFile(1)
    my_midi.addTempo(track, time, melody['bpm'])
    my_midi.addProgramChange(track, channel, time, 0)

    for i, pitch in enumerate(melody['midi']):
        length = k * melody['lengths'][i]
        # Skip pauses, out-of-range pitches, and very short notes.
        if pitch != 'P' and 0 <= pitch <= 255 and length >= 1 / 16:
            my_midi.addNote(track, channel, round(pitch), time, length, volume)
        time = time + length

    midi_filename = 'tmp/' + filename.split('.')[0] + '.mid'
    with open(midi_filename, "wb") as output_file:
        my_midi.writeFile(output_file)

    # convert back to wav
    filename_processed = 'tmp/' + filename.split('.')[0] + '_processed' + '.wav'
    fs = FluidSynth('static/Drama Piano.sf2')
    fs.midi_to_audio(midi_filename, filename_processed)
def to_wav(self, dir=None):
    with tempfile.NamedTemporaryFile(suffix='.mid') as fp1:
        self.write('midi', fp=fp1.name)
        fs = FluidSynth('/usr/share/sounds/sf2/FluidR3_GM.sf2')
        with tempfile.NamedTemporaryFile(suffix='.wav', dir=dir, delete=False) as fp2:
            fs.midi_to_audio(fp1.name, fp2.name)
            return fp2.name
def playAudio(stream):
    """Generate playable audio from a stream."""
    midi = stream.write('midi')
    fs = FluidSynth('/usr/share/soundfonts/FluidR3_GM.sf2')
    filename = 'audio-{}.wav'.format(uuid.uuid4().hex)
    fs.midi_to_audio(midi, filename)
    audio = Audio(filename=filename)
    os.remove(filename)
    return audio
def render_audio(path, auxiliarDirectory, out_path):
    fs = FluidSynth(FLUIDSYNTH_FONTS)
    fs.midi_to_audio(path, f'{auxiliarDirectory}/aux_audio.wav')
    sr, signal = read(f'{auxiliarDirectory}/aux_audio.wav')
    # Keep only the first channel and write it out as MP3.
    x = signal[:, 0]
    write(f'{out_path}_audio.mp3', sr, x, 'mp3')
    os.remove(f'{auxiliarDirectory}/aux_audio.wav')
def make_midi(self, data_list):
    degrees = []
    volume = []
    duration = []
    track = []
    time = []

    # Each line of data_list is "degree,volume,duration,track,time".
    for i in range(len(data_list)):
        data_list[i] = data_list[i].strip('\n')
        current = data_list[i].split(",")
        degrees.append(int(current[0]))     # MIDI note number
        volume.append(int(current[1]))      # 0-127, as per the MIDI standard
        duration.append(float(current[2]))  # In beats
        track.append(int(current[3]))
        time.append(float(current[4]))      # In beats

    channel = 0
    tempo = 20    # In BPM
    program = 10

    # Three tracks, format 1 (the tempo track is created automatically).
    MyMIDI = MIDIFile(3, deinterleave=False)
    MyMIDI.addTempo(0, 1, tempo)
    MyMIDI.addTempo(1, 1, tempo)
    MyMIDI.addTempo(2, 1, tempo)
    MyMIDI.addProgramChange(0, channel, 1, program)
    MyMIDI.addProgramChange(1, channel, 1, program)
    MyMIDI.addProgramChange(2, channel, 1, program)

    for i, pitch in enumerate(degrees):
        MyMIDI.addNote(track[i], channel, pitch, time[i], duration[i], volume[i])

    with open("simulacrum.mid", "wb") as output_file:
        MyMIDI.writeFile(output_file)

    fs = FluidSynth(usesoundfont)  # usesoundfont is defined at the top of the code
    fs.midi_to_audio('simulacrum.mid', 'simulacrum.wav')
    AudioSegment.from_wav("simulacrum.wav").export("simulacrum.mp3", format="mp3")
def convert_midi_to_audio(url):
    fs = FluidSynth()
    # Download the MIDI file and write it to disk.
    file = requests.get(url)
    with open('input.mid', 'wb') as midi_file:
        midi_file.write(file.content)
    fs.midi_to_audio('input.mid', 'output.wav')
    sound = AudioSegment.from_wav('output.wav')
    sound.export('output.mp3', format="mp3")
    # Only return the file if it fits under the Discord upload limit.
    return discord.File(r'output.mp3') if os.path.getsize('output.mp3') <= 16000000 else None
def midi2mp3(path):
    fname = os.path.splitext(os.path.basename(path))[0]
    fs = FluidSynth()
    wav_path = '../mp3/' + fname + '.wav'
    fs.midi_to_audio(path, wav_path)
    AudioSegment.from_wav(wav_path).export('../mp3/' + fname + '.mp3', format="mp3")
    os.remove(wav_path)
    return fname
def render(stream, path):
    '''
    Convert a music21 stream into a WAV file using midi2audio,
    then optionally display it in the IPython notebook.
    '''
    mf = midi.translate.streamToMidiFile(stream)
    mf.open('temp.mid', 'wb')
    mf.write()
    mf.close()
    fs = FluidSynth()
    fs.midi_to_audio('temp.mid', path)
    os.remove('temp.mid')
def generate_notes_in_batch(note_params_df, output_dir, audio_format='flac', sample_rate=44100):
    """
    Generates a batch of single-note samples from the given table of parameters.

    `note_params_df` - a Pandas DataFrame with columns:
    `midi_number, midi_instrument, volume, duration, tempo`.
    Their meaning is the same as in generate_single_note.

    `output_dir` - output directory for the MIDI files

    Each sample goes to a single MIDI file named by the numeric index.
    Also each synthesized audio sample goes to a separate audio file.
    """
    os.makedirs(output_dir, exist_ok=True)

    fs = FluidSynth(sample_rate=sample_rate)

    # Build one long stream with all notes separated by rests.
    stream = Stream()
    for i, row in note_params_df.iterrows():
        stream.append(MetronomeMark(number=row['tempo']))
        stream.append(make_instrument(int(row['midi_instrument'])))
        duration = row['duration']
        stream.append(
            chord_with_volume(
                Chord([
                    Note(midi=int(row['midi_number']), duration=Duration(duration))
                ]), row['volume']))
        stream.append(Rest(duration=Duration(2 * duration)))

    midi_file = '{0}/all_samples.midi'.format(output_dir)
    audio_file_stereo = '{0}/all_samples_stereo.{1}'.format(output_dir, audio_format)
    audio_file = '{0}/all_samples.{1}'.format(output_dir, audio_format)
    audio_index_file = '{0}/all_samples_index.csv'.format(output_dir)

    # TODO: We currently assume some fixed duration and tempo (1.0, 120)!!!
    # The parts should be split according to an index.
    audio_index = make_audio_index(note_params_df, 3.0, 0.5, sample_rate)
    audio_index.to_csv(audio_index_file)

    write_midi(stream, midi_file)

    fs.midi_to_audio(midi_file, audio_file_stereo)
    convert_to_mono(audio_file_stereo, audio_file)
    os.remove(audio_file_stereo)

    x, sample_rate = sf.read(audio_file)
    parts = split_audio_to_parts(x, sample_rate, audio_index)
    store_parts_to_files(parts, sample_rate, output_dir, audio_format)
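# A minimal usage sketch for generate_notes_in_batch (an illustration, not from the
# original source): it assumes the function and its helpers (make_instrument,
# chord_with_volume, write_midi, etc.) are importable from the same module, and the
# parameter values below are only examples. Duration 1.0 and tempo 120 match the
# fixed values mentioned in the TODO above; the volume scale is assumed.
import pandas as pd

note_params_df = pd.DataFrame([
    {'midi_number': 60, 'midi_instrument': 0, 'volume': 1.0, 'duration': 1.0, 'tempo': 120},
    {'midi_number': 64, 'midi_instrument': 0, 'volume': 1.0, 'duration': 1.0, 'tempo': 120},
])
generate_notes_in_batch(note_params_df, 'output/notes', audio_format='flac', sample_rate=44100)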
def process(seed):
    hid_size = 512
    classes = SOS_token + 1
    np.random.seed(seed)

    # Load the trained decoder and seed its hidden state randomly.
    decoder = DecoderRNN(hid_size, classes)
    decoder.load_state_dict(torch.load("decoders/decoder4", map_location='cpu'))
    decoder_hidden = torch.FloatTensor(
        [np.random.rand() for _ in range(hid_size)]).view(1, 1, -1).to(device)

    # Generate the MIDI output, then synthesize it to WAV.
    postprocess(evaluate(decoder, decoder_hidden), seed)
    fs = FluidSynth(os.getcwd() + '/GeneralUser_GS_SoftSynth_v144.sf2')
    fs.midi_to_audio('output_' + str(seed) + '.mid', 'new_song_' + str(seed) + '.wav')
def play_midi_file_from_disk(
        midi_path="mung2midi/sample/The_Nutcracker_Russion_dance.mid",
        soundfont='mung2midi/UprightPianoKW-small-SF2-20190703/UprightPianoKW-small-20190703.sf2'):
    """Plays (or attempts to play) the given MIDI file.

    Requires FluidSynth to be installed on your machine, see
    https://github.com/FluidSynth/fluidsynth/wiki/Download

    :param midi_path: Path to a MIDI file on the disc
    :param soundfont: A *.sf2 soundfont for FluidSynth to load.
    """
    fs = FluidSynth(soundfont)
    fs.play_midi(midi_path)
def convert_mp3(self, filename, to_mp3=True):
    """
    Converts MIDI to MP3 (or WAV).

    Arguments
    ---------
    `filename`: `str`
    `to_mp3`: `bool`
    """
    fs = FluidSynth()
    title = filename.split('.')[0]
    audio_filename = f'{title}.mp3' if to_mp3 else f'{title}.wav'
    # saves file to disk
    fs.midi_to_audio(filename, audio_filename)
def listen(midi: PrettyMIDI, path=None, out=None):
    if not fs_exist:
        return False
    if not path:
        path = string.STATIC_DIR + "audio/"
    # Make sure the output directory exists before writing into it.
    os.makedirs(path, exist_ok=True)

    midi.write(path + "__listen__.mid")
    fs = FluidSynth(sound_font=string.STATIC_DIR + 'default_sound_font.sf2')

    if out is None:
        out = time.strftime("%H_%M_%S", time.localtime()) + ".wav"
    fs.midi_to_audio(path + "__listen__.mid", path + out)
    os.remove(path + "__listen__.mid")
    return True
def generate_wav(path):
    if not Path(f"{path}.mid").is_file():
        return False
    FluidSynth().midi_to_audio(f"{path}.mid", f"{path}.wav")
    return Path(f"{path}.wav").is_file()
def get_sample():
    print("Enter seed in the range 1-25")
    seed = int(input())

    decoder = torch.load(os.getcwd() + '/models_Sonya/decoder_1').to(device)

    # Build a constant seed vector as the decoder's initial input.
    input_rand = np.reshape([seed] * hidden_size, (1, 1, hidden_size))

    new_indexes = evaluate(input_rand, decoder, max_length=300, most_prob=15)
    mid_new = midi_from_indexes(new_indexes[20:], max_dur_note, amount_dur, each_dur)
    mid_new.save('new_song.mid')
    print('OK')

    # Synthesize with the TimGM6mb sound font at the default 44100 Hz sample rate.
    fs = FluidSynth(os.getcwd() + '/TimGM6mb.sf2')
    fs.midi_to_audio('new_song.mid', 'new_song.wav')
def save_mp3(stream_object):
    out_midi = stream_object.write('midi')
    # out_wav = str(Path(out_midi).with_suffix('.mp3'))
    out_wav = "./app/static/music/final_output.wav"
    FluidSynth("./app/Melon_Model/data/soundfonts/FluidR3_GM.sf2").midi_to_audio(out_midi, out_wav)
    return out_wav
def output_integration(midi_dir, wav_dir):
    from midi2audio import FluidSynth
    import shutil

    midiout_dir = "midi_check/midi_wav"
    finalout_dir = "offline_output"

    # Start from an empty directory for the synthesized MIDI audio.
    shutil.rmtree(midiout_dir)
    os.makedirs(midiout_dir, exist_ok=True)

    fs = FluidSynth()
    midiwav_y = np.zeros(0)
    wav_y = np.zeros(0)

    # Synthesize every MIDI file in midi_dir to WAV.
    for midifile in os.listdir(midi_dir):
        if ".mid" in midifile:
            fs.midi_to_audio(f'{midi_dir}/{midifile}', f'{midiout_dir}/{midifile[:-4]}.wav')

    midipath_list = os.listdir(midiout_dir)
    wavpath_list = os.listdir(wav_dir)

    # Sort files numerically by their basename.
    def pathsort(path):
        return int(path.split(".")[0])

    midipath_list.sort(key=pathsort)
    wavpath_list.sort(key=pathsort)

    # Concatenate the synthesized MIDI audio and the original audio.
    for midiwav in midipath_list:
        y, sr = librosa.load(f'{midiout_dir}/{midiwav}', sr=hparam.sr)
        midiwav_y = np.concatenate((midiwav_y, y), axis=0)
    for wav_file in wavpath_list:
        y, sr = librosa.load(f'{wav_dir}/{wav_file}', sr=hparam.sr)
        wav_y = np.concatenate((wav_y, y), axis=0)

    # Attenuate the original audio, trim both signals to the same length, and mix.
    wav_y /= wav_y.max() * 4
    if wav_y.shape[0] > midiwav_y.shape[0]:
        wav_y = wav_y[:midiwav_y.shape[0]]
    else:
        midiwav_y = midiwav_y[:wav_y.shape[0]]
    wav_y = wav_y + midiwav_y * 10

    librosa.output.write_wav(f"{finalout_dir}/finaltest.wav", wav_y, sr=hparam.sr)
def main(src_dir, sf2_path, out_dir):
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    fs = FluidSynth(sf2_path, sample_rate=16000)
    for i, file in enumerate(sorted(listdir(src_dir))):
        if file == '.DS_Store':
            continue
        new_file = file[:-4]
        print(str(file))
        fs.midi_to_audio(join(src_dir, file), join(out_dir, new_file + '.wav'))
def setup(self):
    """Load the model"""
    # music21.environment.set("musicxmlPath", "/bin/true")
    note_embedding_dim = 20
    meta_embedding_dim = 20
    num_layers = 2
    lstm_hidden_size = 256
    dropout_lstm = 0.5
    linear_hidden_size = 256
    batch_size = 256
    num_epochs = 5
    train = False
    num_iterations = 500
    sequence_length_ticks = 64

    dataset_manager = DatasetManager()
    metadatas = [FermataMetadata(), TickMetadata(subdivision=4), KeyMetadata()]
    chorale_dataset_kwargs = {
        "voice_ids": [0, 1, 2, 3],
        "metadatas": metadatas,
        "sequences_size": 8,
        "subdivision": 4,
    }
    bach_chorales_dataset: ChoraleDataset = dataset_manager.get_dataset(
        name="bach_chorales", **chorale_dataset_kwargs
    )
    dataset = bach_chorales_dataset

    self.deepbach = DeepBach(
        dataset=dataset,
        note_embedding_dim=note_embedding_dim,
        meta_embedding_dim=meta_embedding_dim,
        num_layers=num_layers,
        lstm_hidden_size=lstm_hidden_size,
        dropout_lstm=dropout_lstm,
        linear_hidden_size=linear_hidden_size,
    )
    self.deepbach.load()

    # Load FluidSynth for MIDI-to-audio conversion.
    self.fs = FluidSynth()
def parse_request():
    # Decode the base64 image from the request and save it to disk.
    image_data_encode = request.json.get('image')
    image_data_url, image_encode = image_data_encode.split(',')
    new_im = Image.open(BytesIO(base64.b64decode(image_encode)))
    new_im.save('test.png')
    # note_list = engine.engine(new_im)

    # constants
    track = 0
    channel = 0
    time = 0
    tempo = request.json.get('tempo')
    volume = request.json.get('volume')

    my_midi = MIDIFile(1)
    my_midi.addTempo(track, time, tempo)

    for counter, my_tuple in enumerate(sample_list):
        note = my_tuple[0]
        note_type = my_tuple[1]
        midi_note = note_to_midi[note]
        duration = type_to_duration[note_type]
        my_midi.addNote(track, channel, midi_note, time, duration, volume)
        time += duration

    with open("test.mid", "wb") as output_file:
        my_midi.writeFile(output_file)

    fs = FluidSynth()
    fs.midi_to_audio('test.mid', 'test.wav')

    # Return the synthesized audio as base64 in the JSON response.
    with open("test.wav", "rb") as input_file:
        encoded_audio = base64.b64encode(input_file.read()).decode('ascii')

    return jsonify({'audio': {'content': encoded_audio}})
def get_note():
    length = int(request.args.get('length'))
    # Render two octaves of single guitar notes, one WAV file per note.
    for i in range(24):
        mid = MidiFile()
        track = MidiTrack()
        mid.tracks.append(track)
        track.append(Message('control_change', control=32, value=127, time=0))
        track.append(Message('program_change', program=25, time=0))
        track.append(Message('note_on', note=48 + i, velocity=90, time=0))
        track.append(Message('note_off', note=48 + i, velocity=90, time=length))
        track.append(Message('note_off', note=48 + i, velocity=90, time=2 * length))
        mid.save('temp.mid')
        fs = FluidSynth('/Users/piotrek/Library/Audio/Sounds/Banks/fluid_r3_gm.sf2')
        fs.midi_to_audio('temp.mid', "static/audio/guitar/" + str(i) + "_" + str(length) + '.wav')
    return redirect('/')
def melody_to_audio(melody, midi_program=0):
    pm = melody_lib.melody_to_midi(melody, program=midi_program)
    unique_filename = str(uuid.uuid4())
    midi_path = audio_filesystem_dir + unique_filename + ".mid"
    audio_path = audio_filesystem_dir + unique_filename + ".wav"
    pm.write(midi_path)
    FluidSynth().midi_to_audio(midi_path, audio_path)
    return audio_download_url + unique_filename + ".wav"
def toWaveForm(self, font="SteinwayGrandPiano_1.2.sf2"):
    """
    Converts the data into a waveForm object.

    It uses the fluidsynth module to perform the audio conversion.
    Notice that a temporary wave sound file is created, which is then
    immediately erased. SERVER is a module-level setting that indicates
    whether the converted data has to be saved to a given path.

    Parameters
    ----------
    font : str, optional
        Path to a soundfont which is used by fluidsynth to create audio
        data from MIDI. Defaults to "SteinwayGrandPiano_1.2.sf2".

    Returns
    -------
    newWaveForm : waveForm
        Resulting waveForm object.
    """
    if not os.path.exists(self.outPath + "temp/"):
        os.makedirs(self.outPath + "temp/")

    midiPath = self.outPath + "temp/" + self.name + ".mid"
    wavePath = self.outPath + "temp/" + self.name + ".wav"
    pathFont = "SoundFonts/" + font

    self.writeToMidi(midiPath)
    F = FluidSynth(pathFont)
    F.midi_to_audio(midiPath, wavePath)

    # Return an object of type waveForm defined in this folder.
    newWaveForm = waveForm.waveForm(wavePath)

    # Clean up the temporary files.
    process = subprocess.Popen("rm -f " + midiPath + " " + wavePath,
                               shell=True,
                               stderr=subprocess.DEVNULL,
                               stdout=subprocess.DEVNULL)
    process.wait()

    return newWaveForm