def predict(self, input, count, temp, length=500):
    # Number the new songs after the ones already stored in the database,
    # so that existing files are never overwritten.
    songs_in_db_cnt = len(get_songs_by_author(self.db_name))
    for j in range(songs_in_db_cnt, songs_in_db_cnt + count):
        generated_output = generate_notes(self.model, input, self.mapper,
                                          self.mapper_list, temp=temp,
                                          length=length, normalize=False)
        midi_path = f'Transformer_{self.instrument_name}_{j}.mid'
        create_midi_with_embedded_durations(
            generated_output,
            target_instrument=self.target_instrument,
            filename=midi_path)
        change_midi_instrument(midi_path, self.target_instrument)
        midi_to_wav(midi_path,
                    f'static/songs/Transformer_{self.instrument_name}_{j}.wav')
        self.save_song_to_db(f'Transformer_{self.instrument_name}_{j}.wav')
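# The midi_to_wav helper used throughout these methods is not shown here.
# A minimal sketch of one way it could be implemented, assuming the
# midi2audio/FluidSynth toolchain is available (the project's actual
# renderer may differ):
from midi2audio import FluidSynth

def midi_to_wav(midi_path, wav_path):
    """Render a MIDI file to WAV using the default FluidSynth soundfont."""
    FluidSynth().midi_to_audio(midi_path, wav_path)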
def predict(self, input, count, temp):
    # NOTE: input is unused here; the VAE decodes random latent vectors.
    songs_in_db_cnt = len(get_songs_by_author(self.db_name))
    for j in range(songs_in_db_cnt, songs_in_db_cnt + count):
        # Sample a random latent vector and decode it into a note sequence.
        noise = np.random.normal(size=self.latent_dim)
        noise = np.expand_dims(noise, 0)
        pred = self.model.predict(noise)
        predicted = []
        for sequence in pred:
            for timestep in sequence:
                index = sample(timestep, temp)
                if self.mapper_list is not None:
                    # The index of mapper_list is the new value and the element
                    # is the old value; this remapping is applied when outliers
                    # were filtered out of the vocabulary.
                    index = self.mapper_list[index]
                pred_note = get_key_from_value(index, self.mapper)
                predicted.append(pred_note)
        midi_path = f'MusicVAE_{self.instrument_name}_{j}.mid'
        create_midi_with_embedded_durations(
            predicted,
            target_instrument=self.target_instrument,
            filename=midi_path)
        change_midi_instrument(midi_path, self.target_instrument)
        midi_to_wav(midi_path,
                    f'static/songs/MusicVAE_{self.instrument_name}_{j}.wav')
        self.save_song_to_db(f'MusicVAE_{self.instrument_name}_{j}.wav')
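# The sample helper above draws an index from the model's output distribution
# with temperature scaling. A minimal sketch of the standard technique,
# assuming preds is a 1-D array of probabilities (the project's actual
# implementation may differ):
import numpy as np

def sample(preds, temp=1.0):
    """Sample an index from a probability vector, reweighted by temperature."""
    preds = np.log(np.asarray(preds, dtype='float64') + 1e-9) / temp
    exp_preds = np.exp(preds)
    probs = exp_preds / np.sum(exp_preds)
    return np.random.choice(len(probs), p=probs)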
def generate_music(self, encoded_chord_string, encoded_duration_string, count, length=200):
    songs_in_db_cnt = len(get_songs_by_author(self.db_name))
    for j in range(songs_in_db_cnt, songs_in_db_cnt + count):
        chord_states = None
        duration_states = None
        # Workaround: simulate() raises an exception roughly 90% of the time,
        # because the transition matrix is so large that floating-point
        # rounding leaves its rows not summing to exactly 1.0. The loops below
        # simply retry with a fresh random start state until a simulation
        # succeeds. Crude, but effective.
        while chord_states is None:
            try:
                _, chord_states = self.chordsmc.simulate(
                    length,
                    tf=np.asarray(self.chordsmc.observed_matrix).astype('float64'),
                    start=random.choice(encoded_chord_string))
            except Exception:
                pass
        while duration_states is None:
            try:
                _, duration_states = self.durationmc.simulate(
                    length,
                    tf=np.asarray(self.durationmc.observed_matrix).astype('float64'),
                    start=random.choice(encoded_duration_string))
            except Exception:
                pass
        # Decode the simulated state ids back into note and duration symbols.
        music = [get_key_from_value(int(i), self.mapper) for i in chord_states]
        musicdurations = [get_key_from_value(int(i), self.duration_mapper)
                          for i in duration_states]
        midi_path = f'Markov_{self.instrument_name}_{j}.mid'
        create_midi_with_durations(music, musicdurations,
                                   self.target_instrument, midi_path)
        change_midi_instrument(midi_path, self.target_instrument)
        midi_to_wav(midi_path,
                    f'static/songs/Markov_{self.instrument_name}_{j}.wav')
        self.save_song_to_db(f'Markov_{self.instrument_name}_{j}.wav')
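# The retry loops above compensate for simulate() rejecting transition
# matrices whose rows no longer sum to exactly 1.0 in float64. A hypothetical
# alternative (not part of the project) is to re-normalize each row before
# simulating, which avoids most of those rounding failures:
import numpy as np

def normalize_rows(transition_matrix):
    """Rescale every row of a transition matrix so it sums to exactly 1.0."""
    m = np.asarray(transition_matrix, dtype='float64')
    return m / m.sum(axis=1, keepdims=True)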
def predict(self, input, count):
    # NOTE: count is unused here; one song is written per input sequence.
    for idx, note_sequence in enumerate(input):
        midi_path = f'GPT-2_{self.instrument_name}_{idx}.mid'
        # The GPT-2 model makes occasional mistakes while generating text,
        # which can yield invalid MIDI notes; such samples are simply skipped.
        try:
            create_midi_with_embedded_durations(note_sequence, filename=midi_path)
            change_midi_instrument(midi_path, self.target_instrument)
            midi_to_wav(midi_path,
                        f'static/songs/GPT-2_{self.instrument_name}_{idx}.wav')
            self.save_song_to_db(f'GPT-2_{self.instrument_name}_{idx}.wav')
        except Exception:
            # Ignore samples that cannot be converted into valid MIDI.
            pass
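# change_midi_instrument is assumed to rewrite the instrument (program change)
# of an already written MIDI file. A music21-based sketch of that idea, which
# may differ from the project's actual implementation:
from music21 import converter

def change_midi_instrument(midi_path, target_instrument):
    """Replace every instrument in the file with target_instrument."""
    score = converter.parse(midi_path)
    for part in score.parts:
        part.removeByClass('Instrument')  # drop the old program changes
        part.insert(0, target_instrument)
    score.write('midi', fp=midi_path)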
def predict(self, data, count, temp, length=500):
    songs = list(set(i.song for i in data))
    bug = True
    while bug:
        try:
            # Pick a random training song and pull one slice per target
            # instrument from it to seed the network.
            condition = True
            while condition:
                try:
                    random_song = random.choice(songs)
                    slice_by_instrument = {name: [] for name in self.target_instruments_str}
                    for name in self.target_instruments_str:
                        for i in data:
                            if i.song == random_song and i.instrument == name:
                                slice_by_instrument[name].append(i)
                    # Prefer slices that do not start with a rest.
                    slice_by_instrument_without_rests = {name: [] for name in self.target_instruments_str}
                    for name in slice_by_instrument.keys():
                        for song in slice_by_instrument[name]:
                            if not isinstance(song.chords[0], note.Rest):
                                slice_by_instrument_without_rests[name].append(song)
                        if len(slice_by_instrument_without_rests[name]) != 0:
                            slice_by_instrument[name] = random.choice(slice_by_instrument_without_rests[name])
                        else:
                            slice_by_instrument[name] = random.choice(slice_by_instrument[name])
                    condition = False
                except IndexError:
                    # The chosen song lacks one of the instruments; try another.
                    continue
            guitar_chords = slice_by_instrument['Electric Guitar'].chords
            guitar_durations = slice_by_instrument['Electric Guitar'].durations
            bass_chords = slice_by_instrument['Electric Bass'].chords
            drum_chords = slice_by_instrument['Piano'].chords
            # Encode the seed slices and normalize the ids into [0, 1] for the
            # LSTM; only the first 20 timesteps are used as the seed.
            starting_slice_notes = (np.asarray(encode_using_mapper(guitar_chords, self.guitar_mapper)) / len(self.guitar_mapper))[:20]
            starting_slice_durations = (np.asarray(encode_using_mapper(guitar_durations, self.guitar_durations_mapper)) / len(self.guitar_durations_mapper))[:20]
            starting_slice_bass = (np.asarray(encode_using_mapper(bass_chords, self.bass_mapper)) / len(self.bass_mapper))[:20]
            starting_slice_drum = (np.asarray(encode_using_mapper(drum_chords, self.drum_mapper)) / len(self.drum_mapper))[:20]
            songs_in_db_cnt = len(get_songs_by_author(self.db_name))
            for j in range(songs_in_db_cnt, songs_in_db_cnt + count):
                generated_output = generate_multi_instrument_notes(
                    self.model, starting_slice_notes, starting_slice_durations,
                    starting_slice_bass, starting_slice_drum,
                    self.guitar_mapper, self.guitar_durations_mapper,
                    self.bass_mapper, self.drum_mapper,
                    self.guitar_mapper_list, self.durations_mapper_list,
                    temp=temp, length=length)
                (guitar_output, bass_output, drum_output) = generated_output
                guitar_part = create_midipart_with_durations(guitar_output, target_instrument=self.target_instruments[0])
                bass_part = create_midipart_with_durations(bass_output, target_instrument=self.target_instruments[1])
                # drum_part = create_drum_part_with_durations(drum_output)  # TODO: fix drum sounds
                guitar_part.insert(0, self.target_instruments[0])
                bass_part.insert(0, self.target_instruments[1])
                # drum_part.insert(0, self.target_instruments[2])
                full_midi = Score()
                full_midi.insert(0, guitar_part)
                full_midi.insert(0, bass_part)
                # full_midi.insert(0, drum_part)
                midi_path = f'LSTM_{self.instrument_name}_{j}.mid'
                full_midi.write('midi', fp=midi_path)
                midi_to_wav(midi_path,
                            f'static/songs/LSTM_{self.instrument_name}_{j}.wav')
                self.save_song_to_db(f'LSTM_{self.instrument_name}_{j}.wav')
            bug = False
        except ValueError:
            # Generation failed; retry the whole procedure with a new seed.
            continue
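# The mapper helpers assumed above translate between musical tokens and
# integer ids. Minimal sketches under the assumption that a mapper is a plain
# dict of token -> id (the real project code may differ):
def encode_using_mapper(elements, mapper):
    """Encode a sequence of musical tokens into their integer ids."""
    return [mapper[str(element)] for element in elements]

def get_key_from_value(value, mapper):
    """Reverse lookup: return the token whose id equals value."""
    return next(key for key, val in mapper.items() if val == value)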
def predict(self, data, count, temp, length=500):
    songs = list(set(i.song for i in data))
    bug = True
    while bug:
        try:
            # Pick a random training song and pull one slice per target
            # instrument from it to seed the networks.
            condition = True
            while condition:
                try:
                    random_song = random.choice(songs)
                    slice_by_instrument = {name: [] for name in self.target_instruments_str}
                    for name in self.target_instruments_str:
                        for i in data:
                            if i.song == random_song and i.instrument == name:
                                slice_by_instrument[name].append(i)
                    # Prefer slices that do not start with a rest.
                    slice_by_instrument_without_rests = {name: [] for name in self.target_instruments_str}
                    for name in slice_by_instrument.keys():
                        for song in slice_by_instrument[name]:
                            if not isinstance(song.chords[0], note.Rest):
                                slice_by_instrument_without_rests[name].append(song)
                        if len(slice_by_instrument_without_rests[name]) != 0:
                            slice_by_instrument[name] = random.choice(slice_by_instrument_without_rests[name])
                        else:
                            slice_by_instrument[name] = random.choice(slice_by_instrument[name])
                    condition = False
                except IndexError:
                    # The chosen song lacks one of the instruments; try another.
                    continue
            guitar_chords = slice_by_instrument['Electric Guitar'].chords
            guitar_durations = slice_by_instrument['Electric Guitar'].durations
            bass_chords = slice_by_instrument['Electric Bass'].chords
            bass_durations = slice_by_instrument['Electric Bass'].durations
            # Fuse each chord with its duration into a single token so that
            # pitch and rhythm are predicted jointly.
            combined_guitar = combine_chords_with_durations(guitar_chords, guitar_durations)
            combined_bass = combine_chords_with_durations(bass_chords, bass_durations)
            starting_slice_notes = np.asarray(encode_using_mapper(combined_guitar, self.guitar_mapper))[:20]
            starting_slice_bass = np.asarray(encode_using_mapper(combined_bass, self.bass_mapper))[:20]
            songs_in_db_cnt = len(get_songs_by_author(self.db_name))
            for j in range(songs_in_db_cnt, songs_in_db_cnt + count):
                generated_guitar = generate_notes(self.guitar_model, starting_slice_notes,
                                                  self.guitar_mapper,
                                                  mapperlist=self.guitar_mapper_list,
                                                  temp=temp, length=length,
                                                  normalize=False, random_start=False)
                generated_bass = generate_notes(self.bass_model, starting_slice_bass,
                                                self.bass_mapper,
                                                mapperlist=self.bass_mapper_list,
                                                temp=temp, length=length,
                                                normalize=False, random_start=False)
                guitar_part = create_midipart_with_durations(generated_guitar, target_instrument=self.target_instruments[0])
                bass_part = create_midipart_with_durations(generated_bass, target_instrument=self.target_instruments[1])
                guitar_part.insert(0, self.target_instruments[0])
                bass_part.insert(0, self.target_instruments[1])
                full_midi = Score()
                full_midi.insert(0, guitar_part)
                full_midi.insert(0, bass_part)
                midi_path = f'Transformer_{self.instrument_name}_{j}.mid'
                full_midi.write('midi', fp=midi_path)
                midi_to_wav(midi_path,
                            f'static/songs/Transformer_{self.instrument_name}_{j}.wav')
                self.save_song_to_db(f'Transformer_{self.instrument_name}_{j}.wav')
            bug = False
        except Exception:
            # Generation failed; retry the whole procedure with a new seed.
            continue
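# combine_chords_with_durations fuses each chord with its duration into one
# token, which is what "embedded durations" refers to elsewhere. A sketch
# under the assumption of a simple '$'-separated token format (hypothetical;
# the project's real token format may differ):
def combine_chords_with_durations(chords, durations):
    """Pair every chord symbol with its duration as a single token."""
    return [f'{chord}${duration}' for chord, duration in zip(chords, durations)]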