def music_vae_interpolate(sequence1, sequence2, model_id, model_config, num):
    """Interpolate between two NoteSequences with a MusicVAE checkpoint.

    Loads the trained model named by ``model_config`` from the checkpoint
    archive ``model_id + '.tar'``, produces ``num`` sequences morphing from
    ``sequence1`` to ``sequence2``, and returns them concatenated into a
    single NoteSequence (endpoints included at each end).
    """
    vae_model = TrainedModel(
        configs.CONFIG_MAP[model_config],
        batch_size=4,
        checkpoint_dir_or_path=model_id + '.tar')
    interpolated = vae_model.interpolate(
        sequence1, sequence2, num_steps=num, length=32)
    # One long sequence, with the start and end sequences at each end.
    return mm.sequences_lib.concatenate_sequences(interpolated)
# Preview of the generated samples. NOTE(review): every statement in the
# original loop body was commented out, leaving an empty (syntactically
# invalid) suite — the whole preview is therefore kept as a comment:
# for ns in generated_sequences:
#     # print(ns)
#     mm.plot_sequence(ns)
#     mm.play_sequence(ns, synth=mm.fluidsynth)

# We're going to interpolate between the Twinkle Twinkle Little Star
# NoteSequence we defined in the first section, and one of the generated
# sequences from the previous VAE example.

# How many sequences, including the start and end ones, to generate.
num_steps = 8

# This gives us a list of sequences.
note_sequences = music_vae.interpolate(
    twinkle_twinkle, teapot, num_steps=num_steps, length=32)

# Concatenate them into one long sequence, with the start and
# end sequences at each end.
interp_seq = mm.sequences_lib.concatenate_sequences(note_sequences)

# mm.play_sequence(interp_seq, synth=mm.fluidsynth)
# mm.plot_sequence(interp_seq)

# Export the interpolation as MIDI and download it (Colab helper).
mm.sequence_proto_to_midi_file(interp_seq, 'interp_seq.mid')
files.download('interp_seq.mid')

# pylint: enable=g-import-not-at-top

# Allow pretty_midi to read MIDI files with absurdly high tick rates.
# Useful for reading the MAPS dataset.
# Load the Baby Shark melody and keep only its first 8 seconds.
babyshark = mm.midi_file_to_note_sequence('./mid/babyshark.mid')
babyshark = mm.extract_subsequence(babyshark, 0, 8)

# Strip the sequence's metadata so it matches what the 2-bar melody model
# expects: drop the existing time/key signatures and tempo, then pin the
# tempo to 60 qpm.
# NOTE(review): ticks_per_quarter = 0 looks intentional here (forcing a
# default resolution?) but 0 is an unusual value — confirm downstream
# readers tolerate it.
babyshark.ticks_per_quarter = 0
babyshark.time_signatures.pop()
babyshark.key_signatures.pop()
babyshark.tempos.pop()
babyshark.tempos.add(qpm=60)

# Normalize every note: clamp pitches below middle C (MIDI 60) up to 60,
# and force all notes onto instrument 0 as non-drum melody notes.
for note in babyshark.notes:
    if note.pitch < 60:
        note.pitch = 60
    note.instrument = 0
    note.is_drum = False

# This gives us a list of sequences morphing twinkle_twinkle into babyshark.
note_sequences = music_vae.interpolate(
    twinkle_twinkle, babyshark, num_steps=num_steps, length=8)

# Concatenate them into one long sequence, with the start and
# end sequences at each end.
interp_seq = mm.sequences_lib.concatenate_sequences(note_sequences)
mm.play_sequence(interp_seq, synth=mm.fluidsynth)
mm.plot_sequence(interp_seq)
mm.sequence_proto_to_midi_file(interp_seq, 'twinkle_shark.mid')
class MusicVae:
    """
    Author: Tanish and Akshit
    Last Modified: 02/02/21
    Version: 1.2

    Class to wrap the MusicVAE model trained with Magenta.
    """

    def __init__(self):
        """
        Loads and initializes the VAE from its checkpoint.
        """
        print("Initializing Music VAE...")
        self.music_vae = TrainedModel(
            configs.CONFIG_MAP['cat-mel_2bar_big'],
            batch_size=4,
            checkpoint_dir_or_path='/content/mel_2bar_big.ckpt')
        print('🎉 Done!')

    def generate(self, n=2, length=80, temperature=1.0):
        """
        Generates random music sequences.

        Args:
            n: number of samples to generate
                type: int
            length: length of each sample
                type: int
            temperature: empirical magnitude of randomness in generated
                sequences
                type: float

        Returns:
            List[NoteSequence] of generated music
        """
        # Bug fix: this previously called the *global* `music_vae` with
        # hard-coded n=2, length=80, temperature=1.0, silently ignoring
        # both `self` and every argument. Use the wrapped model and the
        # caller's parameters (defaults preserve the old behavior).
        generated_sequences = self.music_vae.sample(
            n=n, length=length, temperature=temperature)
        for ns in generated_sequences:
            note_seq.play_sequence(ns, synth=note_seq.fluidsynth)
        return generated_sequences

    def interpolate(self, sequence_one, sequence_two, num_steps=8):
        """
        Interpolates between two music sequences.

        Args:
            sequence_one: first sequence
                type: NoteSequence object
            sequence_two: second sequence
                type: NoteSequence object
            num_steps: number of sequences to interpolate through
                type: int

        Returns:
            NoteSequence object of interpolated music
        """
        # Bug fix: this previously interpolated the notebook globals
        # `twinkle_twinkle` and `teapot`, ignoring both arguments. Use the
        # sequences the caller actually passed in.
        # This gives us a list of sequences.
        note_sequences = self.music_vae.interpolate(
            sequence_one, sequence_two, num_steps=num_steps, length=32)

        # Concatenate them into one long sequence, with the start and
        # end sequences at each end.
        interp_seq = note_seq.sequences_lib.concatenate_sequences(
            note_sequences)
        note_seq.play_sequence(interp_seq, synth=note_seq.fluidsynth)
        return interp_seq