def interpolate(self, sequence_one, sequence_two, num_steps=8):
    """Interpolate between two music sequences with MusicVAE.

    Args:
        sequence_one: first endpoint sequence.
            type: NoteSequence object
        sequence_two: second endpoint sequence.
            type: NoteSequence object
        num_steps: number of sequences to interpolate through.
            type: int

    Returns:
        NoteSequence of the interpolated sequences concatenated into one
        long sequence, with the start and end sequences at each end.
    """
    # BUG FIX: the original ignored both arguments and interpolated the
    # module-level `twinkle_twinkle` and `teapot` globals instead.
    # This gives us a list of sequences.
    note_sequences = self.music_vae.interpolate(
        sequence_one, sequence_two, num_steps=num_steps, length=32)

    # Concatenate them into one long sequence, with the start and
    # end sequences at each end.
    interp_seq = note_seq.sequences_lib.concatenate_sequences(note_sequences)

    note_seq.play_sequence(interp_seq, synth=note_seq.fluidsynth)
    return interp_seq
def render_token_sequence(token_sequence, use_program=True, use_drums=True):
    """Convert a token sequence to a NoteSequence, then plot and play it.

    Args:
        token_sequence: token sequence to render.
        use_program: whether to honor program (instrument) tokens.
        use_drums: whether to include drum tokens.
    """
    ns = token_sequence_to_note_sequence(
        token_sequence, use_program=use_program, use_drums=use_drums)
    fluid = note_seq.midi_synth.fluidsynth
    note_seq.plot_sequence(ns)
    note_seq.play_sequence(ns, fluid)
def __call__(self, input_sequence):
    """Continue a music sequence with the melody RNN.

    Args:
        input_sequence: initial sequence to continue.
            type: NoteSequence object

    Returns:
        NoteSequence object of continued music.
    """
    num_steps = 128    # length of the continuation, in model steps
    temperature = 1.0  # the higher the temperature the more random the sequence

    # Set the start time to begin on the next step after the last note ends.
    last_end_time = (max(n.end_time for n in input_sequence.notes)
                     if input_sequence.notes else 0)
    qpm = input_sequence.tempos[0].qpm
    # BUG FIX: the original read a module-level `melody_rnn` global here while
    # calling `self.melody_rnn.generate(...)` below; use the instance's model
    # consistently so the step size matches the generator actually used.
    seconds_per_step = 60.0 / qpm / self.melody_rnn.steps_per_quarter
    total_seconds = num_steps * seconds_per_step

    generator_options = generator_pb2.GeneratorOptions()
    generator_options.args['temperature'].float_value = temperature
    generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step,
        end_time=total_seconds)

    # Ask the model to continue the sequence.
    sequence = self.melody_rnn.generate(input_sequence, generator_options)
    note_seq.play_sequence(sequence, synth=note_seq.fluidsynth)
    return sequence
def generate(self, n=2, length=80, temperature=1.0):
    """Generate random music sequences by sampling from MusicVAE.

    Args:
        n: number of samples to generate.
            type: int
        length: length of each sample.
            type: int
        temperature: empirical magnitude of randomness in generated sequences.
            type: float

    Returns:
        List[NoteSequence] of generated music.
    """
    # BUG FIX: the original hard-coded n=2, length=80, temperature=1.0 and
    # read a module-level `music_vae` global, ignoring every argument; use
    # the parameters and the instance model (as interpolate() does).
    generated_sequences = self.music_vae.sample(
        n=n, length=length, temperature=temperature)
    for ns in generated_sequences:
        note_seq.play_sequence(ns, synth=note_seq.fluidsynth)
    return generated_sequences
# NOTE(review): `targets` and `decode_length` appear to be module-level values
# consumed by the sampling input generator defined elsewhere — confirm before
# renaming or scoping them.
targets = []
decode_length = 1024

# Generate sample events.
sample_ids = next(unconditional_samples)['outputs']

# Decode the sampled ids to a MIDI file, then load it as a NoteSequence.
midi_filename = decode(
    sample_ids,
    encoder=unconditional_encoders['targets'])
unconditional_ns = note_seq.midi_file_to_note_sequence(midi_filename)

# Play and plot the generated performance.
note_seq.play_sequence(
    unconditional_ns,
    synth=note_seq.fluidsynth,
    sample_rate=SAMPLE_RATE,
    sf2_path=SF2_PATH)
note_seq.plot_sequence(unconditional_ns)

#@title Download Performance as MIDI
#@markdown Download generated performance as MIDI (optional).
note_seq.sequence_proto_to_midi_file(
    unconditional_ns, '/tmp/unconditional.mid')
files.download('/tmp/unconditional.mid')

#@title Choose Priming Sequence
#@markdown Here you can choose a priming sequence to be continued
#@markdown by the model. We have provided a few, or you can
#@markdown upload your own MIDI file.
#@markdown
#@title Load Model #@markdown The `ismir2021` model transcribes piano only, with note velocities. #@markdown The `mt3` model transcribes multiple simultaneous instruments, #@markdown but without velocities. MODEL = "mt3" #@param["ismir2021", "mt3"] checkpoint_path = f'/content/checkpoints/{MODEL}/' inference_model = InferenceModel(checkpoint_path, MODEL) #@title Upload Audio audio = upload_audio(sample_rate=SAMPLE_RATE) note_seq.notebook_utils.colab_play(audio, sample_rate=SAMPLE_RATE) #@title Transcribe Audio #@markdown This may take a few minutes depending on the length of the WAV file #@markdown you uploaded. est_ns = inference_model(audio) note_seq.play_sequence(est_ns, synth=note_seq.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH) note_seq.plot_sequence(est_ns) #@title Download MIDI Transcription note_seq.sequence_proto_to_midi_file(est_ns, '/tmp/transcribed.mid') files.download('/tmp/transcribed.mid')
# NOTE(review): `twinkle_twinkle` is assumed to be a music_pb2.NoteSequence
# created earlier (not visible in this chunk) — these add its final phrase.
# pitch is a MIDI note number; times are in seconds; velocity is 0-127.
twinkle_twinkle.notes.add(pitch=62, start_time=6.0, end_time=6.5, velocity=80)
twinkle_twinkle.notes.add(pitch=62, start_time=6.5, end_time=7.0, velocity=80)
twinkle_twinkle.notes.add(pitch=60, start_time=7.0, end_time=8.0, velocity=80)
twinkle_twinkle.total_time = 8
twinkle_twinkle.tempos.add(qpm=60)

# This is a colab utility method that visualizes a NoteSequence.
note_seq.plot_sequence(twinkle_twinkle)

# This is a colab utility method that plays a NoteSequence.
# note_seq.play_sequence(twinkle_twinkle,synth=note_seq.fluidsynth)

# Here's another NoteSequence!
teapot = music_pb2.NoteSequence()
teapot.notes.add(pitch=69, start_time=0, end_time=0.5, velocity=80)
teapot.notes.add(pitch=71, start_time=0.5, end_time=1, velocity=80)
teapot.notes.add(pitch=73, start_time=1, end_time=1.5, velocity=80)
teapot.notes.add(pitch=74, start_time=1.5, end_time=2, velocity=80)
teapot.notes.add(pitch=76, start_time=2, end_time=2.5, velocity=80)
teapot.notes.add(pitch=81, start_time=3, end_time=4, velocity=80)
teapot.notes.add(pitch=78, start_time=4, end_time=5, velocity=80)
teapot.notes.add(pitch=81, start_time=5, end_time=6, velocity=80)
teapot.notes.add(pitch=76, start_time=6, end_time=8, velocity=80)
teapot.total_time = 8
teapot.tempos.add(qpm=60)

note_seq.plot_sequence(teapot)
# NOTE: no synth passed here, so play_sequence uses its default synth.
note_seq.play_sequence(teapot)
def create_test_sequences():
    """Create two small test melodies as NoteSequence protos.

    Builds "Twinkle Twinkle Little Star" and "I'm a Little Teapot",
    plotting and playing each one along the way.

    Returns:
        tuple: (twinkle_twinkle, teapot) NoteSequence objects, each
        8 seconds long at 60 qpm.
    """
    twinkle_twinkle = music_pb2.NoteSequence()
    # Add the notes to the sequence.
    # pitch is a MIDI note number; times are in seconds; velocity is 0-127.
    twinkle_twinkle.notes.add(pitch=60, start_time=0.0, end_time=0.5, velocity=80)
    twinkle_twinkle.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=80)
    twinkle_twinkle.notes.add(pitch=67, start_time=1.0, end_time=1.5, velocity=80)
    twinkle_twinkle.notes.add(pitch=67, start_time=1.5, end_time=2.0, velocity=80)
    twinkle_twinkle.notes.add(pitch=69, start_time=2.0, end_time=2.5, velocity=80)
    twinkle_twinkle.notes.add(pitch=69, start_time=2.5, end_time=3.0, velocity=80)
    twinkle_twinkle.notes.add(pitch=67, start_time=3.0, end_time=4.0, velocity=80)
    twinkle_twinkle.notes.add(pitch=65, start_time=4.0, end_time=4.5, velocity=80)
    twinkle_twinkle.notes.add(pitch=65, start_time=4.5, end_time=5.0, velocity=80)
    twinkle_twinkle.notes.add(pitch=64, start_time=5.0, end_time=5.5, velocity=80)
    twinkle_twinkle.notes.add(pitch=64, start_time=5.5, end_time=6.0, velocity=80)
    twinkle_twinkle.notes.add(pitch=62, start_time=6.0, end_time=6.5, velocity=80)
    twinkle_twinkle.notes.add(pitch=62, start_time=6.5, end_time=7.0, velocity=80)
    twinkle_twinkle.notes.add(pitch=60, start_time=7.0, end_time=8.0, velocity=80)
    twinkle_twinkle.total_time = 8
    twinkle_twinkle.tempos.add(qpm=60)

    note_seq.plot_sequence(twinkle_twinkle)
    note_seq.play_sequence(twinkle_twinkle, synth=note_seq.fluidsynth)

    teapot = music_pb2.NoteSequence()
    teapot.notes.add(pitch=69, start_time=0, end_time=0.5, velocity=80)
    teapot.notes.add(pitch=71, start_time=0.5, end_time=1, velocity=80)
    teapot.notes.add(pitch=73, start_time=1, end_time=1.5, velocity=80)
    teapot.notes.add(pitch=74, start_time=1.5, end_time=2, velocity=80)
    teapot.notes.add(pitch=76, start_time=2, end_time=2.5, velocity=80)
    teapot.notes.add(pitch=81, start_time=3, end_time=4, velocity=80)
    teapot.notes.add(pitch=78, start_time=4, end_time=5, velocity=80)
    teapot.notes.add(pitch=81, start_time=5, end_time=6, velocity=80)
    teapot.notes.add(pitch=76, start_time=6, end_time=8, velocity=80)
    teapot.total_time = 8
    teapot.tempos.add(qpm=60)

    note_seq.plot_sequence(teapot)
    # CONSISTENCY FIX: every other play_sequence call in this file uses the
    # fluidsynth synth; the original passed note_seq.synthesize here only.
    note_seq.play_sequence(teapot, synth=note_seq.fluidsynth)

    return twinkle_twinkle, teapot
def generate_midi(midi_input):
    """Generate a piano accompaniment for a melody with the conditioned model.

    Args:
        midi_input: iterable of melody events in the note_seq melody-event
            vocabulary (ints; note_seq.MELODY_NO_EVENT marks rests).
            # assumes monophonic melody events — TODO confirm against callers

    Returns:
        NoteSequence of the generated accompaniment.
    """
    # --- Encode the melody --------------------------------------------------
    # Insert two NO_EVENT tokens between melody events, and transpose real
    # events up an octave (+12), leaving NO_EVENT markers untouched.
    event_padding = 2 * [note_seq.MELODY_NO_EVENT]
    events = [event + 12 if event != note_seq.MELODY_NO_EVENT else event
              for e in midi_input
              for event in [e] + event_padding]
    inputs = melody_conditioned_encoders['inputs'].encode(
        ' '.join(str(e) for e in events))
    melody_ns = note_seq.Melody(events).to_sequence(qpm=150)
    decode_length = 4096
    # decode_length = np.random.randint(len(inputs)*3, len(inputs)*5)

    # --- Build the sampling generator ---------------------------------------
    # BUG FIX: the original declared `global inputs` inside input_generator and
    # created/"burned" the predictor BEFORE `inputs` and `decode_length` were
    # assigned, so the first real sample reflected stale (or unbound) values.
    # The generator now closes over the locals computed above.
    def input_generator():
        while True:
            yield {
                'inputs': np.array([[inputs]], dtype=np.int32),
                'targets': np.zeros([1, 0], dtype=np.int32),
                'decode_length': np.array(decode_length, dtype=np.int32),
            }

    # Start the Estimator, loading from the specified checkpoint.
    input_fn = decoding.make_input_fn_from_generator(input_generator())
    melody_conditioned_samples = estimator.predict(
        input_fn, checkpoint_path=ckpt_path)
    # "Burn" one sample to force checkpoint loading.
    _ = next(melody_conditioned_samples)

    # --- Sample and decode the accompaniment --------------------------------
    sample_ids = next(melody_conditioned_samples)['outputs']
    # Decode to NoteSequence (via an intermediate MIDI file).
    midi_filename = decode(
        sample_ids, encoder=melody_conditioned_encoders['targets'])
    accompaniment_ns = note_seq.midi_file_to_note_sequence(midi_filename)

    # Play and plot the (transposed) input melody.
    # NOTE: the original recomputed events/inputs/melody_ns a second time here
    # with identical results; the duplication has been removed.
    note_seq.play_sequence(
        melody_ns,
        synth=note_seq.fluidsynth,
        sample_rate=SAMPLE_RATE,
        sf2_path=SF2_PATH)
    note_seq.plot_sequence(melody_ns)

    return accompaniment_ns