def testMelodyChords(self):
  melodies = [
      mm.Melody([60, -2, -2, -1],
                start_step=0, steps_per_quarter=1, steps_per_bar=4),
      mm.Melody([62, -2, -2, -1],
                start_step=4, steps_per_quarter=1, steps_per_bar=4),
  ]
  quantized_sequence = mm.quantize_note_sequence(
      self.note_sequence, steps_per_quarter=1)
  chords = chord_utils.event_list_chords(quantized_sequence, melodies)
  expected_chords = [
      [mm.NO_CHORD, mm.NO_CHORD, 'C', 'C'],
      ['C', 'C', 'G7', 'G7'],
  ]
  self.assertEqual(expected_chords, chords)
def _generate(self, input_sequence, generator_options):
  if len(generator_options.input_sections) > 1:
    raise mm.SequenceGeneratorException(
        'This model supports at most one input_sections message, but got %s' %
        len(generator_options.input_sections))
  if len(generator_options.generate_sections) != 1:
    raise mm.SequenceGeneratorException(
        'This model supports only 1 generate_sections message, but got %s' %
        len(generator_options.generate_sections))

  qpm = (input_sequence.tempos[0].qpm
         if input_sequence and input_sequence.tempos
         else mm.DEFAULT_QUARTERS_PER_MINUTE)
  steps_per_second = mm.steps_per_quarter_to_steps_per_second(
      self.steps_per_quarter, qpm)

  generate_section = generator_options.generate_sections[0]
  if generator_options.input_sections:
    input_section = generator_options.input_sections[0]
    primer_sequence = mm.trim_note_sequence(
        input_sequence, input_section.start_time, input_section.end_time)
    input_start_step = mm.quantize_to_step(
        input_section.start_time, steps_per_second, quantize_cutoff=0)
  else:
    primer_sequence = input_sequence
    input_start_step = 0

  last_end_time = (max(n.end_time for n in primer_sequence.notes)
                   if primer_sequence.notes else 0)
  if last_end_time > generate_section.start_time:
    raise mm.SequenceGeneratorException(
        'Got GenerateSection request for section that is before the end of '
        'the NoteSequence. This model can only extend sequences. Requested '
        'start time: %s, Final note end time: %s' %
        (generate_section.start_time, last_end_time))

  # Quantize the priming sequence.
  quantized_sequence = mm.quantize_note_sequence(
      primer_sequence, self.steps_per_quarter)

  # Setting gap_bars to infinite ensures that the entire input will be used.
  extracted_melodies, _ = mm.extract_melodies(
      quantized_sequence, search_start_step=input_start_step, min_bars=0,
      min_unique_pitches=1, gap_bars=float('inf'),
      ignore_polyphonic_notes=True)
  assert len(extracted_melodies) <= 1

  start_step = mm.quantize_to_step(
      generate_section.start_time, steps_per_second, quantize_cutoff=0)
  # Quantizing end_step with quantize_cutoff=1.0 always rounds down, so the
  # generated sequence never extends past the requested end time.
  end_step = mm.quantize_to_step(
      generate_section.end_time, steps_per_second, quantize_cutoff=1.0)

  if extracted_melodies and extracted_melodies[0]:
    melody = extracted_melodies[0]
  else:
    # If no melody could be extracted, create an empty melody that starts 1
    # step before the request start_step. This will result in 1 step of
    # silence when the melody is extended below.
    steps_per_bar = int(
        mm.steps_per_bar_in_quantized_sequence(quantized_sequence))
    melody = mm.Melody([],
                       start_step=max(0, start_step - 1),
                       steps_per_bar=steps_per_bar,
                       steps_per_quarter=self.steps_per_quarter)

  # Ensure that the melody extends up to the step we want to start generating.
  melody.set_length(start_step - melody.start_step)

  # Extract generation arguments from generator options.
  arg_types = {
      'temperature': lambda arg: arg.float_value,
      'beam_size': lambda arg: arg.int_value,
      'branch_factor': lambda arg: arg.int_value,
      'steps_per_iteration': lambda arg: arg.int_value
  }
  args = dict((name, value_fn(generator_options.args[name]))
              for name, value_fn in arg_types.items()
              if name in generator_options.args)

  generated_melody = self._model.generate_melody(
      end_step - melody.start_step, melody, **args)
  generated_sequence = generated_melody.to_sequence(qpm=qpm)
  assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
  return generated_sequence
def _generate(self, input_sequence, generator_options):
  if len(generator_options.input_sections) > 1:
    raise mm.SequenceGeneratorException(
        'This model supports at most one input_sections message, but got %s' %
        len(generator_options.input_sections))
  if len(generator_options.generate_sections) != 1:
    raise mm.SequenceGeneratorException(
        'This model supports only 1 generate_sections message, but got %s' %
        len(generator_options.generate_sections))

  qpm = (input_sequence.tempos[0].qpm
         if input_sequence and input_sequence.tempos
         else mm.DEFAULT_QUARTERS_PER_MINUTE)

  generate_section = generator_options.generate_sections[0]
  if generator_options.input_sections:
    # Use primer melody from input section only. Take backing chords from
    # beginning of input section through end of generate section.
    input_section = generator_options.input_sections[0]
    primer_sequence = mm.extract_subsequence(
        input_sequence, input_section.start_time, input_section.end_time)
    backing_sequence = mm.extract_subsequence(
        input_sequence, input_section.start_time, generate_section.end_time)
    input_start_step = self.seconds_to_steps(input_section.start_time, qpm)
  else:
    # No input section. Take primer melody from the beginning of the sequence
    # up until the start of the generate section.
    primer_sequence = mm.extract_subsequence(
        input_sequence, 0.0, generate_section.start_time)
    backing_sequence = mm.extract_subsequence(
        input_sequence, 0.0, generate_section.end_time)
    input_start_step = 0

  last_end_time = (max(n.end_time for n in primer_sequence.notes)
                   if primer_sequence.notes else 0)
  if last_end_time >= generate_section.start_time:
    raise mm.SequenceGeneratorException(
        'Got GenerateSection request for section that is before or equal to '
        'the end of the input section. This model can only extend melodies. '
        'Requested start time: %s, Final note end time: %s' %
        (generate_section.start_time, last_end_time))

  # Quantize the priming and backing sequences.
  quantized_primer_sequence = mm.quantize_note_sequence(
      primer_sequence, self._steps_per_quarter)
  quantized_backing_sequence = mm.quantize_note_sequence(
      backing_sequence, self._steps_per_quarter)

  # Setting gap_bars to infinite ensures that the entire input will be used.
  extracted_melodies, _ = mm.extract_melodies(
      quantized_primer_sequence, search_start_step=input_start_step,
      min_bars=0, min_unique_pitches=1, gap_bars=float('inf'),
      ignore_polyphonic_notes=True)
  assert len(extracted_melodies) <= 1

  start_step = self.seconds_to_steps(generate_section.start_time, qpm)
  end_step = self.seconds_to_steps(generate_section.end_time, qpm)

  if extracted_melodies and extracted_melodies[0]:
    melody = extracted_melodies[0]
  else:
    # If no melody could be extracted, create an empty melody that starts 1
    # step before the request start_step. This will result in 1 step of
    # silence when the melody is extended below.
    melody = mm.Melody([], start_step=max(0, start_step - 1))

  extracted_chords, _ = mm.extract_chords(quantized_backing_sequence)
  chords = extracted_chords[0]

  # Make sure that chords and melody start on the same step.
  if chords.start_step < melody.start_step:
    chords.set_length(len(chords) - melody.start_step + chords.start_step)

  assert chords.end_step == end_step

  # Ensure that the melody extends up to the step we want to start generating.
  melody.set_length(start_step - melody.start_step)

  # Extract generation arguments from generator options.
  arg_types = {
      'temperature': lambda arg: arg.float_value,
      'beam_size': lambda arg: arg.int_value,
      'branch_factor': lambda arg: arg.int_value,
      'steps_per_iteration': lambda arg: arg.int_value
  }
  args = dict((name, value_fn(generator_options.args[name]))
              for name, value_fn in arg_types.items()
              if name in generator_options.args)

  generated_melody = self._model.generate_melody(melody, chords, **args)
  generated_lead_sheet = mm.LeadSheet(generated_melody, chords)
  generated_sequence = generated_lead_sheet.to_sequence(qpm=qpm)
  assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
  return generated_sequence
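# NOTE: a minimal, self-contained sketch of the argument-extraction idiom used
# by the _generate methods above, with types.SimpleNamespace as a hypothetical
# stand-in for the GeneratorOptions.args proto values (not the real Magenta
# types).
from types import SimpleNamespace

example_arg_types = {
    'temperature': lambda arg: arg.float_value,
    'beam_size': lambda arg: arg.int_value,
}
# Hypothetical options: only 'temperature' was supplied by the caller.
example_options_args = {'temperature': SimpleNamespace(float_value=1.2)}
# Arguments the caller did not supply are simply skipped.
example_args = dict((name, value_fn(example_options_args[name]))
                    for name, value_fn in example_arg_types.items()
                    if name in example_options_args)
print(example_args)  # {'temperature': 1.2}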
def melody_fn():
  return mm.Melody(
      steps_per_bar=steps_per_bar, steps_per_quarter=steps_per_quarter)
def _generate(self, input_sequence, generator_options):
  if len(generator_options.input_sections) > 1:
    raise mm.SequenceGeneratorException(
        'This model supports at most one input_sections message, but got %s' %
        len(generator_options.input_sections))
  if len(generator_options.generate_sections) != 1:
    raise mm.SequenceGeneratorException(
        'This model supports only 1 generate_sections message, but got %s' %
        len(generator_options.generate_sections))

  qpm = (input_sequence.tempos[0].qpm
         if input_sequence and input_sequence.tempos
         else mm.DEFAULT_QUARTERS_PER_MINUTE)
  steps_per_second = mm.steps_per_quarter_to_steps_per_second(
      self.steps_per_quarter, qpm)

  generate_section = generator_options.generate_sections[0]
  if generator_options.input_sections:
    input_section = generator_options.input_sections[0]
    primer_sequence = mm.trim_note_sequence(
        input_sequence, input_section.start_time, input_section.end_time)
    input_start_step = mm.quantize_to_step(
        input_section.start_time, steps_per_second)
  else:
    primer_sequence = input_sequence
    input_start_step = 0

  last_end_time = (max(n.end_time for n in primer_sequence.notes)
                   if primer_sequence.notes else 0)
  if last_end_time > generate_section.start_time:
    raise mm.SequenceGeneratorException(
        'Got GenerateSection request for section that is before the end of '
        'the NoteSequence. This model can only extend sequences. Requested '
        'start time: %s, Final note end time: %s' %
        (generate_section.start_time, last_end_time))

  # Quantize the priming sequence.
  quantized_sequence = mm.quantize_note_sequence(
      primer_sequence, self.steps_per_quarter)

  # Setting gap_bars to infinite ensures that the entire input will be used.
  extracted_melodies, _ = mm.extract_melodies(
      quantized_sequence, search_start_step=input_start_step,
      min_bars=0, min_unique_pitches=1, gap_bars=float('inf'),
      ignore_polyphonic_notes=True)
  assert len(extracted_melodies) <= 1

  start_step = mm.quantize_to_step(
      generate_section.start_time, steps_per_second)
  end_step = mm.quantize_to_step(generate_section.end_time, steps_per_second)

  if extracted_melodies and extracted_melodies[0]:
    melody = extracted_melodies[0]
  else:
    # If no melody could be extracted, create an empty melody that starts 1
    # step before the request start_step. This will result in 1 step of
    # silence when the melody is extended below.
    steps_per_bar = int(
        mm.steps_per_bar_in_quantized_sequence(quantized_sequence))
    melody = mm.Melody([],
                       start_step=max(0, start_step - 1),
                       steps_per_bar=steps_per_bar,
                       steps_per_quarter=self.steps_per_quarter)

  # Ensure that the melody extends up to the step we want to start generating.
  melody.set_length(start_step - melody.start_step)

  # Extract generation arguments from generator options.
  arg_types = {
      'temperature': lambda arg: arg.float_value,
      'beam_size': lambda arg: arg.int_value,
      'branch_factor': lambda arg: arg.int_value,
      'steps_per_iteration': lambda arg: arg.int_value
  }
  args = dict((name, value_fn(generator_options.args[name]))
              for name, value_fn in arg_types.items()
              if name in generator_options.args)

  generated_melody = self._model.generate_melody(
      end_step - melody.start_step, melody, **args)
  generated_sequence = generated_melody.to_sequence(qpm=qpm)
  assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
  return generated_sequence
def seq_to_midi_file(self, seq, output_file):
  # Convert the integer event sequence to a Melody, render it as a
  # NoteSequence at 80 qpm, and write it out as a MIDI file.
  melody = mm.Melody(events=seq.tolist())
  note_sequence = melody.to_sequence(qpm=80.0)
  mm.sequence_proto_to_midi_file(note_sequence, output_file)
  return seq
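# NOTE: a hedged usage sketch for seq_to_midi_file, assuming `seq` is a 1-D
# NumPy array of Melody events (MIDI pitches plus the -1 note-off and -2
# no-event markers used elsewhere in this code) and `converter` is a
# hypothetical instance of the class that defines seq_to_midi_file.
import numpy as np

example_seq = np.array([60, -2, -2, -1, 62, -2, -2, -1])  # C4 then D4
converter.seq_to_midi_file(example_seq, '/tmp/example.mid')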
def _primer_melody_to_event_sequence(self, input_sequence, generator_options,
                                     config):
  qpm = (input_sequence.tempos[0].qpm
         if input_sequence and input_sequence.tempos
         else mm.DEFAULT_QUARTERS_PER_MINUTE)
  steps_per_second = mm.steps_per_quarter_to_steps_per_second(
      self.steps_per_quarter, qpm)

  generate_section = generator_options.generate_sections[0]
  if generator_options.input_sections:
    input_section = generator_options.input_sections[0]
    primer_sequence = mm.trim_note_sequence(
        input_sequence, input_section.start_time, input_section.end_time)
    input_start_step = mm.quantize_to_step(
        input_section.start_time, steps_per_second, quantize_cutoff=0)
  else:
    primer_sequence = input_sequence
    input_start_step = 0

  last_end_time = (max(n.end_time for n in primer_sequence.notes)
                   if primer_sequence.notes else 0)
  if last_end_time > generate_section.start_time:
    raise mm.SequenceGeneratorException(
        'Got GenerateSection request for section that is before the end of '
        'the NoteSequence. This model can only extend sequences. Requested '
        'start time: %s, Final note end time: %s' %
        (generate_section.start_time, last_end_time))

  # Quantize the priming sequence.
  quantized_sequence = mm.quantize_note_sequence(
      primer_sequence, self.steps_per_quarter)

  # Setting gap_bars to infinite ensures that the entire input will be used.
  extracted_melodies, _ = mm.extract_melodies(
      quantized_sequence, search_start_step=input_start_step,
      min_bars=0, min_unique_pitches=1, gap_bars=float('inf'),
      ignore_polyphonic_notes=True)
  assert len(extracted_melodies) <= 1

  start_step = mm.quantize_to_step(
      generate_section.start_time, steps_per_second, quantize_cutoff=0)
  # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
  # always rounds down. This avoids generating a sequence that ends at 5.0
  # seconds when the requested end time is 4.99.
  end_step = mm.quantize_to_step(
      generate_section.end_time, steps_per_second, quantize_cutoff=1.0)

  if extracted_melodies and extracted_melodies[0]:
    melody = extracted_melodies[0]
  else:
    # If no melody could be extracted, create an empty melody that starts 1
    # step before the request start_step. This will result in 1 step of
    # silence when the melody is extended below.
    steps_per_bar = int(
        mm.steps_per_bar_in_quantized_sequence(quantized_sequence))
    melody = mm.Melody([],
                       start_step=max(0, start_step - 1),
                       steps_per_bar=steps_per_bar,
                       steps_per_quarter=self.steps_per_quarter)

  # Ensure that the melody extends up to the step we want to start generating.
  melody.set_length(start_step - melody.start_step - 2)

  now_encoding = config.encoder_decoder._one_hot_encoding

  # Convert the primer melody into model events, then one-hot encode each one.
  primer_events = self._model.primer_melody_to_events(
      end_step - melody.start_step, melody)
  for i, event in enumerate(primer_events):
    primer_events[i] = now_encoding.encode_event(event)
  return primer_events
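# NOTE: a small sketch of the quantize_cutoff behaviour the comments above rely
# on, assuming mm.quantize_to_step rounds down when quantize_cutoff=1.0 and
# rounds any fractional step up when quantize_cutoff=0.
import magenta.music as mm

example_steps_per_second = 1.0
example_end_step = mm.quantize_to_step(
    4.99, example_steps_per_second, quantize_cutoff=1.0)
example_start_step = mm.quantize_to_step(
    4.01, example_steps_per_second, quantize_cutoff=0)
print(example_end_step, example_start_step)  # expected: 4 5 under the assumption above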
           if note.instrument == melody_instrument]
  del melody_ns.notes[:]
  melody_ns.notes.extend(
      sorted(notes, key=lambda note: note.start_time))
  for i in range(len(melody_ns.notes) - 1):
    melody_ns.notes[i].end_time = melody_ns.notes[i + 1].start_time
  inputs = melody_conditioned_encoders['inputs'].encode_note_sequence(
      melody_ns)
else:
  # Use one of the provided melodies.
  events = [event + 12 if event != mm.MELODY_NO_EVENT else event
            for e in melodies[melody]
            for event in [e] + event_padding]
  inputs = melody_conditioned_encoders['inputs'].encode(
      ' '.join(str(e) for e in events))
  melody_ns = mm.Melody(events).to_sequence(qpm=150)

# Play and plot the melody.
mm.play_sequence(
    melody_ns,
    synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
mm.plot_sequence(melody_ns)

#@title Generate Accompaniment for Melody
#@markdown Generate a piano performance consisting of the chosen
#@markdown melody plus accompaniment.

# Generate sample events.
decode_length = 4096
sample_ids = next(melody_conditioned_samples)['outputs']