def _generate(self, input_sequence, generator_options):
  if len(generator_options.input_sections) > 1:
    raise mm.SequenceGeneratorException(
        'This model supports at most one input_sections message, but got %s' %
        len(generator_options.input_sections))
  if len(generator_options.generate_sections) != 1:
    raise mm.SequenceGeneratorException(
        'This model supports only 1 generate_sections message, but got %s' %
        len(generator_options.generate_sections))

  qpm = (input_sequence.tempos[0].qpm
         if input_sequence and input_sequence.tempos
         else mm.DEFAULT_QUARTERS_PER_MINUTE)
  steps_per_second = mm.steps_per_quarter_to_steps_per_second(
      self.steps_per_quarter, qpm)

  generate_section = generator_options.generate_sections[0]
  if generator_options.input_sections:
    input_section = generator_options.input_sections[0]
    primer_sequence = mm.trim_note_sequence(
        input_sequence, input_section.start_time, input_section.end_time)
    input_start_step = mm.quantize_to_step(
        input_section.start_time, steps_per_second, quantize_cutoff=0)
  else:
    primer_sequence = input_sequence
    input_start_step = 0

  last_end_time = (max(n.end_time for n in primer_sequence.notes)
                   if primer_sequence.notes else 0)
  if last_end_time > generate_section.start_time:
    raise mm.SequenceGeneratorException(
        'Got GenerateSection request for section that is before the end of '
        'the NoteSequence. This model can only extend sequences. Requested '
        'start time: %s, Final note end time: %s' %
        (generate_section.start_time, last_end_time))

  # Quantize the priming sequence.
  quantized_sequence = mm.quantize_note_sequence(
      primer_sequence, self.steps_per_quarter)
  # Setting gap_bars to infinite ensures that the entire input will be used.
  extracted_melodies, _ = mm.extract_melodies(
      quantized_sequence, search_start_step=input_start_step, min_bars=0,
      min_unique_pitches=1, gap_bars=float('inf'),
      ignore_polyphonic_notes=True)
  assert len(extracted_melodies) <= 1

  start_step = mm.quantize_to_step(
      generate_section.start_time, steps_per_second, quantize_cutoff=0)
  # quantize_cutoff=1.0 makes end_step always round down, so a requested
  # end time of 4.99 seconds does not yield a sequence ending at 5.0.
  end_step = mm.quantize_to_step(
      generate_section.end_time, steps_per_second, quantize_cutoff=1.0)

  if extracted_melodies and extracted_melodies[0]:
    melody = extracted_melodies[0]
  else:
    # If no melody could be extracted, create an empty melody that starts
    # 1 step before start_step, yielding 1 step of silence when extended.
    steps_per_bar = int(
        mm.steps_per_bar_in_quantized_sequence(quantized_sequence))
    melody = mm.Melody([],
                       start_step=max(0, start_step - 1),
                       steps_per_bar=steps_per_bar,
                       steps_per_quarter=self.steps_per_quarter)

  # Ensure the melody extends up to the step where generation starts.
  melody.set_length(start_step - melody.start_step)

  # Extract generation arguments from generator options.
  arg_types = {
      'temperature': lambda arg: arg.float_value,
      'beam_size': lambda arg: arg.int_value,
      'branch_factor': lambda arg: arg.int_value,
      'steps_per_iteration': lambda arg: arg.int_value
  }
  args = dict((name, value_fn(generator_options.args[name]))
              for name, value_fn in arg_types.items()
              if name in generator_options.args)

  generated_melody = self._model.generate_melody(
      end_step - melody.start_step, melody, **args)
  generated_sequence = generated_melody.to_sequence(qpm=qpm)
  assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
  return generated_sequence
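
The quantize_cutoff values used above control the rounding direction when converting seconds to steps. The following toy sketch (an illustrative reimplementation, not Magenta's mm.quantize_to_step) shows why start_step uses a cutoff of 0 while end_step uses 1.0: with cutoff 0 any fractional step rounds up, and with cutoff 1.0 it always rounds down, so a requested end time of 4.99 seconds never produces a sequence ending at 5.0 seconds.

def toy_quantize_to_step(unquantized_seconds, steps_per_second,
                         quantize_cutoff=0.5):
  # Fractional steps >= quantize_cutoff round up to the next step.
  unquantized_steps = unquantized_seconds * steps_per_second
  return int(unquantized_steps + (1 - quantize_cutoff))

print(toy_quantize_to_step(4.99, 1, quantize_cutoff=0))    # 5 (rounds up)
print(toy_quantize_to_step(4.99, 1, quantize_cutoff=1.0))  # 4 (rounds down)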
    def _generate(self, input_sequence, generator_options):
        if len(generator_options.input_sections) > 1:
            raise mm.SequenceGeneratorError(
                'This model supports at most one input_sections message, but got %s'
                % len(generator_options.input_sections))
        if len(generator_options.generate_sections) != 1:
            raise mm.SequenceGeneratorError(
                'This model supports only 1 generate_sections message, but got %s'
                % len(generator_options.generate_sections))

        # This sequence will be quantized later, so it is guaranteed to have only 1
        # tempo.
        qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
        if input_sequence.tempos:
            qpm = input_sequence.tempos[0].qpm

        steps_per_second = mm.steps_per_quarter_to_steps_per_second(
            self.steps_per_quarter, qpm)

        generate_section = generator_options.generate_sections[0]
        if generator_options.input_sections:
            input_section = generator_options.input_sections[0]
            primer_sequence = mm.trim_note_sequence(input_sequence,
                                                    input_section.start_time,
                                                    input_section.end_time)
            input_start_step = mm.quantize_to_step(input_section.start_time,
                                                   steps_per_second,
                                                   quantize_cutoff=0)
        else:
            primer_sequence = input_sequence
            input_start_step = 0

        if primer_sequence.notes:
            last_end_time = max(n.end_time for n in primer_sequence.notes)
        else:
            last_end_time = 0

        if last_end_time > generate_section.start_time:
            raise mm.SequenceGeneratorError(
                'Got GenerateSection request for section that is before or equal to '
                'the end of the NoteSequence. This model can only extend sequences. '
                'Requested start time: %s, Final note end time: %s' %
                (generate_section.start_time, last_end_time))

        # Quantize the priming sequence.
        quantized_primer_sequence = mm.quantize_note_sequence(
            primer_sequence, self.steps_per_quarter)

        extracted_seqs, _ = pianoroll_pipeline.extract_pianoroll_sequences(
            quantized_primer_sequence, start_step=input_start_step)
        assert len(extracted_seqs) <= 1

        generate_start_step = mm.quantize_to_step(generate_section.start_time,
                                                  steps_per_second,
                                                  quantize_cutoff=0)
        # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
        # always rounds down. This avoids generating a sequence that ends at 5.0
        # seconds when the requested end time is 4.99.
        generate_end_step = mm.quantize_to_step(generate_section.end_time,
                                                steps_per_second,
                                                quantize_cutoff=1.0)

        if extracted_seqs and extracted_seqs[0]:
            pianoroll_seq = extracted_seqs[0]
        else:
            raise ValueError('No priming pianoroll could be extracted.')

        # Ensure that the track extends up to the step we want to start generating.
        pianoroll_seq.set_length(generate_start_step -
                                 pianoroll_seq.start_step)

        # Extract generation arguments from generator options.
        arg_types = {
            'beam_size': lambda arg: arg.int_value,
            'branch_factor': lambda arg: arg.int_value,
        }
        args = dict((name, value_fn(generator_options.args[name]))
                    for name, value_fn in arg_types.items()
                    if name in generator_options.args)

        total_steps = pianoroll_seq.num_steps + (generate_end_step -
                                                 generate_start_step)

        pianoroll_seq = self._model.generate_pianoroll_sequence(
            total_steps, pianoroll_seq, **args)
        pianoroll_seq.set_length(total_steps)

        generated_sequence = pianoroll_seq.to_sequence(qpm=qpm)
        assert (generated_sequence.total_time -
                generate_section.end_time) <= 1e-5
        return generated_sequence
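
The arg_types/args idiom above forwards only the options the generator recognizes, each coerced through its typed accessor. Below is a self-contained sketch of the same pattern, with a namedtuple standing in for the protobuf arg values (all names here are illustrative):

from collections import namedtuple

Arg = namedtuple('Arg', ['int_value', 'float_value'])

generator_args = {
    'beam_size': Arg(int_value=3, float_value=0.0),
    'unknown_option': Arg(int_value=7, float_value=0.0),  # ignored below
}
arg_types = {
    'beam_size': lambda arg: arg.int_value,
    'branch_factor': lambda arg: arg.int_value,  # absent, so skipped
}
args = dict((name, value_fn(generator_args[name]))
            for name, value_fn in arg_types.items()
            if name in generator_args)
print(args)  # {'beam_size': 3}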
Example #3
    def _generate(self, input_sequence, generator_options):
        if len(generator_options.input_sections) > 1:
            raise sequence_generator.SequenceGeneratorError(
                'This model supports at most one input_sections message, but got %s'
                % len(generator_options.input_sections))
        if len(generator_options.generate_sections) != 1:
            raise sequence_generator.SequenceGeneratorError(
                'This model supports only 1 generate_sections message, but got %s'
                % len(generator_options.generate_sections))

        # This sequence will be quantized later, so it is guaranteed to have only 1
        # tempo.
        qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
        if input_sequence.tempos:
            qpm = input_sequence.tempos[0].qpm

        steps_per_second = mm.steps_per_quarter_to_steps_per_second(
            self.steps_per_quarter, qpm)

        generate_section = generator_options.generate_sections[0]
        if generator_options.input_sections:
            input_section = generator_options.input_sections[0]
            primer_sequence = mm.trim_note_sequence(input_sequence,
                                                    input_section.start_time,
                                                    input_section.end_time)
            input_start_step = mm.quantize_to_step(input_section.start_time,
                                                   steps_per_second,
                                                   quantize_cutoff=0)
        else:
            primer_sequence = input_sequence
            input_start_step = 0

        if primer_sequence.notes:
            last_end_time = max(n.end_time for n in primer_sequence.notes)
        else:
            last_end_time = 0

        if last_end_time > generate_section.start_time:
            raise sequence_generator.SequenceGeneratorError(
                'Got GenerateSection request for section that is before or equal to '
                'the end of the NoteSequence. This model can only extend sequences. '
                'Requested start time: %s, Final note end time: %s' %
                (generate_section.start_time, last_end_time))

        # Quantize the priming sequence.
        quantized_primer_sequence = mm.quantize_note_sequence(
            primer_sequence, self.steps_per_quarter)

        extracted_seqs, _ = polyphony_lib.extract_polyphonic_sequences(
            quantized_primer_sequence, start_step=input_start_step)
        assert len(extracted_seqs) <= 1

        generate_start_step = mm.quantize_to_step(generate_section.start_time,
                                                  steps_per_second,
                                                  quantize_cutoff=0)
        # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
        # always rounds down. This avoids generating a sequence that ends at 5.0
        # seconds when the requested end time is 4.99.
        generate_end_step = mm.quantize_to_step(generate_section.end_time,
                                                steps_per_second,
                                                quantize_cutoff=1.0)

        if extracted_seqs and extracted_seqs[0]:
            poly_seq = extracted_seqs[0]
        else:
            # If no track could be extracted, create an empty track that starts at the
            # requested generate_start_step. This will result in a sequence that
            # contains only the START token.
            poly_seq = polyphony_lib.PolyphonicSequence(
                steps_per_quarter=(quantized_primer_sequence.quantization_info.
                                   steps_per_quarter),
                start_step=generate_start_step)

        # Ensure that the track extends up to the step we want to start generating.
        poly_seq.set_length(generate_start_step - poly_seq.start_step)
        # Trim any trailing end events to prepare the sequence for more events to be
        # appended during generation.
        poly_seq.trim_trailing_end_events()

        # Extract generation arguments from generator options.
        arg_types = {
            'temperature': lambda arg: arg.float_value,
            'beam_size': lambda arg: arg.int_value,
            'branch_factor': lambda arg: arg.int_value,
            'steps_per_iteration': lambda arg: arg.int_value
        }
        args = dict((name, value_fn(generator_options.args[name]))
                    for name, value_fn in arg_types.items()
                    if name in generator_options.args)

        # Inject the priming sequence as melody in the output of the generator, if
        # requested.
        # This option starts with no_ so that if it is unspecified (as will be the
        # case when used with the midi interface), the default will be to inject the
        # primer.
        if not (generator_options.args['no_inject_primer_during_generation'].
                bool_value):
            melody_to_inject = copy.deepcopy(poly_seq)
            if generator_options.args['condition_on_primer'].bool_value:
                inject_start_step = poly_seq.num_steps
            else:
                # 0 steps because we'll overwrite poly_seq with a blank sequence below.
                inject_start_step = 0

            args['modify_events_callback'] = functools.partial(
                _inject_melody, melody_to_inject, inject_start_step)

        # If we don't want to condition on the priming sequence, then overwrite
        # poly_seq with a blank sequence to feed into the generator.
        if not generator_options.args['condition_on_primer'].bool_value:
            poly_seq = polyphony_lib.PolyphonicSequence(
                steps_per_quarter=(quantized_primer_sequence.quantization_info.
                                   steps_per_quarter),
                start_step=generate_start_step)
            poly_seq.trim_trailing_end_events()

        total_steps = poly_seq.num_steps + (generate_end_step -
                                            generate_start_step)

        while poly_seq.num_steps < total_steps:
            # Assume it takes ~5 rnn steps to generate one quantized step.
            # Can't know for sure until generation is finished because the number of
            # notes per quantized step is variable.
            steps_to_gen = total_steps - poly_seq.num_steps
            rnn_steps_to_gen = 5 * steps_to_gen
            tf.logging.info(
                'Need to generate %d more steps for this sequence, will try asking '
                'for %d RNN steps' % (steps_to_gen, rnn_steps_to_gen))
            poly_seq = self._model.generate_polyphonic_sequence(
                len(poly_seq) + rnn_steps_to_gen, poly_seq, **args)
        poly_seq.set_length(total_steps)

        if generator_options.args['condition_on_primer'].bool_value:
            generated_sequence = poly_seq.to_sequence(qpm=qpm)
        else:
            # Specify a base_note_sequence because the priming sequence was not
            # included in poly_seq.
            generated_sequence = poly_seq.to_sequence(
                qpm=qpm, base_note_sequence=copy.deepcopy(primer_sequence))
        assert (generated_sequence.total_time -
                generate_section.end_time) <= 1e-5
        return generated_sequence
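
The while loop above keeps requesting more RNN events because the number of events per quantized step is variable. Here is a model-free sketch of that loop, where FakeSeq and fake_generate are toy stand-ins for PolyphonicSequence and generate_polyphonic_sequence (in this toy, every 3 events happen to advance one step):

class FakeSeq:
  def __init__(self):
    self.events = []

  def __len__(self):
    return len(self.events)

  @property
  def num_steps(self):
    return len(self.events) // 3  # toy ratio: 3 events per quantized step

def fake_generate(num_events, seq):
  # Stand-in for the model: extend the sequence to num_events events.
  seq.events.extend(['event'] * (num_events - len(seq)))
  return seq

total_steps, seq = 8, FakeSeq()
while seq.num_steps < total_steps:
  steps_to_gen = total_steps - seq.num_steps
  # Assume ~5 events per remaining quantized step, as in the code above.
  seq = fake_generate(len(seq) + 5 * steps_to_gen, seq)
seq.events = seq.events[:total_steps * 3]  # analogous to set_length()
print(seq.num_steps)  # 8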
Example #4
    def _generate(self, input_sequence, generator_options):
        if len(generator_options.input_sections) > 1:
            raise mm.SequenceGeneratorException(
                'This model supports at most one input_sections message, but got %s'
                % len(generator_options.input_sections))
        if len(generator_options.generate_sections) != 1:
            raise mm.SequenceGeneratorException(
                'This model supports only 1 generate_sections message, but got %s'
                % len(generator_options.generate_sections))

        qpm = (input_sequence.tempos[0].qpm if input_sequence
               and input_sequence.tempos else mm.DEFAULT_QUARTERS_PER_MINUTE)
        steps_per_second = mm.steps_per_quarter_to_steps_per_second(
            self.steps_per_quarter, qpm)

        generate_section = generator_options.generate_sections[0]
        if generator_options.input_sections:
            input_section = generator_options.input_sections[0]
            primer_sequence = mm.trim_note_sequence(input_sequence,
                                                    input_section.start_time,
                                                    input_section.end_time)
            input_start_step = mm.quantize_to_step(input_section.start_time,
                                                   steps_per_second)
        else:
            primer_sequence = input_sequence
            input_start_step = 0

        last_end_time = (max(n.end_time for n in primer_sequence.notes)
                         if primer_sequence.notes else 0)
        if last_end_time > generate_section.start_time:
            raise mm.SequenceGeneratorException(
                'Got GenerateSection request for section that is before the end of '
                'the NoteSequence. This model can only extend sequences. Requested '
                'start time: %s, Final note end time: %s' %
                (generate_section.start_time, last_end_time))

        # Quantize the priming sequence.
        quantized_sequence = mm.quantize_note_sequence(primer_sequence,
                                                       self.steps_per_quarter)
        # Setting gap_bars to infinite ensures that the entire input will be used.
        extracted_melodies, _ = mm.extract_melodies(
            quantized_sequence,
            search_start_step=input_start_step,
            min_bars=0,
            min_unique_pitches=1,
            gap_bars=float('inf'),
            ignore_polyphonic_notes=True)
        assert len(extracted_melodies) <= 1

        start_step = mm.quantize_to_step(generate_section.start_time,
                                         steps_per_second)
        end_step = mm.quantize_to_step(generate_section.end_time,
                                       steps_per_second)

        if extracted_melodies and extracted_melodies[0]:
            melody = extracted_melodies[0]
        else:
            # If no melody could be extracted, create an empty melody that starts 1
            # step before the request start_step. This will result in 1 step of
            # silence when the melody is extended below.
            steps_per_bar = int(
                mm.steps_per_bar_in_quantized_sequence(quantized_sequence))
            melody = mm.Melody([],
                               start_step=max(0, start_step - 1),
                               steps_per_bar=steps_per_bar,
                               steps_per_quarter=self.steps_per_quarter)

        # Ensure that the melody extends up to the step we want to start generating.
        melody.set_length(start_step - melody.start_step)

        # Extract generation arguments from generator options.
        arg_types = {
            'temperature': lambda arg: arg.float_value,
            'beam_size': lambda arg: arg.int_value,
            'branch_factor': lambda arg: arg.int_value,
            'steps_per_iteration': lambda arg: arg.int_value
        }
        args = dict((name, value_fn(generator_options.args[name]))
                    for name, value_fn in arg_types.items()
                    if name in generator_options.args)

        generated_melody = self._model.generate_melody(
            end_step - melody.start_step, melody, **args)
        generated_sequence = generated_melody.to_sequence(qpm=qpm)
        assert (generated_sequence.total_time -
                generate_section.end_time) <= 1e-5
        return generated_sequence
  def _generate(self, input_sequence, generator_options):
    if len(generator_options.input_sections) > 1:
      raise mm.SequenceGeneratorException(
          'This model supports at most one input_sections message, but got %s' %
          len(generator_options.input_sections))
    if len(generator_options.generate_sections) != 1:
      raise mm.SequenceGeneratorException(
          'This model supports only 1 generate_sections message, but got %s' %
          len(generator_options.generate_sections))

    qpm = (input_sequence.tempos[0].qpm
           if input_sequence and input_sequence.tempos
           else mm.DEFAULT_QUARTERS_PER_MINUTE)
    steps_per_second = mm.steps_per_quarter_to_steps_per_second(
        self.steps_per_quarter, qpm)

    generate_section = generator_options.generate_sections[0]
    if generator_options.input_sections:
      input_section = generator_options.input_sections[0]
      primer_sequence = mm.trim_note_sequence(
          input_sequence, input_section.start_time, input_section.end_time)
      input_start_step = mm.quantize_to_step(
          input_section.start_time, steps_per_second, quantize_cutoff=0)
    else:
      primer_sequence = input_sequence
      input_start_step = 0

    last_end_time = (max(n.end_time for n in primer_sequence.notes)
                     if primer_sequence.notes else 0)
    if last_end_time > generate_section.start_time:
      raise mm.SequenceGeneratorException(
          'Got GenerateSection request for section that is before the end of '
          'the NoteSequence. This model can only extend sequences. Requested '
          'start time: %s, Final note end time: %s' %
          (generate_section.start_time, last_end_time))

    # Quantize the priming sequence.
    quantized_sequence = mm.quantize_note_sequence(
        primer_sequence, self.steps_per_quarter)
    # Setting gap_bars to infinite ensures that the entire input will be used.
    extracted_melodies, _ = mm.extract_melodies(
        quantized_sequence, search_start_step=input_start_step, min_bars=0,
        min_unique_pitches=1, gap_bars=float('inf'),
        ignore_polyphonic_notes=True)
    assert len(extracted_melodies) <= 1

    start_step = mm.quantize_to_step(
        generate_section.start_time, steps_per_second, quantize_cutoff=0)
    # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
    # always rounds down. This avoids generating a sequence that ends at 5.0
    # seconds when the requested end time is 4.99.
    end_step = mm.quantize_to_step(
        generate_section.end_time, steps_per_second, quantize_cutoff=1.0)

    if extracted_melodies and extracted_melodies[0]:
      melody = extracted_melodies[0]
    else:
      # If no melody could be extracted, create an empty melody that starts 1
      # step before the request start_step. This will result in 1 step of
      # silence when the melody is extended below.
      steps_per_bar = int(
          mm.steps_per_bar_in_quantized_sequence(quantized_sequence))
      melody = mm.Melody([],
                         start_step=max(0, start_step - 1),
                         steps_per_bar=steps_per_bar,
                         steps_per_quarter=self.steps_per_quarter)

    # Ensure that the melody extends up to the step we want to start generating.
    melody.set_length(start_step - melody.start_step)

    # Extract generation arguments from generator options.
    arg_types = {
        'temperature': lambda arg: arg.float_value,
        'beam_size': lambda arg: arg.int_value,
        'branch_factor': lambda arg: arg.int_value,
        'steps_per_iteration': lambda arg: arg.int_value
    }
    args = dict((name, value_fn(generator_options.args[name]))
                for name, value_fn in arg_types.items()
                if name in generator_options.args)

    generated_melody = self._model.generate_melody(
        end_step - melody.start_step, melody, **args)
    generated_sequence = generated_melody.to_sequence(qpm=qpm)
    assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
    return generated_sequence
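
For context, here is a hedged sketch of how a caller might populate the generator_options these methods consume, assuming the note_seq protobuf API (the import path varies across Magenta versions, e.g. magenta.protobuf in older releases):

from note_seq.protobuf import generator_pb2

generator_options = generator_pb2.GeneratorOptions()
section = generator_options.generate_sections.add()
section.start_time = 4.0   # seconds; must not precede the primer's last note
section.end_time = 16.0
generator_options.args['temperature'].float_value = 1.2
generator_options.args['beam_size'].int_value = 1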
Example #6
    def _generate(self, input_sequence, generator_options):
        if len(generator_options.input_sections) > 1:
            raise sequence_generator.SequenceGeneratorError(
                'This model supports at most one input_sections message, but got %s'
                % len(generator_options.input_sections))
        if len(generator_options.generate_sections) != 1:
            raise sequence_generator.SequenceGeneratorError(
                'This model supports only 1 generate_sections message, but got %s'
                % len(generator_options.generate_sections))

        if input_sequence and input_sequence.tempos:
            qpm = input_sequence.tempos[0].qpm
        else:
            qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
        steps_per_second = mm.steps_per_quarter_to_steps_per_second(
            self.steps_per_quarter, qpm)

        generate_section = generator_options.generate_sections[0]
        if generator_options.input_sections:
            input_section = generator_options.input_sections[0]
            primer_sequence = mm.trim_note_sequence(input_sequence,
                                                    input_section.start_time,
                                                    input_section.end_time)
            input_start_step = mm.quantize_to_step(input_section.start_time,
                                                   steps_per_second,
                                                   quantize_cutoff=0.0)
        else:
            primer_sequence = input_sequence
            input_start_step = 0

        if primer_sequence.notes:
            last_end_time = max(n.end_time for n in primer_sequence.notes)
        else:
            last_end_time = 0
        if last_end_time > generate_section.start_time:
            raise sequence_generator.SequenceGeneratorError(
                'Got GenerateSection request for section that is before the end of '
                'the NoteSequence. This model can only extend sequences. Requested '
                'start time: %s, Final note end time: %s' %
                (generate_section.start_time, last_end_time))

        # Quantize the priming sequence.
        quantized_sequence = mm.quantize_note_sequence(primer_sequence,
                                                       self.steps_per_quarter)
        # Setting gap_bars to infinite ensures that the entire input will be used.
        extracted_drum_tracks, _ = drum_pipelines.extract_drum_tracks(
            quantized_sequence,
            search_start_step=input_start_step,
            min_bars=0,
            gap_bars=float('inf'),
            ignore_is_drum=True)
        assert len(extracted_drum_tracks) <= 1

        start_step = mm.quantize_to_step(generate_section.start_time,
                                         steps_per_second,
                                         quantize_cutoff=0.0)
        # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
        # always rounds down. This avoids generating a sequence that ends at 5.0
        # seconds when the requested end time is 4.99.
        end_step = mm.quantize_to_step(generate_section.end_time,
                                       steps_per_second,
                                       quantize_cutoff=1.0)

        if extracted_drum_tracks and extracted_drum_tracks[0]:
            drums = extracted_drum_tracks[0]
        else:
            # If no drum track could be extracted, create an empty drum track that
            # starts 1 step before the request start_step. This will result in 1 step
            # of silence when the drum track is extended below.
            steps_per_bar = int(
                mm.steps_per_bar_in_quantized_sequence(quantized_sequence))
            drums = mm.DrumTrack([],
                                 start_step=max(0, start_step - 1),
                                 steps_per_bar=steps_per_bar,
                                 steps_per_quarter=self.steps_per_quarter)

        # Ensure that the drum track extends up to the step we want to start
        # generating.
        drums.set_length(start_step - drums.start_step)

        # Extract generation arguments from generator options.
        arg_types = {
            'temperature': lambda arg: arg.float_value,
            'beam_size': lambda arg: arg.int_value,
            'branch_factor': lambda arg: arg.int_value,
            'steps_per_iteration': lambda arg: arg.int_value
        }
        args = dict((name, value_fn(generator_options.args[name]))
                    for name, value_fn in arg_types.items()
                    if name in generator_options.args)

        generated_drums = self._model.generate_drum_track(
            end_step - drums.start_step, drums, **args)
        generated_sequence = generated_drums.to_sequence(qpm=qpm)
        assert (generated_sequence.total_time -
                generate_section.end_time) <= 1e-5
        return generated_sequence
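
The empty-track fallback above starts one step before the requested start_step so that extending the track up to start_step leaves exactly one step of silence before generation begins. A toy illustration, with ToyTrack standing in for mm.DrumTrack or mm.Melody:

class ToyTrack:
  def __init__(self, events, start_step):
    self.events = list(events)
    self.start_step = start_step

  def set_length(self, steps, pad_event=None):
    # Pad (or trim) on the right so the track spans exactly `steps` steps.
    self.events = (self.events + [pad_event] * steps)[:steps]

start_step = 16
track = ToyTrack([], start_step=max(0, start_step - 1))
track.set_length(start_step - track.start_step)
print(track.events)  # [None]: one silent step before generation starts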
Example #7
    def _generate(self, input_sequence, generator_options):
        if len(generator_options.input_sections) > 1:
            raise mm.SequenceGeneratorError(
                'This model supports at most one input_sections message, but got %s'
                % len(generator_options.input_sections))
        if len(generator_options.generate_sections) != 1:
            raise mm.SequenceGeneratorError(
                'This model supports only 1 generate_sections message, but got %s'
                % len(generator_options.generate_sections))

        if input_sequence and input_sequence.tempos:
            qpm = input_sequence.tempos[0].qpm
        else:
            qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
        steps_per_second = mm.steps_per_quarter_to_steps_per_second(
            self.steps_per_quarter, qpm)

        generate_section = generator_options.generate_sections[0]
        if generator_options.input_sections:
            # Use primer melody from input section only. Take backing chords from
            # beginning of input section through end of generate section.
            input_section = generator_options.input_sections[0]
            primer_sequence = mm.trim_note_sequence(input_sequence,
                                                    input_section.start_time,
                                                    input_section.end_time)
            backing_sequence = mm.trim_note_sequence(input_sequence,
                                                     input_section.start_time,
                                                     generate_section.end_time)
            input_start_step = mm.quantize_to_step(input_section.start_time,
                                                   steps_per_second,
                                                   quantize_cutoff=0.0)
        else:
            # No input section. Take primer melody from the beginning of the sequence
            # up until the start of the generate section.
            primer_sequence = mm.trim_note_sequence(
                input_sequence, 0.0, generate_section.start_time)
            backing_sequence = mm.trim_note_sequence(input_sequence, 0.0,
                                                     generate_section.end_time)
            input_start_step = 0

        if primer_sequence.notes:
            last_end_time = max(n.end_time for n in primer_sequence.notes)
        else:
            last_end_time = 0
        if last_end_time >= generate_section.start_time:
            raise mm.SequenceGeneratorError(
                'Got GenerateSection request for section that is before or equal to '
                'the end of the input section. This model can only extend melodies. '
                'Requested start time: %s, Final note end time: %s' %
                (generate_section.start_time, last_end_time))

        # Quantize the priming and backing sequences.
        quantized_primer_sequence = mm.quantize_note_sequence(
            primer_sequence, self.steps_per_quarter)
        quantized_backing_sequence = mm.quantize_note_sequence(
            backing_sequence, self.steps_per_quarter)

        # Setting gap_bars to infinite ensures that the entire input will be used.
        extracted_melodies, _ = mm.extract_melodies(
            quantized_primer_sequence,
            search_start_step=input_start_step,
            min_bars=0,
            min_unique_pitches=1,
            gap_bars=float('inf'),
            ignore_polyphonic_notes=True)
        assert len(extracted_melodies) <= 1

        start_step = mm.quantize_to_step(generate_section.start_time,
                                         steps_per_second,
                                         quantize_cutoff=0.0)
        # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
        # always rounds down. This avoids generating a sequence that ends at 5.0
        # seconds when the requested end time is 4.99.
        end_step = mm.quantize_to_step(generate_section.end_time,
                                       steps_per_second,
                                       quantize_cutoff=1.0)

        if extracted_melodies and extracted_melodies[0]:
            melody = extracted_melodies[0]
        else:
            # If no melody could be extracted, create an empty melody that starts 1
            # step before the request start_step. This will result in 1 step of
            # silence when the melody is extended below.
            steps_per_bar = int(
                mm.steps_per_bar_in_quantized_sequence(
                    quantized_primer_sequence))
            melody = mm.Melody([],
                               start_step=max(0, start_step - 1),
                               steps_per_bar=steps_per_bar,
                               steps_per_quarter=self.steps_per_quarter)

        extracted_chords, _ = mm.extract_chords(quantized_backing_sequence)
        chords = extracted_chords[0]

        # Make sure that chords and melody start on the same step. Trim the
        # chords from the left so that chords.end_step is preserved for the
        # assert below.
        if chords.start_step < melody.start_step:
            chords.set_length(
                len(chords) - melody.start_step + chords.start_step,
                from_left=True)

        assert chords.end_step == end_step

        # Ensure that the melody extends up to the step we want to start generating.
        melody.set_length(start_step - melody.start_step)

        # Extract generation arguments from generator options.
        arg_types = {
            'temperature': lambda arg: arg.float_value,
            'beam_size': lambda arg: arg.int_value,
            'branch_factor': lambda arg: arg.int_value,
            'steps_per_iteration': lambda arg: arg.int_value
        }
        args = dict((name, value_fn(generator_options.args[name]))
                    for name, value_fn in arg_types.items()
                    if name in generator_options.args)

        generated_melody = self._model.generate_melody(melody, chords, **args)
        generated_lead_sheet = mm.LeadSheet(generated_melody, chords)
        generated_sequence = generated_lead_sheet.to_sequence(qpm=qpm)
        assert (generated_sequence.total_time -
                generate_section.end_time) <= 1e-5
        return generated_sequence
  def _generate(self, input_sequence, generator_options):
    if len(generator_options.input_sections) > 1:
      raise mm.SequenceGeneratorError(
          'This model supports at most one input_sections message, but got %s' %
          len(generator_options.input_sections))
    if len(generator_options.generate_sections) != 1:
      raise mm.SequenceGeneratorError(
          'This model supports only 1 generate_sections message, but got %s' %
          len(generator_options.generate_sections))

    # This sequence will be quantized later, so it is guaranteed to have only 1
    # tempo.
    qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
    if input_sequence.tempos:
      qpm = input_sequence.tempos[0].qpm

    steps_per_second = mm.steps_per_quarter_to_steps_per_second(
        self.steps_per_quarter, qpm)

    generate_section = generator_options.generate_sections[0]
    if generator_options.input_sections:
      input_section = generator_options.input_sections[0]
      primer_sequence = mm.trim_note_sequence(
          input_sequence, input_section.start_time, input_section.end_time)
      input_start_step = mm.quantize_to_step(
          input_section.start_time, steps_per_second, quantize_cutoff=0)
    else:
      primer_sequence = input_sequence
      input_start_step = 0

    if primer_sequence.notes:
      last_end_time = max(n.end_time for n in primer_sequence.notes)
    else:
      last_end_time = 0

    if last_end_time > generate_section.start_time:
      raise mm.SequenceGeneratorError(
          'Got GenerateSection request for section that is before or equal to '
          'the end of the NoteSequence. This model can only extend sequences. '
          'Requested start time: %s, Final note end time: %s' %
          (generate_section.start_time, last_end_time))

    # Quantize the priming sequence.
    quantized_primer_sequence = mm.quantize_note_sequence(
        primer_sequence, self.steps_per_quarter)

    extracted_seqs, _ = mm.extract_pianoroll_sequences(
        quantized_primer_sequence, start_step=input_start_step)
    assert len(extracted_seqs) <= 1

    generate_start_step = mm.quantize_to_step(
        generate_section.start_time, steps_per_second, quantize_cutoff=0)
    # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
    # always rounds down. This avoids generating a sequence that ends at 5.0
    # seconds when the requested end time is 4.99.
    generate_end_step = mm.quantize_to_step(
        generate_section.end_time, steps_per_second, quantize_cutoff=1.0)

    if extracted_seqs and extracted_seqs[0]:
      pianoroll_seq = extracted_seqs[0]
    else:
      raise ValueError('No priming pianoroll could be extracted.')

    # Ensure that the track extends up to the step we want to start generating.
    pianoroll_seq.set_length(generate_start_step - pianoroll_seq.start_step)

    # Extract generation arguments from generator options.
    arg_types = {
        'beam_size': lambda arg: arg.int_value,
        'branch_factor': lambda arg: arg.int_value,
    }
    args = dict((name, value_fn(generator_options.args[name]))
                for name, value_fn in arg_types.items()
                if name in generator_options.args)

    total_steps = pianoroll_seq.num_steps + (
        generate_end_step - generate_start_step)

    pianoroll_seq = self._model.generate_pianoroll_sequence(
        total_steps, pianoroll_seq, **args)
    pianoroll_seq.set_length(total_steps)

    generated_sequence = pianoroll_seq.to_sequence(qpm=qpm)
    assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
    return generated_sequence
Example #9
  def _generate(self, input_sequence, generator_options):
    if len(generator_options.input_sections) > 1:
      raise mm.SequenceGeneratorException(
          'This model supports at most one input_sections message, but got %s' %
          len(generator_options.input_sections))
    if len(generator_options.generate_sections) != 1:
      raise mm.SequenceGeneratorException(
          'This model supports only 1 generate_sections message, but got %s' %
          len(generator_options.generate_sections))

    # This sequence will be quantized later, so it is guaranteed to have only 1
    # tempo.
    qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
    if input_sequence.tempos:
      qpm = input_sequence.tempos[0].qpm

    steps_per_second = mm.steps_per_quarter_to_steps_per_second(
        self.steps_per_quarter, qpm)

    generate_section = generator_options.generate_sections[0]
    if generator_options.input_sections:
      input_section = generator_options.input_sections[0]
      primer_sequence = mm.extract_subsequence(
          input_sequence, input_section.start_time, input_section.end_time)
      input_start_step = mm.quantize_to_step(
          input_section.start_time, steps_per_second)
    else:
      primer_sequence = input_sequence
      input_start_step = 0

    last_end_time = (max(n.end_time for n in primer_sequence.notes)
                     if primer_sequence.notes else 0)
    if last_end_time > generate_section.start_time:
      raise mm.SequenceGeneratorException(
          'Got GenerateSection request for section that is before or equal to '
          'the end of the NoteSequence. This model can only extend sequences. '
          'Requested start time: %s, Final note end time: %s' %
          (generate_section.start_time, last_end_time))

    # Quantize the priming sequence.
    quantized_primer_sequence = mm.quantize_note_sequence(
        primer_sequence, self.steps_per_quarter)

    extracted_seqs, _ = polyphony_lib.extract_polyphonic_sequences(
        quantized_primer_sequence, start_step=input_start_step)
    assert len(extracted_seqs) <= 1

    generate_start_step = mm.quantize_to_step(
        generate_section.start_time, steps_per_second)
    # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
    # always rounds down. This avoids generating a sequence that ends at 5.0
    # seconds when the requested end time is 4.99.
    generate_end_step = mm.quantize_to_step(
        generate_section.end_time, steps_per_second, quantize_cutoff=1.0)

    if extracted_seqs and extracted_seqs[0]:
      poly_seq = extracted_seqs[0]
    else:
      # If no track could be extracted, create an empty track that starts at the
      # requested generate_start_step. This will result in a sequence that
      # contains only the START token.
      poly_seq = polyphony_lib.PolyphonicSequence(
          steps_per_quarter=(
              quantized_primer_sequence.quantization_info.steps_per_quarter),
          start_step=generate_start_step)

    # Ensure that the track extends up to the step we want to start generating.
    poly_seq.set_length(generate_start_step - poly_seq.start_step)
    # Trim any trailing end events to prepare the sequence for more events to be
    # appended during generation.
    poly_seq.trim_trailing_end_events()

    # Extract generation arguments from generator options.
    arg_types = {
        'temperature': lambda arg: arg.float_value,
        'beam_size': lambda arg: arg.int_value,
        'branch_factor': lambda arg: arg.int_value,
        'steps_per_iteration': lambda arg: arg.int_value
    }
    args = dict((name, value_fn(generator_options.args[name]))
                for name, value_fn in arg_types.items()
                if name in generator_options.args)

    # Inject the priming sequence as melody in the output of the generator.
    # Note that start_step is 0 because we overwrite poly_seq below. If we
    # included the priming sequence in poly_seq, it would be poly_seq.num_steps.
    melody_to_inject = copy.deepcopy(poly_seq)
    args['modify_events_callback'] = partial(
        _inject_melody, melody_to_inject, 0)

    # Overwrite poly_seq with a blank sequence to feed into the generator so it
    # is conditioned only on the melody events that are injected as the sequence
    # is created. Otherwise, the generator would have to determine the most
    # likely sequence to follow a monophonic line, which is something not
    # present in the current training data (Bach Chorales).
    poly_seq = polyphony_lib.PolyphonicSequence(
        steps_per_quarter=(
            quantized_primer_sequence.quantization_info.steps_per_quarter),
        start_step=generate_start_step)
    poly_seq.trim_trailing_end_events()

    # If we wanted to include the priming sequence and didn't clear poly_seq
    # above, this is how we would calculate total_steps.
    # total_steps = poly_seq.num_steps + (
    #     generate_end_step - generate_start_step)

    total_steps = generate_end_step - generate_start_step

    while poly_seq.num_steps < total_steps:
      # Assume it takes ~5 rnn steps to generate one quantized step.
      # Can't know for sure until generation is finished because the number of
      # notes per quantized step is variable.
      steps_to_gen = total_steps - poly_seq.num_steps
      rnn_steps_to_gen = 5 * steps_to_gen
      tf.logging.info(
          'Need to generate %d more steps for this sequence, will try asking '
          'for %d RNN steps' % (steps_to_gen, rnn_steps_to_gen))
      poly_seq = self._model.generate_polyphonic_sequence(
          len(poly_seq) + rnn_steps_to_gen, poly_seq, **args)
    poly_seq.set_length(total_steps)

    # Specify a base_note_sequence because the priming sequence is not included
    # in poly_seq. If we did not clear poly_seq above, then we would not want to
    # specify a base_note_sequence.
    generated_sequence = poly_seq.to_sequence(
        qpm=qpm, base_note_sequence=copy.deepcopy(primer_sequence))
    assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
    return generated_sequence
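
The modify_events_callback mechanism above works by pre-binding arguments with functools.partial and letting the model supply the rest during generation. A minimal sketch of that pattern follows; inject_melody here is a hypothetical stand-in, not Magenta's _inject_melody:

import functools

def inject_melody(melody, start_step, event_sequences):
  # Hypothetical callback body: overwrite generated events with the primer.
  for seq in event_sequences:
    for i, event in enumerate(melody):
      seq[start_step + i] = event

callback = functools.partial(inject_melody, ['C4', 'E4', 'G4'], 0)
seqs = [[None] * 4]
callback(seqs)  # the model would invoke this during generation
print(seqs)     # [['C4', 'E4', 'G4', None]]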
  def _generate(self, input_sequence, generator_options):
    if len(generator_options.input_sections) > 1:
      raise mm.SequenceGeneratorError(
          'This model supports at most one input_sections message, but got %s' %
          len(generator_options.input_sections))
    if len(generator_options.generate_sections) != 1:
      raise mm.SequenceGeneratorError(
          'This model supports only 1 generate_sections message, but got %s' %
          len(generator_options.generate_sections))

    if input_sequence and input_sequence.tempos:
      qpm = input_sequence.tempos[0].qpm
    else:
      qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
    steps_per_second = mm.steps_per_quarter_to_steps_per_second(
        self.steps_per_quarter, qpm)

    generate_section = generator_options.generate_sections[0]
    if generator_options.input_sections:
      # Use primer melody from input section only. Take backing chords from
      # beginning of input section through end of generate section.
      input_section = generator_options.input_sections[0]
      primer_sequence = mm.trim_note_sequence(
          input_sequence, input_section.start_time, input_section.end_time)
      backing_sequence = mm.trim_note_sequence(
          input_sequence, input_section.start_time, generate_section.end_time)
      input_start_step = mm.quantize_to_step(
          input_section.start_time, steps_per_second, quantize_cutoff=0.0)
    else:
      # No input section. Take primer melody from the beginning of the sequence
      # up until the start of the generate section.
      primer_sequence = mm.trim_note_sequence(
          input_sequence, 0.0, generate_section.start_time)
      backing_sequence = mm.trim_note_sequence(
          input_sequence, 0.0, generate_section.end_time)
      input_start_step = 0

    if primer_sequence.notes:
      last_end_time = max(n.end_time for n in primer_sequence.notes)
    else:
      last_end_time = 0
    if last_end_time >= generate_section.start_time:
      raise mm.SequenceGeneratorError(
          'Got GenerateSection request for section that is before or equal to '
          'the end of the input section. This model can only extend melodies. '
          'Requested start time: %s, Final note end time: %s' %
          (generate_section.start_time, last_end_time))

    # Quantize the priming and backing sequences.
    quantized_primer_sequence = mm.quantize_note_sequence(
        primer_sequence, self.steps_per_quarter)
    quantized_backing_sequence = mm.quantize_note_sequence(
        backing_sequence, self.steps_per_quarter)

    # Setting gap_bars to infinite ensures that the entire input will be used.
    extracted_melodies, _ = mm.extract_melodies(
        quantized_primer_sequence, search_start_step=input_start_step,
        min_bars=0, min_unique_pitches=1, gap_bars=float('inf'),
        ignore_polyphonic_notes=True)
    assert len(extracted_melodies) <= 1

    start_step = mm.quantize_to_step(
        generate_section.start_time, steps_per_second, quantize_cutoff=0.0)
    # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
    # always rounds down. This avoids generating a sequence that ends at 5.0
    # seconds when the requested end time is 4.99.
    end_step = mm.quantize_to_step(
        generate_section.end_time, steps_per_second, quantize_cutoff=1.0)

    if extracted_melodies and extracted_melodies[0]:
      melody = extracted_melodies[0]
    else:
      # If no melody could be extracted, create an empty melody that starts 1
      # step before the request start_step. This will result in 1 step of
      # silence when the melody is extended below.
      steps_per_bar = int(
          mm.steps_per_bar_in_quantized_sequence(quantized_primer_sequence))
      melody = mm.Melody([],
                         start_step=max(0, start_step - 1),
                         steps_per_bar=steps_per_bar,
                         steps_per_quarter=self.steps_per_quarter)

    extracted_chords, _ = mm.extract_chords(quantized_backing_sequence)
    chords = extracted_chords[0]

    # Make sure that chords and melody start on the same step. Trim the
    # chords from the left so that chords.end_step is preserved for the
    # assert below.
    if chords.start_step < melody.start_step:
      chords.set_length(
          len(chords) - melody.start_step + chords.start_step, from_left=True)

    assert chords.end_step == end_step

    # Ensure that the melody extends up to the step we want to start generating.
    melody.set_length(start_step - melody.start_step)

    # Extract generation arguments from generator options.
    arg_types = {
        'temperature': lambda arg: arg.float_value,
        'beam_size': lambda arg: arg.int_value,
        'branch_factor': lambda arg: arg.int_value,
        'steps_per_iteration': lambda arg: arg.int_value
    }
    args = dict((name, value_fn(generator_options.args[name]))
                for name, value_fn in arg_types.items()
                if name in generator_options.args)

    generated_melody = self._model.generate_melody(melody, chords, **args)
    generated_lead_sheet = mm.LeadSheet(generated_melody, chords)
    generated_sequence = generated_lead_sheet.to_sequence(qpm=qpm)
    assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
    return generated_sequence
  def _generate(self, input_sequence, generator_options):
    if len(generator_options.input_sections) > 1:
      raise mm.SequenceGeneratorException(
          'This model supports at most one input_sections message, but got %s' %
          len(generator_options.input_sections))
    if len(generator_options.generate_sections) != 1:
      raise mm.SequenceGeneratorException(
          'This model supports only 1 generate_sections message, but got %s' %
          len(generator_options.generate_sections))

    # This sequence will be quantized later, so it is guaranteed to have only 1
    # tempo.
    qpm = mm.DEFAULT_QUARTERS_PER_MINUTE
    if input_sequence.tempos:
      qpm = input_sequence.tempos[0].qpm

    steps_per_second = mm.steps_per_quarter_to_steps_per_second(
        self.steps_per_quarter, qpm)

    generate_section = generator_options.generate_sections[0]
    if generator_options.input_sections:
      input_section = generator_options.input_sections[0]
      primer_sequence = mm.trim_note_sequence(
          input_sequence, input_section.start_time, input_section.end_time)
      input_start_step = mm.quantize_to_step(
          input_section.start_time, steps_per_second, quantize_cutoff=0)
    else:
      primer_sequence = input_sequence
      input_start_step = 0

    last_end_time = (max(n.end_time for n in primer_sequence.notes)
                     if primer_sequence.notes else 0)
    if last_end_time > generate_section.start_time:
      raise mm.SequenceGeneratorException(
          'Got GenerateSection request for section that is before or equal to '
          'the end of the NoteSequence. This model can only extend sequences. '
          'Requested start time: %s, Final note end time: %s' %
          (generate_section.start_time, last_end_time))

    # Quantize the priming sequence.
    quantized_primer_sequence = mm.quantize_note_sequence(
        primer_sequence, self.steps_per_quarter)

    extracted_seqs, _ = polyphony_lib.extract_polyphonic_sequences(
        quantized_primer_sequence, start_step=input_start_step)
    assert len(extracted_seqs) <= 1

    generate_start_step = mm.quantize_to_step(
        generate_section.start_time, steps_per_second, quantize_cutoff=0)
    # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
    # always rounds down. This avoids generating a sequence that ends at 5.0
    # seconds when the requested end time is 4.99.
    generate_end_step = mm.quantize_to_step(
        generate_section.end_time, steps_per_second, quantize_cutoff=1.0)

    if extracted_seqs and extracted_seqs[0]:
      poly_seq = extracted_seqs[0]
    else:
      # If no track could be extracted, create an empty track that starts at the
      # requested generate_start_step. This will result in a sequence that
      # contains only the START token.
      poly_seq = polyphony_lib.PolyphonicSequence(
          steps_per_quarter=(
              quantized_primer_sequence.quantization_info.steps_per_quarter),
          start_step=generate_start_step)

    # Ensure that the track extends up to the step we want to start generating.
    poly_seq.set_length(generate_start_step - poly_seq.start_step)
    # Trim any trailing end events to prepare the sequence for more events to be
    # appended during generation.
    poly_seq.trim_trailing_end_events()

    # Extract generation arguments from generator options.
    arg_types = {
        'temperature': lambda arg: arg.float_value,
        'beam_size': lambda arg: arg.int_value,
        'branch_factor': lambda arg: arg.int_value,
        'steps_per_iteration': lambda arg: arg.int_value
    }
    args = dict((name, value_fn(generator_options.args[name]))
                for name, value_fn in arg_types.items()
                if name in generator_options.args)

    # Inject the priming sequence as melody in the output of the generator, if
    # requested.
    # This option starts with no_ so that if it is unspecified (as will be the
    # case when used with the midi interface), the default will be to inject the
    # primer.
    if not (generator_options.args[
        'no_inject_primer_during_generation'].bool_value):
      melody_to_inject = copy.deepcopy(poly_seq)
      if generator_options.args['condition_on_primer'].bool_value:
        inject_start_step = poly_seq.num_steps
      else:
        # 0 steps because we'll overwrite poly_seq with a blank sequence below.
        inject_start_step = 0

      args['modify_events_callback'] = partial(
          _inject_melody, melody_to_inject, inject_start_step)

    # If we don't want to condition on the priming sequence, then overwrite
    # poly_seq with a blank sequence to feed into the generator.
    if not generator_options.args['condition_on_primer'].bool_value:
      poly_seq = polyphony_lib.PolyphonicSequence(
          steps_per_quarter=(
              quantized_primer_sequence.quantization_info.steps_per_quarter),
          start_step=generate_start_step)
      poly_seq.trim_trailing_end_events()

    total_steps = poly_seq.num_steps + (
        generate_end_step - generate_start_step)

    while poly_seq.num_steps < total_steps:
      # Assume it takes ~5 rnn steps to generate one quantized step.
      # Can't know for sure until generation is finished because the number of
      # notes per quantized step is variable.
      steps_to_gen = total_steps - poly_seq.num_steps
      rnn_steps_to_gen = 5 * steps_to_gen
      tf.logging.info(
          'Need to generate %d more steps for this sequence, will try asking '
          'for %d RNN steps' % (steps_to_gen, rnn_steps_to_gen))
      poly_seq = self._model.generate_polyphonic_sequence(
          len(poly_seq) + rnn_steps_to_gen, poly_seq, **args)
    poly_seq.set_length(total_steps)

    if generator_options.args['condition_on_primer'].bool_value:
      generated_sequence = poly_seq.to_sequence(qpm=qpm)
    else:
      # Specify a base_note_sequence because the priming sequence was not
      # included in poly_seq.
      generated_sequence = poly_seq.to_sequence(
          qpm=qpm, base_note_sequence=copy.deepcopy(primer_sequence))
    assert (generated_sequence.total_time - generate_section.end_time) <= 1e-5
    return generated_sequence
    def _primer_melody_to_event_sequence(self, input_sequence,
                                         generator_options, config):

        qpm = (input_sequence.tempos[0].qpm if input_sequence
               and input_sequence.tempos else mm.DEFAULT_QUARTERS_PER_MINUTE)
        steps_per_second = mm.steps_per_quarter_to_steps_per_second(
            self.steps_per_quarter, qpm)

        generate_section = generator_options.generate_sections[0]
        if generator_options.input_sections:
            input_section = generator_options.input_sections[0]
            primer_sequence = mm.trim_note_sequence(input_sequence,
                                                    input_section.start_time,
                                                    input_section.end_time)
            input_start_step = mm.quantize_to_step(input_section.start_time,
                                                   steps_per_second,
                                                   quantize_cutoff=0)
        else:
            primer_sequence = input_sequence
            input_start_step = 0

        last_end_time = (max(n.end_time for n in primer_sequence.notes)
                         if primer_sequence.notes else 0)
        if last_end_time > generate_section.start_time:
            raise mm.SequenceGeneratorException(
                'Got GenerateSection request for section that is before the end of '
                'the NoteSequence. This model can only extend sequences. Requested '
                'start time: %s, Final note end time: %s' %
                (generate_section.start_time, last_end_time))

        # Quantize the priming sequence.
        quantized_sequence = mm.quantize_note_sequence(primer_sequence,
                                                       self.steps_per_quarter)
        # Setting gap_bars to infinite ensures that the entire input will be used.
        extracted_melodies, _ = mm.extract_melodies(
            quantized_sequence,
            search_start_step=input_start_step,
            min_bars=0,
            min_unique_pitches=1,
            gap_bars=float('inf'),
            ignore_polyphonic_notes=True)
        assert len(extracted_melodies) <= 1

        start_step = mm.quantize_to_step(generate_section.start_time,
                                         steps_per_second,
                                         quantize_cutoff=0)
        # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
        # always rounds down. This avoids generating a sequence that ends at 5.0
        # seconds when the requested end time is 4.99.
        end_step = mm.quantize_to_step(generate_section.end_time,
                                       steps_per_second,
                                       quantize_cutoff=1.0)

        if extracted_melodies and extracted_melodies[0]:
            melody = extracted_melodies[0]
        else:
            # If no melody could be extracted, create an empty melody that starts 1
            # step before the request start_step. This will result in 1 step of
            # silence when the melody is extended below.
            steps_per_bar = int(
                mm.steps_per_bar_in_quantized_sequence(quantized_sequence))
            melody = mm.Melody([],
                               start_step=max(0, start_step - 1),
                               steps_per_bar=steps_per_bar,
                               steps_per_quarter=self.steps_per_quarter)

        # Extend the melody to two steps before the step where generation
        # starts (note: this variant stops the primer 2 steps short of
        # start_step).
        melody.set_length(start_step - melody.start_step - 2)

        # Look up the underlying one-hot encoding (note: this reaches into a
        # private attribute of the encoder/decoder).
        now_encoding = config.encoder_decoder._one_hot_encoding

        # Convert the primer melody into the model's event representation.
        primer_events = self._model.primer_melody_to_events(
            end_step - melody.start_step, melody)

        for i, event in enumerate(primer_events):
            primer_events[i] = now_encoding.encode_event(event)

        return primer_events
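
The encode_event call above maps each melody event to an integer class index. Below is a toy sketch of what a one-hot melody encoding typically does (modeled loosely on note_seq's MelodyOneHotEncoding, but a stand-in, not the object reached via config.encoder_decoder above):

class ToyMelodyOneHotEncoding:
  def __init__(self, min_note=48, max_note=84):
    self.min_note = min_note
    self.max_note = max_note  # exclusive

  @property
  def num_classes(self):
    # Two special events (no-event, note-off) plus the pitch range.
    return 2 + (self.max_note - self.min_note)

  def encode_event(self, event):
    if event in (-2, -1):  # NO_EVENT / NOTE_OFF special values
      return event + 2
    return event - self.min_note + 2

enc = ToyMelodyOneHotEncoding()
print([enc.encode_event(e) for e in (-2, -1, 60)])  # [0, 1, 14]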