Example #1
    def checkFMajorScale(self, filename):
        """Verify MusicXML scale file.

    Verify that it contains the correct pitches (sounding pitch) and durations.

    Args:
      filename: file to test.
    """

        # Expected QuantizedSequence
        # Note tuple format: (midi_pitch, velocity, start_step, end_step)
        expected_quantized_sequence = sequences_lib.QuantizedSequence()
        expected_quantized_sequence.steps_per_quarter = self.steps_per_quarter
        expected_quantized_sequence.qpm = 120.0
        expected_quantized_sequence.time_signature = (
            sequences_lib.QuantizedSequence.TimeSignature(numerator=4,
                                                          denominator=4))
        testing_lib.add_quantized_track_to_sequence(
            expected_quantized_sequence, 0,
            [(65, 64, 0, 4), (67, 64, 4, 8), (69, 64, 8, 12), (70, 64, 12, 16),
             (72, 64, 16, 20), (74, 64, 20, 24), (76, 64, 24, 28),
             (77, 64, 28, 32)])

        # Convert MusicXML to QuantizedSequence
        source_musicxml = musicxml_parser.MusicXMLDocument(filename)
        sequence_proto = musicxml_reader.musicxml_to_sequence_proto(
            source_musicxml)
        quantized = sequences_lib.QuantizedSequence()
        quantized.from_note_sequence(sequence_proto, self.steps_per_quarter)

        # Check equality
        self.assertEqual(expected_quantized_sequence, quantized)
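A hypothetical call site for this helper; the test method name and fixture path are placeholders, not from the source:

    def testFMajorScale(self):
        # Placeholder path; point this at the actual MusicXML test fixture.
        self.checkFMajorScale('testdata/f_major_scale.xml')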
Example #2
 def testEq(self):
     left_hand = sequences_lib.QuantizedSequence()
     left_hand.qpm = 123.0
     left_hand.steps_per_quarter = 7
     left_hand.time_signature = sequences_lib.TimeSignature(7, 8)
     testing_lib.add_quantized_track(left_hand, 0, [(12, 100, 0, 40),
                                                    (11, 100, 1, 2)])
     testing_lib.add_quantized_track(left_hand, 2, [(55, 100, 4, 6),
                                                    (14, 120, 4, 10)])
     testing_lib.add_quantized_track(left_hand, 3, [(1, 10, 0, 6),
                                                    (2, 50, 20, 21),
                                                    (0, 101, 17, 21)])
     testing_lib.add_quantized_chords(left_hand, [('Cmaj7', 1), ('G9', 2)])
     # Same notes and chords as left_hand, just added in a different order;
     # the equality assertion below implies order does not matter.
     right_hand = sequences_lib.QuantizedSequence()
     right_hand.qpm = 123.0
     right_hand.steps_per_quarter = 7
     right_hand.time_signature = sequences_lib.TimeSignature(7, 8)
     testing_lib.add_quantized_track(right_hand, 0, [(11, 100, 1, 2),
                                                     (12, 100, 0, 40)])
     testing_lib.add_quantized_track(right_hand, 2, [(14, 120, 4, 10),
                                                     (55, 100, 4, 6)])
     testing_lib.add_quantized_track(right_hand, 3, [(0, 101, 17, 21),
                                                     (2, 50, 20, 21),
                                                     (1, 10, 0, 6)])
     testing_lib.add_quantized_chords(right_hand, [('G9', 2), ('Cmaj7', 1)])
     self.assertEqual(left_hand, right_hand)
Example #3
 def testNotEq(self):
   left_hand = sequences_lib.QuantizedSequence()
   left_hand.bpm = 123.0
   left_hand.steps_per_beat = 7
   left_hand.time_signature = sequences_lib.QuantizedSequence.TimeSignature(
       numerator=7, denominator=8)
   testing_lib.add_quantized_track_to_sequence(
       left_hand, 0,
       [(12, 100, 0, 40), (11, 100, 1, 2)])
   testing_lib.add_quantized_track_to_sequence(
       left_hand, 2,
       [(55, 100, 4, 6), (15, 120, 4, 10)])
   testing_lib.add_quantized_track_to_sequence(
       left_hand, 3,
       [(1, 10, 0, 6), (2, 50, 20, 21), (0, 101, 17, 21)])
   testing_lib.add_quantized_chords_to_sequence(
       left_hand, [('Cmaj7', 1), ('G9', 2)])
    # Differs from left_hand: pitch 14 vs. 15 in track 2 and chord 'C7'
    # vs. 'Cmaj7', so the sequences must compare unequal.
    right_hand = sequences_lib.QuantizedSequence()
   right_hand.bpm = 123.0
   right_hand.steps_per_beat = 7
   right_hand.time_signature = sequences_lib.QuantizedSequence.TimeSignature(
       numerator=7, denominator=8)
   testing_lib.add_quantized_track_to_sequence(
       right_hand, 0,
       [(11, 100, 1, 2), (12, 100, 0, 40)])
   testing_lib.add_quantized_track_to_sequence(
       right_hand, 2,
       [(14, 120, 4, 10), (55, 100, 4, 6)])
   testing_lib.add_quantized_track_to_sequence(
       right_hand, 3,
       [(0, 101, 17, 21), (2, 50, 20, 21), (1, 10, 0, 6)])
   testing_lib.add_quantized_chords_to_sequence(
       right_hand, [('G9', 2), ('C7', 1)])
   self.assertNotEqual(left_hand, right_hand)
Example #4
def midi_file_to_drum_track(midi_file, steps_per_quarter=4, qpm=None):
  """Loads a drum track from a MIDI file.

  Args:
    midi_file: Absolute path to MIDI file.
    steps_per_quarter: Quantization of DrumTrack. For example, 4 = 16th notes.
    qpm: Tempo in quarters per minute. If not set, uses the first tempo of
        the MIDI file, falling back to
        magenta.music.DEFAULT_QUARTERS_PER_MINUTE if the file has no tempo.

  Returns:
    A DrumTrack object extracted from the MIDI file.
  """
  sequence = midi_io.midi_file_to_sequence_proto(midi_file)
  if qpm is None:
    if sequence.tempos:
      qpm = sequence.tempos[0].qpm
    else:
      qpm = constants.DEFAULT_QUARTERS_PER_MINUTE
  quantized_sequence = sequences_lib.QuantizedSequence()
  quantized_sequence.qpm = qpm
  quantized_sequence.from_note_sequence(
      sequence, steps_per_quarter=steps_per_quarter)
  drum_track = DrumTrack()
  drum_track.from_quantized_sequence(quantized_sequence)
  return drum_track
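A minimal usage sketch, assuming this module is importable; the path is a placeholder:

drums = midi_file_to_drum_track('/tmp/drums.mid', steps_per_quarter=4)
print(len(drums))  # length of the extracted drum track in quantized steps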
Example #5
def midi_file_to_melody(midi_file,
                        steps_per_quarter=4,
                        qpm=None,
                        ignore_polyphonic_notes=True):
    """Loads a melody from a MIDI file.

  Args:
    midi_file: Absolute path to MIDI file.
    steps_per_quarter: Quantization of Melody. For example, 4 = 16th notes.
    qpm: Tempo in quarters per a minute. If not set, tries to use the first
        tempo of the midi track and defaults to
        magenta.music.DEFAULT_QUARTERS_PER_MINUTE if fails.
    ignore_polyphonic_notes: Only use the highest simultaneous note if True.

  Returns:
    A Melody object extracted from the MIDI file.
  """
    sequence = midi_io.midi_file_to_sequence_proto(midi_file)
    if qpm is None:
        if sequence.tempos:
            qpm = sequence.tempos[0].qpm
        else:
            qpm = constants.DEFAULT_QUARTERS_PER_MINUTE
    quantized_sequence = sequences_lib.QuantizedSequence()
    quantized_sequence.qpm = qpm
    quantized_sequence.from_note_sequence(sequence,
                                          steps_per_quarter=steps_per_quarter)
    melody = Melody()
    melody.from_quantized_sequence(
        quantized_sequence, ignore_polyphonic_notes=ignore_polyphonic_notes)
    return melody
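The melody version works the same way; a sketch with a placeholder path:

melody = midi_file_to_melody('/tmp/primer.mid', ignore_polyphonic_notes=True)
print(len(melody))  # melody length in quantized steps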
Example #6
 def testMultiTrack(self):
     testing_lib.add_track_to_sequence(self.note_sequence, 0,
                                       [(12, 100, 1.0, 4.0),
                                        (19, 100, 0.95, 3.0)])
     testing_lib.add_track_to_sequence(self.note_sequence, 3,
                                       [(12, 100, 1.0, 4.0),
                                        (19, 100, 2.0, 5.0)])
     testing_lib.add_track_to_sequence(self.note_sequence, 7,
                                       [(12, 100, 1.0, 5.0),
                                        (19, 100, 2.0, 4.0),
                                        (24, 100, 3.0, 3.5)])
     testing_lib.add_quantized_track_to_sequence(
         self.expected_quantized_sequence, 0, [(12, 100, 4, 16),
                                               (19, 100, 4, 12)])
     testing_lib.add_quantized_track_to_sequence(
         self.expected_quantized_sequence, 3, [(12, 100, 4, 16),
                                               (19, 100, 8, 20)])
     testing_lib.add_quantized_track_to_sequence(
         self.expected_quantized_sequence, 7, [(12, 100, 4, 20),
                                               (19, 100, 8, 16),
                                               (24, 100, 12, 14)])
     quantized = sequences_lib.QuantizedSequence()
     quantized.from_note_sequence(self.note_sequence,
                                  self.steps_per_quarter)
     self.assertEqual(self.expected_quantized_sequence, quantized)
Example #7
    def testFromNoteSequence_TimeSignatureChange(self):
        testing_lib.add_track_to_sequence(self.note_sequence, 0,
                                          [(12, 100, 0.01, 10.0),
                                           (11, 55, 0.22, 0.50),
                                           (40, 45, 2.50, 3.50),
                                           (55, 120, 4.0, 4.01),
                                           (52, 99, 4.75, 5.0)])
        del self.note_sequence.time_signatures[:]
        quantized = sequences_lib.QuantizedSequence()
        quantized.from_note_sequence(self.note_sequence,
                                     self.steps_per_quarter)

        # Single time signature.
        self.note_sequence.time_signatures.add(numerator=4,
                                               denominator=4,
                                               time=0)
        quantized.from_note_sequence(self.note_sequence,
                                     self.steps_per_quarter)

        # Multiple time signatures with no change.
        self.note_sequence.time_signatures.add(numerator=4,
                                               denominator=4,
                                               time=1)
        quantized.from_note_sequence(self.note_sequence,
                                     self.steps_per_quarter)

        # Time signature change.
        self.note_sequence.time_signatures.add(numerator=2,
                                               denominator=4,
                                               time=2)
        with self.assertRaises(sequences_lib.MultipleTimeSignatureException):
            quantized.from_note_sequence(self.note_sequence,
                                         self.steps_per_quarter)
Example #8
    def testQuantizer(self):
        steps_per_quarter = 4
        note_sequence = common_testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 60}""")
        testing_lib.add_track_to_sequence(note_sequence, 0,
                                          [(12, 100, 0.01, 10.0),
                                           (11, 55, 0.22, 0.50),
                                           (40, 45, 2.50, 3.50),
                                           (55, 120, 4.0, 4.01),
                                           (52, 99, 4.75, 5.0)])
        expected_quantized_sequence = sequences_lib.QuantizedSequence()
        expected_quantized_sequence.qpm = 60.0
        expected_quantized_sequence.steps_per_quarter = steps_per_quarter
        testing_lib.add_quantized_track_to_sequence(
            expected_quantized_sequence, 0,
            [(12, 100, 0, 40), (11, 55, 1, 2), (40, 45, 10, 14),
             (55, 120, 16, 17), (52, 99, 19, 20)])

        unit = pipelines_common.Quantizer(steps_per_quarter)
        self._unit_transform_test(unit, note_sequence,
                                  [expected_quantized_sequence])
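The expected step values follow from the quantization arithmetic; a quick sanity check, assuming 60 QPM and 4 steps per quarter as in the test above:

steps_per_second = 4 * 60.0 / 60.0  # steps_per_quarter * qpm / 60
assert round(0.01 * steps_per_second) == 0   # onset at 0.01 s -> step 0
assert round(10.0 * steps_per_second) == 40  # offset at 10.0 s -> step 40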
Example #9
  def testStepsPerBar(self):
    quantized = sequences_lib.QuantizedSequence()
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
    self.assertEqual(16, quantized.steps_per_bar())  # 4/4: 4 quarters * 4 steps

    self.note_sequence.time_signatures[0].numerator = 6
    self.note_sequence.time_signatures[0].denominator = 8
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
    self.assertEqual(12.0, quantized.steps_per_bar())  # 6/8: 3 quarters * 4 steps
Example #10
 def testRounding(self):
   testing_lib.add_track_to_sequence(
       self.note_sequence, 1,
       [(12, 100, 0.01, 0.24), (11, 100, 0.22, 0.55), (40, 100, 0.50, 0.75),
        (41, 100, 0.689, 1.18), (44, 100, 1.19, 1.69), (55, 100, 4.0, 4.01)])
   testing_lib.add_quantized_track_to_sequence(
       self.expected_quantized_sequence, 1,
       [(12, 100, 0, 1), (11, 100, 1, 2), (40, 100, 2, 3),
        (41, 100, 3, 5), (44, 100, 5, 7), (55, 100, 16, 17)])
   quantized = sequences_lib.QuantizedSequence()
   quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
   self.assertEqual(self.expected_quantized_sequence, quantized)
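The expected values exercise round-to-nearest step boundaries; for instance, at 4 steps per second (the library's exact handling of ties may differ):

assert round(0.689 * 4) == 3  # 2.756 rounds up to step 3
assert round(1.18 * 4) == 5   # 4.72 rounds up to step 5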
Example #11
 def setUp(self):
     self.steps_per_quarter = 4
     self.note_sequence = common_testing_lib.parse_test_proto(
         music_pb2.NoteSequence, """
     time_signatures: {
       numerator: 4
       denominator: 4}
     tempos: {
       qpm: 60}""")
     self.expected_quantized_sequence = sequences_lib.QuantizedSequence()
     self.expected_quantized_sequence.qpm = 60.0
     self.expected_quantized_sequence.steps_per_quarter = self.steps_per_quarter
Example #12
 def transform(self, note_sequence):
     quantized_sequence = sequences_lib.QuantizedSequence()
     try:
         quantized_sequence.from_note_sequence(note_sequence,
                                               self._steps_per_quarter)
         return [quantized_sequence]
     except sequences_lib.MultipleTimeSignatureException:
         tf.logging.debug('Multiple time signatures found in NoteSequence')
         self._set_stats([
             statistics.Counter(
                 'sequences_discarded_because_multiple_time_signatures', 1)
         ])
         return []
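A hedged usage sketch for this pipeline unit, mirroring the positional constructor argument from Example #8 (note_sequence is a placeholder NoteSequence proto):

unit = pipelines_common.Quantizer(4)  # steps_per_quarter
outputs = unit.transform(note_sequence)
# outputs == [quantized_sequence], or [] if the NoteSequence contained a
# time signature change.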
Example #13
  def testFromNoteSequence_ImplicitTempoChange(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    del self.note_sequence.tempos[:]
    quantized = sequences_lib.QuantizedSequence()

    # No tempo.
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)

    # Implicit tempo change.
    self.note_sequence.tempos.add(qpm=60, time=2)
    with self.assertRaises(sequences_lib.MultipleTempoException):
      quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
Example #14
  def testFromNoteSequence_NoImplicitTempoChangeOutOfOrder(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    del self.note_sequence.tempos[:]
    quantized = sequences_lib.QuantizedSequence()

    # No tempo.
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)

    # No implicit tempo change, but tempos are added out of order.
    self.note_sequence.tempos.add(qpm=60, time=2)
    self.note_sequence.tempos.add(qpm=60, time=0)
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
Example #15
  def testDeepcopy(self):
    quantized = sequences_lib.QuantizedSequence()
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)

    quantized_copy = copy.deepcopy(quantized)
    self.assertEqual(quantized, quantized_copy)

    testing_lib.add_quantized_track_to_sequence(
        quantized, 1,
        [(12, 100, 4, 20), (19, 100, 8, 16), (24, 100, 12, 14)])

    self.assertNotEqual(quantized, quantized_copy)
Example #16
  def testFromNoteSequence_NoImplicitTimeSignatureChangeOutOfOrder(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    del self.note_sequence.time_signatures[:]
    quantized = sequences_lib.QuantizedSequence()

    # No time signature.
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)

    # No implicit time signature change, but time signatures are added out of
    # order.
    self.note_sequence.time_signatures.add(numerator=2, denominator=4, time=2)
    self.note_sequence.time_signatures.add(numerator=2, denominator=4, time=0)
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
Example #17
 def testChordsExtractor(self):
     quantized_sequence = sequences_lib.QuantizedSequence()
     quantized_sequence.steps_per_quarter = 1
     testing_lib.add_quantized_chords_to_sequence(quantized_sequence,
                                                  [('C', 2), ('Am', 4),
                                                   ('F', 5)])
     quantized_sequence.total_steps = 8
     # Each chord holds from its start step until the next chord event;
     # steps before the first chord are NO_CHORD.
     expected_events = [[NO_CHORD, NO_CHORD, 'C', 'C', 'Am', 'F', 'F', 'F']]
     expected_chord_progressions = []
     for events_list in expected_events:
         chords = chords_lib.ChordProgression(events_list,
                                              steps_per_quarter=1,
                                              steps_per_bar=4)
         expected_chord_progressions.append(chords)
     unit = pipelines_common.ChordsExtractor(all_transpositions=False)
     self._unit_transform_test(unit, quantized_sequence,
                               expected_chord_progressions)
Example #18
 def testFromNoteSequence(self):
   testing_lib.add_track_to_sequence(
       self.note_sequence, 0,
       [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
        (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
   testing_lib.add_chords_to_sequence(
       self.note_sequence,
       [('B7', 0.22), ('Em9', 4.0)])
   testing_lib.add_quantized_track_to_sequence(
       self.expected_quantized_sequence, 0,
       [(12, 100, 0, 40), (11, 55, 1, 2), (40, 45, 10, 14),
        (55, 120, 16, 17), (52, 99, 19, 20)])
   testing_lib.add_quantized_chords_to_sequence(
       self.expected_quantized_sequence,
       [('B7', 1), ('Em9', 16)])
   quantized = sequences_lib.QuantizedSequence()
   quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
   self.assertEqual(self.expected_quantized_sequence, quantized)
Example #19
    def load_primer(self):
        """Loads default MIDI primer file.

    Also assigns the steps per bar of this file to be the model's defaults.
    """

        if not os.path.exists(self.midi_primer):
            tf.logging.warn('ERROR! No such primer file exists! %s',
                            self.midi_primer)
            return

        self.primer_sequence = midi_io.midi_file_to_sequence_proto(
            self.midi_primer)
        quantized_seq = sequences_lib.QuantizedSequence()
        quantized_seq.from_note_sequence(self.primer_sequence,
                                         steps_per_quarter=4)
        extracted_melodies, _ = melodies_lib.extract_melodies(
            quantized_seq, min_bars=0, min_unique_pitches=1)
        self.primer = extracted_melodies[0]
        self.steps_per_bar = self.primer.steps_per_bar
Example #20
 def testDrumsExtractor(self):
   quantized_sequence = sequences_lib.QuantizedSequence()
   quantized_sequence.steps_per_quarter = 1
   testing_lib.add_quantized_track_to_sequence(
       quantized_sequence, 0,
       [(12, 100, 2, 4), (11, 1, 6, 7), (12, 1, 6, 8)],
       is_drum=True)
   testing_lib.add_quantized_track_to_sequence(
       quantized_sequence, 1,
       [(12, 127, 2, 4), (14, 50, 6, 8)])
   expected_events = [
       [NO_DRUMS, NO_DRUMS, DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
        DRUMS(11, 12)]]
   expected_drum_tracks = []
   for events_list in expected_events:
     drums = drums_lib.DrumTrack(
         events_list, steps_per_quarter=1, steps_per_bar=4)
     expected_drum_tracks.append(drums)
   unit = drum_pipelines.DrumsExtractor(min_bars=1, gap_bars=1)
   self._unit_transform_test(unit, quantized_sequence, expected_drum_tracks)
Example #21
  def testFilterDrums(self):
    testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 100, 1.0, 4.0), (19, 100, 0.95, 3.0)])
    testing_lib.add_track_to_sequence(
        self.note_sequence, 3,
        [(12, 100, 1.0, 4.0), (19, 100, 2.0, 5.0)])

    # Make instrument 0 a drum.
    for note in self.note_sequence.notes:
      if note.instrument == 0:
        note.is_drum = True

    testing_lib.add_quantized_track_to_sequence(
        self.expected_quantized_sequence, 3,
        [(12, 100, 4, 16), (19, 100, 8, 20)])

    quantized = sequences_lib.QuantizedSequence()
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
    self.assertEqual(self.expected_quantized_sequence, quantized)
Example #22
 def testMonophonicMelodyExtractor(self):
     quantized_sequence = sequences_lib.QuantizedSequence()
     quantized_sequence.steps_per_quarter = 1
     testing_lib.add_quantized_track(quantized_sequence, 0,
                                     [(12, 100, 2, 4), (11, 1, 6, 7)])
     testing_lib.add_quantized_track(quantized_sequence, 1,
                                     [(12, 127, 2, 4), (14, 50, 6, 8)])
     expected_events = [[
         NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11
     ], [
         NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14, NO_EVENT
     ]]
     expected_melodies = []
     for events_list in expected_events:
         melody = melodies_lib.MonophonicMelody()
         melody.from_event_list(events_list,
                                steps_per_quarter=1,
                                steps_per_bar=4)
         expected_melodies.append(melody)
     unit = pipelines_common.MonophonicMelodyExtractor(min_bars=1,
                                                       min_unique_pitches=1,
                                                       gap_bars=1)
     self._unit_transform_test(unit, quantized_sequence, expected_melodies)
Example #23
 def setUp(self):
   self.quantized_sequence = sequences_lib.QuantizedSequence()
   self.quantized_sequence.qpm = 60.0
   self.quantized_sequence.steps_per_quarter = 4
Example #24
    def _generate(self, generate_sequence_request):
        generate_sections = (
            generate_sequence_request.generator_options.generate_sections)
        if len(generate_sections) != 1:
            raise sequence_generator.SequenceGeneratorException(
                'This model supports only 1 generate_sections message, but '
                'got %s' % len(generate_sections))

        generate_section = generate_sections[0]
        primer_sequence = generate_sequence_request.input_sequence

        notes_by_end_time = sorted(primer_sequence.notes,
                                   key=lambda n: n.end_time)
        last_end_time = (notes_by_end_time[-1].end_time
                         if notes_by_end_time else 0)
        if last_end_time > generate_section.start_time_seconds:
            raise sequence_generator.SequenceGeneratorException(
                'Got GenerateSection request for section that is before the end of '
                'the NoteSequence. This model can only extend sequences. '
                'Requested start time: %s, Final note end time: %s' %
                (generate_section.start_time_seconds,
                 notes_by_end_time[-1].end_time))

        # Quantize the priming sequence.
        quantized_sequence = sequences_lib.QuantizedSequence()
        quantized_sequence.from_note_sequence(primer_sequence,
                                              self._steps_per_quarter)
        # Setting gap_bars to infinite ensures that the entire input will be used.
        extracted_melodies, _ = melodies_lib.extract_melodies(
            quantized_sequence,
            min_bars=0,
            min_unique_pitches=1,
            gap_bars=float('inf'),
            ignore_polyphonic_notes=True)
        assert len(extracted_melodies) <= 1

        qpm = (primer_sequence.tempos[0].qpm
               if primer_sequence and primer_sequence.tempos else
               constants.DEFAULT_QUARTERS_PER_MINUTE)
        start_step = self._seconds_to_steps(
            generate_section.start_time_seconds, qpm)
        end_step = self._seconds_to_steps(generate_section.end_time_seconds,
                                          qpm)

        if extracted_melodies and extracted_melodies[0]:
            melody = extracted_melodies[0]
        else:
            tf.logging.warn(
                'No melodies were extracted from the priming sequence. '
                'Melodies will be generated from scratch.')
            melody = melodies_lib.MonophonicMelody()
            melody.from_event_list([
                random.randint(self._melody_encoder_decoder.min_note,
                               self._melody_encoder_decoder.max_note)
            ])
            start_step += 1

        transpose_amount = melody.squash(
            self._melody_encoder_decoder.min_note,
            self._melody_encoder_decoder.max_note,
            self._melody_encoder_decoder.transpose_to_key)

        # Ensure that the melody extends up to the step we want to start generating.
        melody.set_length(start_step)

        inputs = self._session.graph.get_collection('inputs')[0]
        initial_state = self._session.graph.get_collection('initial_state')[0]
        final_state = self._session.graph.get_collection('final_state')[0]
        softmax = self._session.graph.get_collection('softmax')[0]

        # Sample the model one event at a time until the melody reaches
        # end_step.
        final_state_ = None
        for i in range(end_step - len(melody)):
            if i == 0:
                # First iteration: prime the RNN on the full melody so far.
                inputs_ = self._melody_encoder_decoder.get_inputs_batch(
                    [melody], full_length=True)
                initial_state_ = self._session.run(initial_state)
            else:
                # Later iterations: feed only the newest event and carry the
                # RNN state forward.
                inputs_ = self._melody_encoder_decoder.get_inputs_batch(
                    [melody])
                initial_state_ = final_state_

            feed_dict = {inputs: inputs_, initial_state: initial_state_}
            final_state_, softmax_ = self._session.run([final_state, softmax],
                                                       feed_dict)
            self._melody_encoder_decoder.extend_event_sequences([melody],
                                                                softmax_)

        melody.transpose(-transpose_amount)

        generate_response = generator_pb2.GenerateSequenceResponse()
        generate_response.generated_sequence.CopyFrom(
            melody.to_sequence(qpm=qpm))
        return generate_response