Example #1
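In these snippets, self.note_sequence is a NoteSequence prepared in the test's setUp (not shown), each note tuple is (pitch, velocity, start_time, end_time), and NO_DRUMS / DRUMS(...) are test-module shorthands for an empty drum event and an event containing the given drum pitches. testExtractDrumTracksTooShort: a drum track shorter than min_bars (here 2 bars of 4 steps) is rejected, while moving the final hit one step later makes the track span the full two bars and get extracted.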
  def testExtractDrumTracksTooShort(self):
    music_testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 127, 3, 4), (14, 50, 6, 7)],
        is_drum=True)
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    drum_tracks, _ = drum_pipelines.extract_drum_tracks(
        quantized_sequence, min_bars=2, gap_bars=1)
    drum_tracks = [list(drums) for drums in drum_tracks]
    self.assertEqual([], drum_tracks)

    del self.note_sequence.notes[:]
    music_testing_lib.add_track_to_sequence(
        self.note_sequence, 0,
        [(12, 127, 3, 4), (14, 50, 7, 8)],
        is_drum=True)
    quantized_sequence = sequences_lib.quantize_note_sequence(
        self.note_sequence, steps_per_quarter=1)
    drum_tracks, _ = drum_pipelines.extract_drum_tracks(
        quantized_sequence, min_bars=2, gap_bars=1)
    drum_tracks = [list(drums) for drums in drum_tracks]
    self.assertEqual(
        [[NO_DRUMS, NO_DRUMS, NO_DRUMS, DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
          DRUMS(14)]],
        drum_tracks)
Example #2
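testExtractDrumTracksSimple: hits from two drum instruments are merged into a single drums_lib.DrumTrack, with simultaneous hits combined into one event (DRUMS(11, 14) at step 6).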
    def testExtractDrumTracksSimple(self):
        music_testing_lib.add_track_to_sequence(self.note_sequence,
                                                0, [(12, 100, 2, 4),
                                                    (11, 1, 6, 7)],
                                                is_drum=True)
        music_testing_lib.add_track_to_sequence(self.note_sequence,
                                                1, [(12, 127, 2, 4),
                                                    (14, 50, 6, 9)],
                                                is_drum=True)
        quantized_sequence = sequences_lib.quantize_note_sequence(
            self.note_sequence, steps_per_quarter=1)
        expected = [[
            NO_DRUMS, NO_DRUMS,
            DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
            DRUMS(11, 14)
        ]]
        drum_tracks, _ = drum_pipelines.extract_drum_tracks(quantized_sequence,
                                                            min_bars=1,
                                                            gap_bars=1)

        self.assertEqual(1, len(drum_tracks))
        self.assertIsInstance(drum_tracks[0], drums_lib.DrumTrack)

        drum_tracks = sorted([list(drums) for drums in drum_tracks])
        self.assertEqual(expected, drum_tracks)
Example #3
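testExtractDrumTracksPadEnd: with pad_end=True the merged track is padded with NO_DRUMS events out to the next bar boundary (12 steps here, rather than ending at the last hit).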
 def testExtractDrumTracksPadEnd(self):
     music_testing_lib.add_track_to_sequence(self.note_sequence,
                                             0, [(12, 127, 2, 4),
                                                 (14, 50, 6, 7)],
                                             is_drum=True)
     music_testing_lib.add_track_to_sequence(self.note_sequence,
                                             1, [(12, 127, 2, 4),
                                                 (15, 50, 6, 8)],
                                             is_drum=True)
     music_testing_lib.add_track_to_sequence(self.note_sequence,
                                             2, [(12, 127, 2, 4),
                                                 (16, 50, 8, 9)],
                                             is_drum=True)
     quantized_sequence = sequences_lib.quantize_note_sequence(
         self.note_sequence, steps_per_quarter=1)
     expected = [[
         NO_DRUMS, NO_DRUMS,
         DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
         DRUMS(14, 15), NO_DRUMS,
         DRUMS(16), NO_DRUMS, NO_DRUMS, NO_DRUMS
     ]]
     drum_tracks, _ = drum_pipelines.extract_drum_tracks(quantized_sequence,
                                                         min_bars=1,
                                                         gap_bars=1,
                                                         pad_end=True)
     drum_tracks = [list(drums) for drums in drum_tracks]
     self.assertEqual(expected, drum_tracks)
Example #4
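testExtractMultipleDrumTracks: a stretch of silence longer than gap_bars splits the input, so two separate drum tracks are extracted; the second one starts at its own enclosing bar boundary (step 32).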
 def testExtractMultipleDrumTracks(self):
     music_testing_lib.add_track_to_sequence(self.note_sequence,
                                             0, [(12, 100, 2, 4),
                                                 (11, 1, 6, 11)],
                                             is_drum=True)
     music_testing_lib.add_track_to_sequence(self.note_sequence,
                                             1, [(12, 127, 2, 4),
                                                 (14, 50, 6, 8),
                                                 (50, 100, 33, 37),
                                                 (52, 100, 37, 38)],
                                             is_drum=True)
     quantized_sequence = sequences_lib.quantize_note_sequence(
         self.note_sequence, steps_per_quarter=1)
     expected = [[
         NO_DRUMS, NO_DRUMS,
         DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
         DRUMS(11, 14)
     ], [NO_DRUMS,
         DRUMS(50), NO_DRUMS, NO_DRUMS, NO_DRUMS,
         DRUMS(52)]]
     drum_tracks, _ = drum_pipelines.extract_drum_tracks(quantized_sequence,
                                                         min_bars=1,
                                                         gap_bars=2)
     drum_tracks = sorted([list(drums) for drums in drum_tracks])
     self.assertEqual(expected, drum_tracks)
Example #5
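testExtractDrumTracksLateStart: a track whose first hit falls late in the sequence starts at the bar boundary that encloses it (step 100), not at step 0.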
 def testExtractDrumTracksLateStart(self):
   music_testing_lib.add_track_to_sequence(
       self.note_sequence, 0,
       [(12, 100, 102, 103), (13, 100, 104, 106)],
       is_drum=True)
   quantized_sequence = sequences_lib.quantize_note_sequence(
       self.note_sequence, steps_per_quarter=1)
   expected = [[NO_DRUMS, NO_DRUMS, DRUMS(12), NO_DRUMS, DRUMS(13)]]
   drum_tracks, _ = drum_pipelines.extract_drum_tracks(
       quantized_sequence, min_bars=1, gap_bars=1)
   drum_tracks = sorted([list(drums) for drums in drum_tracks])
   self.assertEqual(expected, drum_tracks)
Example #6
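testExtractDrumTracksTooLongDiscard: a track longer than max_steps_discard (19 steps against a limit of 18) is dropped entirely.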
 def testExtractDrumTracksTooLongDiscard(self):
   music_testing_lib.add_track_to_sequence(
       self.note_sequence, 0,
       [(12, 127, 2, 4), (14, 50, 6, 15), (14, 50, 10, 15), (16, 100, 14, 19),
        (14, 100, 18, 19)],
       is_drum=True)
   quantized_sequence = sequences_lib.quantize_note_sequence(
       self.note_sequence, steps_per_quarter=1)
   drum_tracks, _ = drum_pipelines.extract_drum_tracks(
       quantized_sequence, min_bars=1, max_steps_discard=18, gap_bars=1)
   drum_tracks = [list(drums) for drums in drum_tracks]
   self.assertEqual([], drum_tracks)
Example #7
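testExtractDrumTracksTooLongTruncate: a track longer than max_steps_truncate is kept but cut down to 14 steps, so the hit at step 14 no longer appears in the output.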
 def testExtractDrumTracksTooLongTruncate(self):
   music_testing_lib.add_track_to_sequence(
       self.note_sequence, 0,
       [(12, 127, 2, 4), (14, 50, 6, 15), (14, 50, 10, 15), (16, 100, 14, 19)],
       is_drum=True)
   quantized_sequence = sequences_lib.quantize_note_sequence(
       self.note_sequence, steps_per_quarter=1)
   expected = [[NO_DRUMS, NO_DRUMS, DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
                DRUMS(14), NO_DRUMS, NO_DRUMS, NO_DRUMS, DRUMS(14), NO_DRUMS,
                NO_DRUMS, NO_DRUMS]]
   drum_tracks, _ = drum_pipelines.extract_drum_tracks(
       quantized_sequence, min_bars=1, max_steps_truncate=14, gap_bars=1)
   drum_tracks = [list(drums) for drums in drum_tracks]
   self.assertEqual(expected, drum_tracks)
Example #8
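A full _generate method from a drums SequenceGenerator, showing extract_drum_tracks used outside of tests: the primer NoteSequence is quantized and converted into a single DrumTrack with min_bars=0 and gap_bars=inf, an empty track is substituted when nothing can be extracted, and the model then extends the track up to the requested end step.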
    def _generate(self, input_sequence, generator_options):
        if len(generator_options.input_sections) > 1:
            raise sequence_generator.SequenceGeneratorError(
                'This model supports at most one input_sections message, but got %s'
                % len(generator_options.input_sections))
        if len(generator_options.generate_sections) != 1:
            raise sequence_generator.SequenceGeneratorError(
                'This model supports only 1 generate_sections message, but got %s'
                % len(generator_options.generate_sections))

        if input_sequence and input_sequence.tempos:
            qpm = input_sequence.tempos[0].qpm
        else:
            qpm = note_seq.DEFAULT_QUARTERS_PER_MINUTE
        steps_per_second = note_seq.steps_per_quarter_to_steps_per_second(
            self.steps_per_quarter, qpm)

        generate_section = generator_options.generate_sections[0]
        if generator_options.input_sections:
            input_section = generator_options.input_sections[0]
            primer_sequence = note_seq.trim_note_sequence(
                input_sequence, input_section.start_time,
                input_section.end_time)
            input_start_step = note_seq.quantize_to_step(
                input_section.start_time,
                steps_per_second,
                quantize_cutoff=0.0)
        else:
            primer_sequence = input_sequence
            input_start_step = 0

        if primer_sequence.notes:
            last_end_time = max(n.end_time for n in primer_sequence.notes)
        else:
            last_end_time = 0
        if last_end_time > generate_section.start_time:
            raise sequence_generator.SequenceGeneratorError(
                'Got GenerateSection request for section that is before the end of '
                'the NoteSequence. This model can only extend sequences. Requested '
                'start time: %s, Final note end time: %s' %
                (generate_section.start_time, last_end_time))

        # Quantize the priming sequence.
        quantized_sequence = note_seq.quantize_note_sequence(
            primer_sequence, self.steps_per_quarter)
        # Setting gap_bars to infinite ensures that the entire input will be used.
        extracted_drum_tracks, _ = drum_pipelines.extract_drum_tracks(
            quantized_sequence,
            search_start_step=input_start_step,
            min_bars=0,
            gap_bars=float('inf'),
            ignore_is_drum=True)
        assert len(extracted_drum_tracks) <= 1

        start_step = note_seq.quantize_to_step(generate_section.start_time,
                                               steps_per_second,
                                               quantize_cutoff=0.0)
        # Note that when quantizing end_step, we set quantize_cutoff to 1.0 so it
        # always rounds down. This avoids generating a sequence that ends at 5.0
        # seconds when the requested end time is 4.99.
        end_step = note_seq.quantize_to_step(generate_section.end_time,
                                             steps_per_second,
                                             quantize_cutoff=1.0)

        if extracted_drum_tracks and extracted_drum_tracks[0]:
            drums = extracted_drum_tracks[0]
        else:
            # If no drum track could be extracted, create an empty drum track that
            # starts 1 step before the request start_step. This will result in 1 step
            # of silence when the drum track is extended below.
            steps_per_bar = int(
                note_seq.steps_per_bar_in_quantized_sequence(
                    quantized_sequence))
            drums = note_seq.DrumTrack(
                [],
                start_step=max(0, start_step - 1),
                steps_per_bar=steps_per_bar,
                steps_per_quarter=self.steps_per_quarter)

        # Ensure that the drum track extends up to the step we want to start
        # generating.
        drums.set_length(start_step - drums.start_step)

        # Extract generation arguments from generator options.
        arg_types = {
            'temperature': lambda arg: arg.float_value,
            'beam_size': lambda arg: arg.int_value,
            'branch_factor': lambda arg: arg.int_value,
            'steps_per_iteration': lambda arg: arg.int_value
        }
        args = dict((name, value_fn(generator_options.args[name]))
                    for name, value_fn in arg_types.items()
                    if name in generator_options.args)

        generated_drums = self._model.generate_drum_track(
            end_step - drums.start_step, drums, **args)
        generated_sequence = generated_drums.to_sequence(qpm=qpm)
        assert (generated_sequence.total_time -
                generate_section.end_time) <= 1e-5
        return generated_sequence
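For completeness, a minimal standalone sketch of the quantize-then-extract flow the examples above exercise. The module paths mirror the imports implied by the snippets; the drum pitches, note times, tempo, and settings are illustrative assumptions rather than values taken from any test file, and the exact extracted events may vary with library version.

from magenta.pipelines import drum_pipelines
import note_seq
from note_seq.protobuf import music_pb2

sequence = music_pb2.NoteSequence()
sequence.tempos.add(qpm=60)  # At 60 qpm with steps_per_quarter=1, seconds map 1:1 to steps.
sequence.time_signatures.add(numerator=4, denominator=4, time=0)
# Add a few drum hits: (pitch, velocity, start_time, end_time) in seconds.
for pitch, velocity, start, end in [(36, 100, 0.0, 0.5),
                                    (38, 100, 2.0, 2.5),
                                    (42, 100, 3.0, 3.5)]:
  sequence.notes.add(pitch=pitch, velocity=velocity, start_time=start,
                     end_time=end, is_drum=True)
sequence.total_time = 4.0

quantized = note_seq.quantize_note_sequence(sequence, steps_per_quarter=1)
drum_tracks, _ = drum_pipelines.extract_drum_tracks(
    quantized, min_bars=1, gap_bars=1)
# With these settings the hits span one full bar, so one track should be
# extracted; each event is the set of drum pitches struck at that step.
print(len(drum_tracks), [list(drums) for drums in drum_tracks])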