Example 1
 def testEq(self):
   """QuantizedSequence equality must ignore per-track note ordering."""
   first = sequences_lib.QuantizedSequence()
   first.qpm = 123.0
   first.steps_per_quarter = 7
   first.time_signature = sequences_lib.TimeSignature(7, 8)
   second = sequences_lib.QuantizedSequence()
   second.qpm = 123.0
   second.steps_per_quarter = 7
   second.time_signature = sequences_lib.TimeSignature(7, 8)
   # Identical notes per instrument; `second` receives them in reverse order.
   track_notes = {
       0: [(12, 100, 0, 40), (11, 100, 1, 2)],
       2: [(55, 100, 4, 6), (14, 120, 4, 10)],
       3: [(1, 10, 0, 6), (2, 50, 20, 21), (0, 101, 17, 21)],
   }
   for instrument, notes in track_notes.items():
     testing_lib.add_quantized_track(first, instrument, notes)
     testing_lib.add_quantized_track(second, instrument, notes[::-1])
   self.assertEqual(first, second)
Example 2
    def testFromNoteSequence_TimeSignatureChange(self):
        """Quantization only fails when the time signature actually changes."""
        testing_lib.add_track(
            self.note_sequence, 0,
            [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
             (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
        del self.note_sequence.time_signatures[:]
        quantized = sequences_lib.QuantizedSequence()

        # No time signature present: quantization succeeds.
        quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)

        # A single 4/4 time signature: still succeeds.
        self.note_sequence.time_signatures.add(numerator=4, denominator=4)
        quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)

        # A duplicate 4/4 signature is not a change: still succeeds.
        self.note_sequence.time_signatures.add(numerator=4, denominator=4)
        quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)

        # Switching to 2/4 is a genuine change and must raise.
        self.note_sequence.time_signatures.add(numerator=2, denominator=4)
        with self.assertRaises(sequences_lib.MultipleTimeSignatureException):
            quantized.from_note_sequence(self.note_sequence,
                                         self.steps_per_quarter)
    def testQuantizer(self):
        """Quantizer pipeline unit converts a NoteSequence to quantized steps."""
        steps_per_quarter = 4
        note_sequence = testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 60}""")
        # Input notes as (pitch, velocity, start_seconds, end_seconds).
        testing_lib.add_track(
            note_sequence, 0,
            [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
             (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])

        # Expected result at 60 qpm with 4 steps per quarter (4 steps/second,
        # judging by the step values below).
        expected = sequences_lib.QuantizedSequence()
        expected.qpm = 60.0
        expected.steps_per_quarter = steps_per_quarter
        testing_lib.add_quantized_track(
            expected, 0,
            [(12, 100, 0, 40), (11, 55, 1, 2), (40, 45, 10, 14),
             (55, 120, 16, 17), (52, 99, 19, 20)])

        unit = pipelines_common.Quantizer(steps_per_quarter)
        self._unit_transform_test(unit, note_sequence, [expected])
Example 4
  def testMonophonicMelodyExtractor(self):
    """Extractor yields one melody per monophonic instrument track."""
    quantized_sequence = sequences_lib.QuantizedSequence()
    quantized_sequence.steps_per_beat = 1
    for instrument, notes in [
        (0, [(12, 100, 2, 4), (11, 1, 6, 7)]),
        (1, [(12, 127, 2, 4), (14, 50, 6, 8)])]:
      testing_lib.add_quantized_track(quantized_sequence, instrument, notes)

    expected_events = [
        [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11, NOTE_OFF],
        [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14, NO_EVENT,
         NOTE_OFF]]
    expected_melodies = []
    for event_list in expected_events:
      expected = melodies_lib.MonophonicMelody()
      expected.from_event_list(event_list)
      expected.steps_per_bar = 4
      expected_melodies.append(expected)
    # End steps are set explicitly; note melody 1 ends past its 9 events
    # (presumably padded to a bar boundary by extraction).
    expected_melodies[0].end_step = 8
    expected_melodies[1].end_step = 12

    unit = pipelines_common.MonophonicMelodyExtractor(
        min_bars=1, min_unique_pitches=1, gap_bars=1)
    self._unit_transform_test(unit, quantized_sequence, expected_melodies)
Example 5
  def testStepsPerBar(self):
    """steps_per_bar() tracks the NoteSequence's time signature."""
    quantized = sequences_lib.QuantizedSequence()

    # With the setUp fixture's time signature (presumably 4/4 — verify in
    # setUp), a bar spans 16 steps.
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
    self.assertEqual(16, quantized.steps_per_bar())

    # After switching to 6/8, a bar spans 12 steps.
    self.note_sequence.time_signatures[0].numerator = 6
    self.note_sequence.time_signatures[0].denominator = 8
    quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
    self.assertEqual(12.0, quantized.steps_per_bar())
Example 6
 def transform(self, note_sequence):
     """Quantizes a NoteSequence, dropping multi-time-signature inputs.

     Args:
       note_sequence: A NoteSequence proto to quantize.

     Returns:
       A one-element list containing the QuantizedSequence, or an empty
       list if the sequence contains multiple time signatures.
     """
     result = sequences_lib.QuantizedSequence()
     try:
         result.from_note_sequence(note_sequence, self.steps_per_beat)
     except sequences_lib.MultipleTimeSignatureException:
         tf.logging.debug('Multiple time signatures found in NoteSequence')
         return []
     return [result]
 def setUp(self):
     """Builds a 4/4, 60 bpm NoteSequence fixture and an empty expected
     QuantizedSequence with matching bpm/steps_per_beat for the tests to
     populate."""
     self.steps_per_beat = 4
     # NOTE: the proto text's indentation is part of the string literal.
     self.note_sequence = testing_lib.parse_test_proto(
         music_pb2.NoteSequence, """
     time_signatures: {
       numerator: 4
       denominator: 4}
     tempos: {
       bpm: 60}""")
     self.expected_quantized_sequence = sequences_lib.QuantizedSequence()
     self.expected_quantized_sequence.bpm = 60.0
     self.expected_quantized_sequence.steps_per_beat = self.steps_per_beat
Example 8
 def testRounding(self):
   """Note start/end times round to the nearest quantized step."""
   # Input notes: (pitch, velocity, start_seconds, end_seconds).
   raw_notes = [(12, 100, 0.01, 0.24), (11, 100, 0.22, 0.55),
                (40, 100, 0.50, 0.75), (41, 100, 0.689, 1.18),
                (44, 100, 1.19, 1.69), (55, 100, 4.0, 4.01)]
   # Expected quantization: (pitch, velocity, start_step, end_step).
   rounded_notes = [(12, 100, 0, 1), (11, 100, 1, 2), (40, 100, 2, 3),
                    (41, 100, 3, 5), (44, 100, 5, 7), (55, 100, 16, 17)]
   testing_lib.add_track(self.note_sequence, 1, raw_notes)
   testing_lib.add_quantized_track(
       self.expected_quantized_sequence, 1, rounded_notes)
   quantized = sequences_lib.QuantizedSequence()
   quantized.from_note_sequence(self.note_sequence, self.steps_per_quarter)
   self.assertEqual(self.expected_quantized_sequence, quantized)
Example 9
 def testFromNoteSequence(self):
   """End-to-end quantization of a single-track NoteSequence."""
   # (pitch, velocity, start_seconds, end_seconds) inputs and their
   # expected (pitch, velocity, start_step, end_step) quantizations.
   raw_notes = [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50),
                (40, 45, 2.50, 3.50), (55, 120, 4.0, 4.01),
                (52, 99, 4.75, 5.0)]
   step_notes = [(12, 100, 0, 40), (11, 55, 1, 2), (40, 45, 10, 14),
                 (55, 120, 16, 17), (52, 99, 19, 20)]
   testing_lib.add_track(self.note_sequence, 0, raw_notes)
   testing_lib.add_quantized_track(
       self.expected_quantized_sequence, 0, step_notes)
   actual = sequences_lib.QuantizedSequence()
   actual.from_note_sequence(self.note_sequence, self.steps_per_quarter)
   self.assertEqual(self.expected_quantized_sequence, actual)
Example 10
 def transform(self, note_sequence):
     """Quantizes a NoteSequence, recording a stat for discarded inputs.

     Args:
       note_sequence: A NoteSequence proto to quantize.

     Returns:
       A one-element list containing the QuantizedSequence, or an empty
       list (with a discard counter recorded via _set_stats) when the
       sequence has multiple time signatures.
     """
     result = sequences_lib.QuantizedSequence()
     try:
         result.from_note_sequence(note_sequence, self.steps_per_quarter)
     except sequences_lib.MultipleTimeSignatureException:
         tf.logging.debug('Multiple time signatures found in NoteSequence')
         self._set_stats([
             statistics.Counter(
                 'sequences_discarded_because_multiple_time_signatures', 1)
         ])
         return []
     return [result]
Example 11
 def testMultiTrack(self):
     """Each instrument track is quantized independently."""
     # instrument -> (raw seconds-based notes, expected step-based notes).
     tracks = {
         0: ([(12, 100, 1.0, 4.0), (19, 100, 0.95, 3.0)],
             [(12, 100, 4, 16), (19, 100, 4, 12)]),
         3: ([(12, 100, 1.0, 4.0), (19, 100, 2.0, 5.0)],
             [(12, 100, 4, 16), (19, 100, 8, 20)]),
         7: ([(12, 100, 1.0, 5.0), (19, 100, 2.0, 4.0), (24, 100, 3.0, 3.5)],
             [(12, 100, 4, 20), (19, 100, 8, 16), (24, 100, 12, 14)]),
     }
     for instrument, (raw_notes, step_notes) in tracks.items():
         testing_lib.add_track(self.note_sequence, instrument, raw_notes)
         testing_lib.add_quantized_track(
             self.expected_quantized_sequence, instrument, step_notes)
     quantized = sequences_lib.QuantizedSequence()
     quantized.from_note_sequence(self.note_sequence, self.steps_per_beat)
     self.assertEqual(self.expected_quantized_sequence, quantized)
Example 12
    def testDeepcopy(self):
        """deepcopy() yields an equal but independent QuantizedSequence.

        After copying, mutating the original must not affect the copy, so
        the two compare unequal once the original gains an extra track.
        """
        quantized = sequences_lib.QuantizedSequence()
        testing_lib.add_track(
            self.note_sequence, 0,
            [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
             (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
        quantized.from_note_sequence(self.note_sequence,
                                     self.steps_per_quarter)

        quantized_copy = quantized.deepcopy()
        self.assertEqual(quantized, quantized_copy)

        # Mutate the ORIGINAL (the local `quantized`, not the nonexistent
        # attribute `self.quantized` the old code referenced, which raised
        # AttributeError) so the independence check is actually exercised.
        testing_lib.add_quantized_track(
            quantized, 1,
            [(12, 100, 4, 20), (19, 100, 8, 16), (24, 100, 12, 14)])

        self.assertNotEqual(quantized, quantized_copy)
Example 13
 def setUp(self):
     """Creates an empty 60 qpm, 4-steps-per-quarter QuantizedSequence."""
     fixture = sequences_lib.QuantizedSequence()
     fixture.qpm = 60.0
     fixture.steps_per_quarter = 4
     self.quantized_sequence = fixture
Example 14
    def _generate(self, generate_sequence_request):
        """Generates a monophonic melody continuation with the loaded model.

        Extracts a priming melody from the request's input sequence (or
        seeds a random first note when none can be extracted), runs the
        session's RNN step-by-step to extend the melody up to the requested
        end step, and returns the result as a NoteSequence response.

        Args:
          generate_sequence_request: Request proto. Must contain exactly one
            generate_sections entry, and the input_sequence's notes must all
            end before the requested section's start time.

        Returns:
          A GenerateSequenceResponse with generated_sequence populated.

        Raises:
          sequence_generator.SequenceGeneratorException: If there is not
            exactly one generate_sections entry, or the input sequence
            extends past the requested start time.
        """
        if len(generate_sequence_request.generator_options.generate_sections
               ) != 1:
            raise sequence_generator.SequenceGeneratorException(
                'This model supports only 1 generate_sections message, but got %s'
                % (len(generate_sequence_request.generator_options.
                       generate_sections)))

        generate_section = (
            generate_sequence_request.generator_options.generate_sections[0])
        primer_sequence = generate_sequence_request.input_sequence

        # The model can only extend; reject requests whose section starts
        # before the primer's final note ends.
        notes_by_end_time = sorted(primer_sequence.notes,
                                   key=lambda n: n.end_time)
        last_end_time = notes_by_end_time[
            -1].end_time if notes_by_end_time else 0
        if last_end_time > generate_section.start_time_seconds:
            raise sequence_generator.SequenceGeneratorException(
                'Got GenerateSection request for section that is before the end of '
                'the NoteSequence. This model can only extend sequences. '
                'Requested start time: %s, Final note end time: %s' %
                (generate_section.start_time_seconds,
                 notes_by_end_time[-1].end_time))

        # Quantize the priming sequence.
        quantized_sequence = sequences_lib.QuantizedSequence()
        quantized_sequence.from_note_sequence(primer_sequence,
                                              self._steps_per_beat)
        # Setting gap_bars to infinite ensures that the entire input will be used.
        extracted_melodies, _ = melodies_lib.extract_melodies(
            quantized_sequence,
            min_bars=0,
            min_unique_pitches=1,
            gap_bars=float('inf'),
            ignore_polyphonic_notes=True)
        assert len(extracted_melodies) <= 1

        # Fall back to the library default tempo when the primer has none.
        bpm = (primer_sequence.tempos[0].bpm
               if primer_sequence and primer_sequence.tempos else
               melodies_lib.DEFAULT_BEATS_PER_MINUTE)
        start_step = self._seconds_to_steps(
            generate_section.start_time_seconds, bpm)
        end_step = self._seconds_to_steps(generate_section.end_time_seconds,
                                          bpm)

        if extracted_melodies and extracted_melodies[0]:
            melody = extracted_melodies[0]
        else:
            # No usable primer melody: seed with one random in-range note and
            # shift the start forward one step to account for it.
            tf.logging.warn(
                'No melodies were extracted from the priming sequence. '
                'Melodies will be generated from scratch.')
            melody = melodies_lib.MonophonicMelody()
            melody.from_event_list([
                random.randint(self._melody_encoder_decoder.min_note,
                               self._melody_encoder_decoder.max_note)
            ])
            start_step += 1

        # Squash the melody into the model's note range; the shift amount is
        # kept so it can be undone after generation.
        transpose_amount = melody.squash(
            self._melody_encoder_decoder.min_note,
            self._melody_encoder_decoder.max_note,
            self._melody_encoder_decoder.transpose_to_key)

        # Ensure that the melody extends up to the step we want to start generating.
        melody.set_length(start_step)

        # Fetch the model tensors stashed in the graph's named collections.
        inputs = self._session.graph.get_collection('inputs')[0]
        initial_state = self._session.graph.get_collection('initial_state')[0]
        final_state = self._session.graph.get_collection('final_state')[0]
        softmax = self._session.graph.get_collection('softmax')[0]

        # Generate one event per iteration: the first pass feeds the full
        # melody and a fresh initial state; later passes carry the RNN state
        # forward. extend_melodies presumably samples the next event from
        # softmax_ and appends it — confirm against the encoder/decoder.
        final_state_ = None
        for i in range(end_step - len(melody)):
            if i == 0:
                inputs_ = self._melody_encoder_decoder.get_inputs_batch(
                    [melody], full_length=True)
                initial_state_ = self._session.run(initial_state)
            else:
                inputs_ = self._melody_encoder_decoder.get_inputs_batch(
                    [melody])
                initial_state_ = final_state_

            feed_dict = {inputs: inputs_, initial_state: initial_state_}
            final_state_, softmax_ = self._session.run([final_state, softmax],
                                                       feed_dict)
            self._melody_encoder_decoder.extend_melodies([melody], softmax_)

        # Undo the earlier squash so the output is in the primer's key.
        melody.transpose(-transpose_amount)

        generate_response = generator_pb2.GenerateSequenceResponse()
        generate_response.generated_sequence.CopyFrom(
            melody.to_sequence(bpm=bpm))
        return generate_response
Example 15
 def setUp(self):
     """Creates an empty 60 bpm, 4-steps-per-beat QuantizedSequence."""
     fixture = sequences_lib.QuantizedSequence()
     fixture.bpm = 60.0
     fixture.steps_per_beat = 4
     self.quantized_sequence = fixture