Code example #1: testMelodyRNNPipeline
    def testMelodyRNNPipeline(self):
        FLAGS.eval_ratio = 0.0
        note_sequence = testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 120}""")
        testing_lib.add_track(note_sequence, 0, [(12, 100, 0.00, 2.0),
                                                 (11, 55, 2.1, 5.0),
                                                 (40, 45, 5.1, 8.0),
                                                 (55, 120, 8.1, 11.0),
                                                 (53, 99, 11.1, 14.1)])

        quantizer = pipelines_common.Quantizer(steps_per_quarter=4)
        melody_extractor = pipelines_common.MonophonicMelodyExtractor(
            min_bars=7,
            min_unique_pitches=5,
            gap_bars=1.0,
            ignore_polyphonic_notes=False)
        one_hot_encoder = melodies_lib.OneHotEncoderDecoder(0, 127, 0)
        quantized = quantizer.transform(note_sequence)[0]
        print(quantized.tracks)
        melody = melody_extractor.transform(quantized)[0]
        one_hot = one_hot_encoder.encode(melody)
        print(one_hot)
        expected_result = {'training_melodies': [one_hot], 'eval_melodies': []}

        pipeline_inst = melody_rnn_create_dataset.get_pipeline(one_hot_encoder)
        result = pipeline_inst.transform(note_sequence)
        self.assertEqual(expected_result, result)
Code example #2: testMelodyRNNPipeline
  def testMelodyRNNPipeline(self):
    note_sequence = testing_lib.parse_test_proto(
        music_pb2.NoteSequence,
        """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          bpm: 120}""")
    testing_lib.add_track(
        note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])

    quantizer = pipelines_common.Quantizer(steps_per_beat=4)
    melody_extractor = pipelines_common.MonophonicMelodyExtractor(
        min_bars=7, min_unique_pitches=5,
        gap_bars=1.0)
    one_hot_encoder = melody_rnn_create_dataset.OneHotEncoder()
    quantized = quantizer.transform(note_sequence)[0]
    melody = melody_extractor.transform(quantized)[0]
    one_hot = one_hot_encoder.transform(melody)[0]
    expected_result = {'melody_rnn_train': [one_hot], 'melody_rnn_eval': []}

    pipeline_inst = melody_rnn_create_dataset.MelodyRNNPipeline(eval_ratio=0)
    result = pipeline_inst.transform(note_sequence)
    self.assertEqual(expected_result, result)
Code example #3: testQuantizer
    def testQuantizer(self):
        steps_per_quarter = 4
        note_sequence = testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 60}""")
        testing_lib.add_track(note_sequence, 0, [(12, 100, 0.01, 10.0),
                                                 (11, 55, 0.22, 0.50),
                                                 (40, 45, 2.50, 3.50),
                                                 (55, 120, 4.0, 4.01),
                                                 (52, 99, 4.75, 5.0)])
        expected_quantized_sequence = sequences_lib.QuantizedSequence()
        expected_quantized_sequence.qpm = 60.0
        expected_quantized_sequence.steps_per_quarter = steps_per_quarter
        testing_lib.add_quantized_track(expected_quantized_sequence, 0,
                                        [(12, 100, 0, 40), (11, 55, 1, 2),
                                         (40, 45, 10, 14), (55, 120, 16, 17),
                                         (52, 99, 19, 20)])

        unit = pipelines_common.Quantizer(steps_per_quarter)
        self._unit_transform_test(unit, note_sequence,
                                  [expected_quantized_sequence])
Code example #4: testQuantizer
  def testQuantizer(self):
    steps_per_beat = 4
    note_sequence = testing_lib.parse_test_proto(
        music_pb2.NoteSequence,
        """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          bpm: 60}""")
    testing_lib.add_track(
        note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    expected_quantized_sequence = sequences_lib.QuantizedSequence()
    expected_quantized_sequence.bpm = 60.0
    expected_quantized_sequence.steps_per_beat = steps_per_beat
    testing_lib.add_quantized_track(
        expected_quantized_sequence, 0,
        [(12, 100, 0, 40), (11, 55, 1, 2), (40, 45, 10, 14),
         (55, 120, 16, 17), (52, 99, 19, 20)])

    unit = pipelines_common.Quantizer(steps_per_beat)
    self._unit_transform_test(unit, note_sequence,
                              [expected_quantized_sequence])
Code example #5: setUp
    def setUp(self):
        self.steps_per_beat = 4
        self.note_sequence = testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          bpm: 60}""")
        self.expected_quantized_sequence = sequences_lib.QuantizedSequence()
        self.expected_quantized_sequence.bpm = 60.0
        self.expected_quantized_sequence.steps_per_beat = self.steps_per_beat
Code example #6: setUp
  def setUp(self):
    self.steps_per_beat = 4
    self.note_sequence = testing_lib.parse_test_proto(
        music_pb2.NoteSequence,
        """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          bpm: 60}""")
    self.expected_quantized_sequence = sequences_lib.QuantizedSequence()
    self.expected_quantized_sequence.bpm = 60.0
    self.expected_quantized_sequence.steps_per_beat = self.steps_per_beat
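
The testQuantizer methods in code examples #3 and #4 call a self._unit_transform_test helper that is not shown here. The sketch below is one minimal way such a helper could be written inside a standard unittest.TestCase; the class name, the commented-out import paths, and the helper body are assumptions for illustration, not the actual Magenta implementation.

import unittest

# The import paths below are assumptions about where the modules used in the
# examples live; adjust them to the actual source layout.
# from magenta.lib import sequences_lib
# from magenta.lib import testing_lib
# from magenta.pipelines import pipelines_common
# from magenta.protobuf import music_pb2


class PipelineUnitsCommonTest(unittest.TestCase):
  """Hypothetical host class for the setUp and test methods shown above."""

  def _unit_transform_test(self, unit, input_instance, expected_outputs):
    # Run a single pipeline unit on one input and compare the outputs it
    # produces against the expected list, mirroring the direct assertEqual
    # checks used in the testMelodyRNNPipeline examples.
    outputs = unit.transform(input_instance)
    self.assertEqual(expected_outputs, outputs)


if __name__ == '__main__':
  unittest.main()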