def get_pipeline(melody_encoder_decoder):
    """Builds the DAG pipeline that converts NoteSequences into an RNN dataset.

    The pipeline chains four stages: quantize the incoming NoteSequence,
    extract monophonic melodies, encode them with the given encoder/decoder,
    and randomly partition the results into eval and training collections
    (eval fraction taken from FLAGS.eval_ratio).

    Args:
      melody_encoder_decoder: A magenta.music.MelodyEncoderDecoder object.

    Returns:
      A pipeline.Pipeline instance.
    """
    quantize_stage = pipelines_common.Quantizer(steps_per_quarter=4)
    extract_stage = pipelines_common.MelodyExtractor(
        min_bars=7,
        min_unique_pitches=5,
        gap_bars=1.0,
        ignore_polyphonic_notes=False)
    encode_stage = EncoderPipeline(melody_encoder_decoder)
    partition_stage = pipelines_common.RandomPartition(
        tf.train.SequenceExample, ['eval_melodies', 'training_melodies'],
        [FLAGS.eval_ratio])

    # Each DAG entry maps a stage to the stage (or terminal) that feeds it.
    dag = {
        quantize_stage: dag_pipeline.Input(music_pb2.NoteSequence),
        extract_stage: quantize_stage,
        encode_stage: extract_stage,
        partition_stage: encode_stage,
        dag_pipeline.Output(): partition_stage,
    }
    return dag_pipeline.DAGPipeline(dag)
# Exemplo n.º 2
# 0
    def testMelodyRNNPipeline(self):
        """End-to-end check that get_pipeline matches the manual stage chain.

        Builds a small NoteSequence, runs the quantize/extract/encode stages
        by hand to compute the expected SequenceExamples, then verifies the
        assembled DAG pipeline produces the same partitioned result.
        """
        FLAGS.eval_ratio = 0.0  # Everything lands in the training partition.
        note_sequence = magenta.common.testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 120}""")
        magenta.music.testing_lib.add_track_to_sequence(
            note_sequence, 0, [(12, 100, 0.00, 2.0), (11, 55, 2.1, 5.0),
                               (40, 45, 5.1, 8.0), (55, 120, 8.1, 11.0),
                               (53, 99, 11.1, 14.1)])

        # Mirror the stages that get_pipeline wires together.
        quantizer = pipelines_common.Quantizer(steps_per_quarter=4)
        melody_extractor = pipelines_common.MelodyExtractor(
            min_bars=7,
            min_unique_pitches=5,
            gap_bars=1.0,
            ignore_polyphonic_notes=False)
        one_hot_encoder = magenta.music.OneHotMelodyEncoderDecoder(0, 127, 0)
        quantized = quantizer.transform(note_sequence)[0]
        melody = melody_extractor.transform(quantized)[0]
        one_hot = one_hot_encoder.squash_and_encode(melody)
        expected_result = {'training_melodies': [one_hot], 'eval_melodies': []}

        pipeline_inst = melody_rnn_create_dataset.get_pipeline(one_hot_encoder)
        result = pipeline_inst.transform(note_sequence)
        self.assertEqual(expected_result, result)
# Exemplo n.º 3
# 0
 def testMelodyExtractor(self):
     """MelodyExtractor yields one expected melody per quantized track."""
     quantized_sequence = sequences_lib.QuantizedSequence()
     quantized_sequence.steps_per_quarter = 1
     # Two tracks, two quantized notes each: (pitch, velocity, start, end).
     testing_lib.add_quantized_track_to_sequence(quantized_sequence, 0,
                                                 [(12, 100, 2, 4),
                                                  (11, 1, 6, 7)])
     testing_lib.add_quantized_track_to_sequence(quantized_sequence, 1,
                                                 [(12, 127, 2, 4),
                                                  (14, 50, 6, 8)])
     expected_events = [[
         NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11
     ], [
         NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14, NO_EVENT
     ]]
     expected_melodies = [
         melodies_lib.Melody(events, steps_per_quarter=1, steps_per_bar=4)
         for events in expected_events
     ]
     unit = pipelines_common.MelodyExtractor(min_bars=1,
                                             min_unique_pitches=1,
                                             gap_bars=1)
     self._unit_transform_test(unit, quantized_sequence, expected_melodies)