Example #1

def midi_files_to_sequence_proto(midi_files, batch_size, input_size):
    # Parse each MIDI file into a NoteSequence proto, skipping files that
    # fail to parse (midi_file_to_sequence_proto returns None for those).
    all_sequences = [
        seq for seq in (
            midi_file_to_sequence_proto(midi_file, batch_size, input_size)
            for midi_file in midi_files)
        if seq is not None
    ]
    examples = []

    quantizer = pipelines_common.Quantizer(steps_per_quarter=4)
    melody_extractor = melody_pipelines.MelodyExtractor(
        min_bars=7,
        min_unique_pitches=5,
        gap_bars=1.0,
        ignore_polyphonic_notes=False)
    one_hot_encoding = magenta.music.OneHotEventSequenceEncoderDecoder(
        magenta.music.MelodyOneHotEncoding(0, 128))

    for note_sequence in all_sequences:
        # MelodyExtractor requires a quantized sequence; a pipeline's
        # transform() returns a list, so index the single quantized result.
        quantized = quantizer.transform(note_sequence)[0]
        # Zero or more melodies may be extracted from each sequence.
        for melody in melody_extractor.transform(quantized):
            # Squash pitches into [0, 128) to match MelodyOneHotEncoding.
            melody.squash(0, 128, 0)
            examples.append(one_hot_encoding.encode(melody))

    return get_padded_batch(examples, batch_size, input_size)
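A minimal way to exercise this helper, sketched under the assumption that midi_file_to_sequence_proto and get_padded_batch are defined in the same module; the file names are placeholders. input_size=130 matches the width of MelodyOneHotEncoding(0, 128): 128 MIDI pitches plus the two special no-event/note-off classes.

# Hypothetical call; the paths and sizes are placeholders.
batch = midi_files_to_sequence_proto(
    ['melody_a.mid', 'melody_b.mid'], batch_size=2, input_size=130)

Example #2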
    def testMelodyRNNPipeline(self):
        note_sequence = magenta.common.testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 120}""")
        magenta.music.testing_lib.add_track_to_sequence(
            note_sequence, 0, [(12, 100, 0.00, 2.0), (11, 55, 2.1, 5.0),
                               (40, 45, 5.1, 8.0), (55, 120, 8.1, 11.0),
                               (53, 99, 11.1, 14.1)])

        quantizer = note_sequence_pipelines.Quantizer(steps_per_quarter=4)
        melody_extractor = melody_pipelines.MelodyExtractor(
            min_bars=7,
            min_unique_pitches=5,
            gap_bars=1.0,
            ignore_polyphonic_notes=False)
        one_hot_encoding = magenta.music.OneHotEventSequenceEncoderDecoder(
            magenta.music.MelodyOneHotEncoding(self.config.min_note,
                                               self.config.max_note))
        quantized = quantizer.transform(note_sequence)[0]
        melody = melody_extractor.transform(quantized)[0]
        melody.squash(self.config.min_note, self.config.max_note,
                      self.config.transpose_to_key)
        one_hot = one_hot_encoding.encode(melody)
        expected_result = {'training_melodies': [one_hot], 'eval_melodies': []}

        pipeline_inst = melody_rnn_pipeline.get_pipeline(self.config,
                                                         eval_ratio=0.0)
        result = pipeline_inst.transform(note_sequence)
        self.assertEqual(expected_result, result)
Example #3

    def testMelodyExtractor(self):
        note_sequence = common_testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 60}""")
        testing_lib.add_track_to_sequence(note_sequence, 0, [(12, 100, 2, 4),
                                                             (11, 1, 6, 7)])
        testing_lib.add_track_to_sequence(note_sequence, 1, [(12, 127, 2, 4),
                                                             (14, 50, 6, 8)])
        quantized_sequence = sequences_lib.quantize_note_sequence(
            note_sequence, steps_per_quarter=1)
        expected_events = [[
            NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11
        ], [
            NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14, NO_EVENT
        ]]
        expected_melodies = []
        for events_list in expected_events:
            melody = melodies_lib.Melody(events_list,
                                         steps_per_quarter=1,
                                         steps_per_bar=4)
            expected_melodies.append(melody)
        unit = melody_pipelines.MelodyExtractor(min_bars=1,
                                                min_unique_pitches=1,
                                                gap_bars=1)
        self._unit_transform_test(unit, quantized_sequence, expected_melodies)
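The expected-event lists in these melody-extractor tests rely on module-level aliases for the melody special events; assuming magenta.music.constants as the source, they would be:

from magenta.music import constants

NO_EVENT = constants.MELODY_NO_EVENT  # -2: no new event at this step
NOTE_OFF = constants.MELODY_NOTE_OFF  # -1: end the currently sounding note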
Example #4
def get_pipeline(config):
    """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A MelodyRnnConfig object.

  Returns:
    A pipeline.Pipeline instance.
  """
    quantizer = pipelines_common.Quantizer(steps_per_quarter=4)
    melody_extractor = melody_pipelines.MelodyExtractor(
        min_bars=7,
        max_steps=512,
        min_unique_pitches=5,
        gap_bars=1.0,
        ignore_polyphonic_notes=False)
    encoder_pipeline = EncoderPipeline(config)
    partitioner = pipelines_common.RandomPartition(
        tf.train.SequenceExample, ['eval_melodies', 'training_melodies'],
        [FLAGS.eval_ratio])

    dag = {
        quantizer: dag_pipeline.Input(music_pb2.NoteSequence),
        melody_extractor: quantizer,
        encoder_pipeline: melody_extractor,
        partitioner: encoder_pipeline,
        dag_pipeline.Output(): partitioner
    }
    return dag_pipeline.DAGPipeline(dag)
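The EncoderPipeline stage referenced above is not defined in these examples. A minimal sketch of it, assuming the surrounding imports (tf, pipeline, melodies_lib) and a config that carries encoder_decoder, min_note, max_note, and transpose_to_key; treat this as an approximation, not the canonical melody_rnn_pipeline source:

class EncoderPipeline(pipeline.Pipeline):
    """Converts extracted Melody objects into tf.train.SequenceExamples."""

    def __init__(self, config, name=None):
        super(EncoderPipeline, self).__init__(
            input_type=melodies_lib.Melody,
            output_type=tf.train.SequenceExample,
            name=name)
        self._melody_encoder_decoder = config.encoder_decoder
        self._min_note = config.min_note
        self._max_note = config.max_note
        self._transpose_to_key = config.transpose_to_key

    def transform(self, melody):
        # Transpose/clip into the model's pitch range, then encode.
        melody.squash(self._min_note, self._max_note, self._transpose_to_key)
        return [self._melody_encoder_decoder.encode(melody)]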
Example #5
def get_pipeline(config, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A MelodyRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_melodies', 'training_melodies'],
      [eval_ratio])
  dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

  for mode in ['eval', 'training']:
    time_change_splitter = note_sequence_pipelines.TimeChangeSplitter(
        name='TimeChangeSplitter_' + mode)
    quantizer = note_sequence_pipelines.Quantizer(
        steps_per_quarter=config.steps_per_quarter, name='Quantizer_' + mode)
    melody_extractor = melody_pipelines.MelodyExtractor(
        min_bars=7, max_steps=512, min_unique_pitches=5,
        gap_bars=1.0, ignore_polyphonic_notes=True,
        name='MelodyExtractor_' + mode)
    encoder_pipeline = EncoderPipeline(config, name='EncoderPipeline_' + mode)

    dag[time_change_splitter] = partitioner[mode + '_melodies']
    dag[quantizer] = time_change_splitter
    dag[melody_extractor] = quantizer
    dag[encoder_pipeline] = melody_extractor
    dag[dag_pipeline.DagOutput(mode + '_melodies')] = encoder_pipeline

  return dag_pipeline.DAGPipeline(dag)
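One way to run this pipeline end to end, sketched under the assumption that the input is a TFRecord of serialized NoteSequence protos; run_pipeline_serial and tf_record_iterator come from magenta.pipelines.pipeline, and the paths are placeholders.

from magenta.pipelines import pipeline

pipeline_instance = get_pipeline(config, eval_ratio=0.1)
pipeline.run_pipeline_serial(
    pipeline_instance,
    # Yields NoteSequence protos from the input TFRecord.
    pipeline.tf_record_iterator('/tmp/notesequences.tfrecord',
                                pipeline_instance.input_type),
    '/tmp/melody_rnn/sequence_examples')

Example #6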
def get_pipeline(config, eval_ratio=0.0):
    partitioner = pipelines_common.RandomPartition(
        music_pb2.NoteSequence, ['eval_melodies', 'training_melodies'],
        [eval_ratio])
    dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

    for mode in ['eval', 'training']:
        time_change_splitter = note_sequence_pipelines.TimeChangeSplitter(
            name='TimeChangeSplitter_' + mode)
        repeat_sequence = RepeatSequence(min_duration=16,
                                         name='RepeatSequence_' + mode)
        transposition_pipeline = note_sequence_pipelines.TranspositionPipeline(
            (0, ), name='TranspositionPipeline_' + mode)
        quantizer = note_sequence_pipelines.Quantizer(
            steps_per_quarter=config.steps_per_quarter,
            name='Quantizer_' + mode)
        melody_extractor = melody_pipelines.MelodyExtractor(
            min_bars=7,
            max_steps=512,
            min_unique_pitches=5,
            gap_bars=1.0,
            ignore_polyphonic_notes=True,
            name='MelodyExtractor_' + mode)
        encoder_pipeline = EncoderPipeline(config,
                                           name='EncoderPipeline_' + mode)

        dag[time_change_splitter] = partitioner[mode + '_melodies']
        dag[repeat_sequence] = time_change_splitter
        dag[quantizer] = repeat_sequence
        dag[transposition_pipeline] = quantizer
        dag[melody_extractor] = transposition_pipeline
        dag[encoder_pipeline] = melody_extractor
        dag[dag_pipeline.DagOutput(mode + '_melodies')] = encoder_pipeline

    return dag_pipeline.DAGPipeline(dag)
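This variant adds a custom RepeatSequence stage (plus a no-op transposition) before quantization. RepeatSequence is not part of magenta's note_sequence_pipelines; a minimal sketch of what it might look like, assuming min_duration is in seconds and that sequences_lib.concatenate_sequences is used to tile the input:

import math

from magenta.music import sequences_lib
from magenta.pipelines import pipeline
from magenta.protobuf import music_pb2


class RepeatSequence(pipeline.Pipeline):
    """Tiles a NoteSequence until it lasts at least min_duration seconds."""

    def __init__(self, min_duration, name=None):
        super(RepeatSequence, self).__init__(
            input_type=music_pb2.NoteSequence,
            output_type=music_pb2.NoteSequence,
            name=name)
        self._min_duration = min_duration

    def transform(self, note_sequence):
        if note_sequence.total_time <= 0:
            return [note_sequence]
        repeats = int(math.ceil(self._min_duration / note_sequence.total_time))
        return [sequences_lib.concatenate_sequences(
            [note_sequence] * max(repeats, 1))]

Example #7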
  def testMelodyRNNPipeline(self):
    FLAGS.eval_ratio = 0.0
    note_sequence = magenta.common.testing_lib.parse_test_proto(
        music_pb2.NoteSequence,
        """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 120}""")
    magenta.music.testing_lib.add_track_to_sequence(
        note_sequence, 0,
        [(12, 100, 0.00, 2.0), (11, 55, 2.1, 5.0), (40, 45, 5.1, 8.0),
         (55, 120, 8.1, 11.0), (53, 99, 11.1, 14.1)])

    quantizer = pipelines_common.Quantizer(steps_per_quarter=4)
    melody_extractor = melody_pipelines.MelodyExtractor(
        min_bars=7, min_unique_pitches=5, gap_bars=1.0,
        ignore_polyphonic_notes=False)
    one_hot_encoder = magenta.music.OneHotMelodyEncoderDecoder(0, 127, 0)
    quantized = quantizer.transform(note_sequence)[0]
    print(quantized.tracks)
    melody = melody_extractor.transform(quantized)[0]
    one_hot = one_hot_encoder.squash_and_encode(melody)
    print(one_hot)
    expected_result = {'training_melodies': [one_hot], 'eval_melodies': []}

    pipeline_inst = melody_rnn_create_dataset.get_pipeline(one_hot_encoder)
    result = pipeline_inst.transform(note_sequence)
    self.assertEqual(expected_result, result)
Example #8
    def testMelodyExtractor(self):
        quantized_sequence = sequences_lib.QuantizedSequence()
        quantized_sequence.steps_per_quarter = 1
        testing_lib.add_quantized_track_to_sequence(quantized_sequence, 0,
                                                    [(12, 100, 2, 4),
                                                     (11, 1, 6, 7)])
        testing_lib.add_quantized_track_to_sequence(quantized_sequence, 1,
                                                    [(12, 127, 2, 4),
                                                     (14, 50, 6, 8)])
        expected_events = [[
            NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11
        ], [
            NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14, NO_EVENT
        ]]
        expected_melodies = []
        for events_list in expected_events:
            melody = melodies_lib.Melody(events_list,
                                         steps_per_quarter=1,
                                         steps_per_bar=4)
            expected_melodies.append(melody)
        unit = melody_pipelines.MelodyExtractor(min_bars=1,
                                                min_unique_pitches=1,
                                                gap_bars=1)
        self._unit_transform_test(unit, quantized_sequence, expected_melodies)