Code Example #1
def get_pipeline(melody_encoder_decoder):
    """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    melody_encoder_decoder: A melodies_lib.MelodyEncoderDecoder object.

  Returns:
    A pipeline.Pipeline instance.
  """
    quantizer = pipelines_common.Quantizer(steps_per_beat=4)
    melody_extractor = pipelines_common.MonophonicMelodyExtractor(
        min_bars=7,
        min_unique_pitches=5,
        gap_bars=1.0,
        ignore_polyphonic_notes=False)
    encoder_pipeline = EncoderPipeline(melody_encoder_decoder)
    partitioner = pipelines_common.RandomPartition(
        tf.train.SequenceExample, ['eval_melodies', 'training_melodies'],
        [FLAGS.eval_ratio])

    dag = {
        quantizer: dag_pipeline.Input(music_pb2.NoteSequence),
        melody_extractor: quantizer,
        encoder_pipeline: melody_extractor,
        partitioner: encoder_pipeline,
        dag_pipeline.Output(): partitioner
    }
    return dag_pipeline.DAGPipeline(dag)
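
The EncoderPipeline referenced above is defined elsewhere in melody_rnn_create_dataset.py and is not shown in this example. As a rough sketch of its likely shape (an assumption based on the Pipeline interface visible in Code Example #15, not the actual Magenta implementation), it would be a Pipeline subclass that encodes each extracted melody into a tf.train.SequenceExample:

class EncoderPipeline(pipeline.Pipeline):
    """Sketch of a melody-encoding stage; the real class may differ."""

    def __init__(self, melody_encoder_decoder):
        super(EncoderPipeline, self).__init__(
            input_type=melodies_lib.MonophonicMelody,
            output_type=tf.train.SequenceExample)
        self._melody_encoder_decoder = melody_encoder_decoder

    def transform(self, melody):
        # Pipeline.transform returns a list of outputs for each input.
        return [self._melody_encoder_decoder.encode(melody)]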
Code Example #2
    def testMelodyRNNPipeline(self):
        FLAGS.eval_ratio = 0.0
        note_sequence = testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 120}""")
        testing_lib.add_track(note_sequence, 0, [(12, 100, 0.00, 2.0),
                                                 (11, 55, 2.1, 5.0),
                                                 (40, 45, 5.1, 8.0),
                                                 (55, 120, 8.1, 11.0),
                                                 (53, 99, 11.1, 14.1)])

        quantizer = pipelines_common.Quantizer(steps_per_quarter=4)
        melody_extractor = pipelines_common.MonophonicMelodyExtractor(
            min_bars=7,
            min_unique_pitches=5,
            gap_bars=1.0,
            ignore_polyphonic_notes=False)
        one_hot_encoder = melodies_lib.OneHotEncoderDecoder(0, 127, 0)
        quantized = quantizer.transform(note_sequence)[0]
        print(quantized.tracks)
        melody = melody_extractor.transform(quantized)[0]
        one_hot = one_hot_encoder.encode(melody)
        print(one_hot)
        expected_result = {'training_melodies': [one_hot], 'eval_melodies': []}

        pipeline_inst = melody_rnn_create_dataset.get_pipeline(one_hot_encoder)
        result = pipeline_inst.transform(note_sequence)
        self.assertEqual(expected_result, result)
Code Example #3
    def testDrumsRNNPipeline(self):
        note_sequence = magenta.common.testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 120}""")
        magenta.music.testing_lib.add_track_to_sequence(
            note_sequence,
            0, [(36, 100, 0.00, 2.0), (40, 55, 2.1, 5.0), (44, 80, 3.6, 5.0),
                (41, 45, 5.1, 8.0), (64, 100, 6.6, 10.0), (55, 120, 8.1, 11.0),
                (39, 110, 9.6, 9.7), (53, 99, 11.1, 14.1),
                (51, 40, 12.6, 13.0), (55, 100, 14.1, 15.0),
                (54, 90, 15.6, 17.0), (60, 100, 17.1, 18.0)],
            is_drum=True)

        quantizer = pipelines_common.Quantizer(steps_per_quarter=4)
        drums_extractor = drum_pipelines.DrumsExtractor(min_bars=7,
                                                        gap_bars=1.0)
        one_hot_encoding = magenta.music.OneHotEventSequenceEncoderDecoder(
            magenta.music.MultiDrumOneHotEncoding())
        quantized = quantizer.transform(note_sequence)[0]
        drums = drums_extractor.transform(quantized)[0]
        one_hot = one_hot_encoding.encode(drums)
        expected_result = {
            'training_drum_tracks': [one_hot],
            'eval_drum_tracks': []
        }

        pipeline_inst = drums_rnn_create_dataset.get_pipeline(self.config,
                                                              eval_ratio=0.0)
        result = pipeline_inst.transform(note_sequence)
        self.assertEqual(expected_result, result)
Code Example #4
def get_pipeline(config, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A DrumsRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_drum_tracks', 'training_drum_tracks'],
      [eval_ratio])
  dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

  for mode in ['eval', 'training']:
    time_change_splitter = pipelines_common.TimeChangeSplitter(
        name='TimeChangeSplitter_' + mode)
    quantizer = pipelines_common.Quantizer(
        steps_per_quarter=config.steps_per_quarter, name='Quantizer_' + mode)
    drums_extractor = drum_pipelines.DrumsExtractor(
        min_bars=7, max_steps=512, gap_bars=1.0, name='DrumsExtractor_' + mode)
    encoder_pipeline = encoder_decoder.EncoderPipeline(
        magenta.music.DrumTrack, config.encoder_decoder,
        name='EncoderPipeline_' + mode)

    dag[time_change_splitter] = partitioner[mode + '_drum_tracks']
    dag[quantizer] = time_change_splitter
    dag[drums_extractor] = quantizer
    dag[encoder_pipeline] = drums_extractor
    dag[dag_pipeline.DagOutput(mode + '_drum_tracks')] = encoder_pipeline

  return dag_pipeline.DAGPipeline(dag)
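
A DAGPipeline built this way is normally driven by the helpers in magenta.pipelines.pipeline rather than called directly. A minimal sketch, assuming the run_pipeline_serial and tf_record_iterator helpers from that module and hypothetical input_tfrecord/output_dir paths (check your Magenta version for the exact signatures):

from magenta.pipelines import pipeline

pipeline_instance = get_pipeline(config, eval_ratio=0.1)
# Stream NoteSequences from a TFRecord through the DAG and write one output
# TFRecord per dataset name ('training_drum_tracks', 'eval_drum_tracks').
pipeline.run_pipeline_serial(
    pipeline_instance,
    pipeline.tf_record_iterator(input_tfrecord, pipeline_instance.input_type),
    output_dir)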
Code Example #5
    def testQuantizer(self):
        steps_per_quarter = 4
        note_sequence = common_testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 60}""")
        testing_lib.add_track_to_sequence(note_sequence, 0,
                                          [(12, 100, 0.01, 10.0),
                                           (11, 55, 0.22, 0.50),
                                           (40, 45, 2.50, 3.50),
                                           (55, 120, 4.0, 4.01),
                                           (52, 99, 4.75, 5.0)])
        expected_quantized_sequence = sequences_lib.QuantizedSequence()
        expected_quantized_sequence.qpm = 60.0
        expected_quantized_sequence.steps_per_quarter = steps_per_quarter
        testing_lib.add_quantized_track_to_sequence(
            expected_quantized_sequence, 0,
            [(12, 100, 0, 40), (11, 55, 1, 2), (40, 45, 10, 14),
             (55, 120, 16, 17), (52, 99, 19, 20)])

        unit = pipelines_common.Quantizer(steps_per_quarter)
        self._unit_transform_test(unit, note_sequence,
                                  [expected_quantized_sequence])
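
The expected step values follow from the quantization arithmetic: at 60 QPM a quarter note lasts one second, so steps_per_quarter=4 gives 4 steps per second. A small sketch of the implied rounding, inferred from the expected values in the test rather than taken from the Magenta source:

def quantized_steps(start_time, end_time, qpm=60.0, steps_per_quarter=4):
    # 4 steps per quarter at 60 QPM -> 4 steps per second.
    steps_per_second = steps_per_quarter * qpm / 60.0
    start_step = int(round(start_time * steps_per_second))
    # A note occupies at least one step, which is why the note spanning
    # (4.0, 4.01) quantizes to (16, 17) rather than (16, 16).
    end_step = max(int(round(end_time * steps_per_second)), start_step + 1)
    return start_step, end_step

assert quantized_steps(0.01, 10.0) == (0, 40)
assert quantized_steps(0.22, 0.50) == (1, 2)
assert quantized_steps(4.0, 4.01) == (16, 17)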
Code Example #6
  def testMelodyRNNPipeline(self):
    note_sequence = magenta.common.testing_lib.parse_test_proto(
        music_pb2.NoteSequence,
        """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 120}""")
    magenta.music.testing_lib.add_track_to_sequence(
        note_sequence, 0,
        [(12, 100, 0.00, 2.0), (11, 55, 2.1, 5.0), (40, 45, 5.1, 8.0),
         (55, 120, 8.1, 11.0), (53, 99, 11.1, 14.1)])

    quantizer = pipelines_common.Quantizer(steps_per_quarter=4)
    melody_extractor = melody_pipelines.MelodyExtractor(
        min_bars=7, min_unique_pitches=5, gap_bars=1.0,
        ignore_polyphonic_notes=False)
    one_hot_encoding = magenta.music.OneHotEventSequenceEncoderDecoder(
        magenta.music.MelodyOneHotEncoding(
            self.config.min_note, self.config.max_note))
    quantized = quantizer.transform(note_sequence)[0]
    melody = melody_extractor.transform(quantized)[0]
    melody.squash(
        self.config.min_note,
        self.config.max_note,
        self.config.transpose_to_key)
    one_hot = one_hot_encoding.encode(melody)
    expected_result = {'training_melodies': [one_hot], 'eval_melodies': []}

    pipeline_inst = melody_rnn_create_dataset.get_pipeline(self.config,
                                                           eval_ratio=0.0)
    result = pipeline_inst.transform(note_sequence)
    self.assertEqual(expected_result, result)
Code Example #7
  def testMelodyRNNPipeline(self):
    note_sequence = testing_lib.parse_test_proto(
        music_pb2.NoteSequence,
        """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          bpm: 120}""")
    testing_lib.add_track(
        note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])

    quantizer = pipelines_common.Quantizer(steps_per_beat=4)
    melody_extractor = pipelines_common.MonophonicMelodyExtractor(
        min_bars=7, min_unique_pitches=5,
        gap_bars=1.0)
    one_hot_encoder = melody_rnn_create_dataset.OneHotEncoder()
    quantized = quantizer.transform(note_sequence)[0]
    melody = melody_extractor.transform(quantized)[0]
    one_hot = one_hot_encoder.transform(melody)[0]
    expected_result = {'melody_rnn_train': [one_hot], 'melody_rnn_eval': []}

    pipeline_inst = melody_rnn_create_dataset.MelodyRNNPipeline(eval_ratio=0)
    result = pipeline_inst.transform(note_sequence)
    self.assertEqual(expected_result, result)
Code Example #8
def get_pipeline(config, eval_ratio):
    """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A DrumsRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
    quantizer = pipelines_common.Quantizer(steps_per_quarter=4)
    drums_extractor_train = drum_pipelines.DrumsExtractor(
        min_bars=7, max_steps=512, gap_bars=1.0, name='DrumsExtractorTrain')
    drums_extractor_eval = drum_pipelines.DrumsExtractor(
        min_bars=7, max_steps=512, gap_bars=1.0, name='DrumsExtractorEval')
    encoder_pipeline_train = EncoderPipeline(config,
                                             name='EncoderPipelineTrain')
    encoder_pipeline_eval = EncoderPipeline(config, name='EncoderPipelineEval')
    partitioner = pipelines_common.RandomPartition(
        music_pb2.NoteSequence, ['eval_drum_tracks', 'training_drum_tracks'],
        [eval_ratio])

    dag = {
        quantizer: dag_pipeline.Input(music_pb2.NoteSequence),
        partitioner: quantizer,
        drums_extractor_train: partitioner['training_drum_tracks'],
        drums_extractor_eval: partitioner['eval_drum_tracks'],
        encoder_pipeline_train: drums_extractor_train,
        encoder_pipeline_eval: drums_extractor_eval,
        dag_pipeline.Output('training_drum_tracks'): encoder_pipeline_train,
        dag_pipeline.Output('eval_drum_tracks'): encoder_pipeline_eval
    }
    return dag_pipeline.DAGPipeline(dag)
Code Example #9
def get_pipeline(config, eval_ratio):
    """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A MelodyRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
    A id_pipeline.IDPipeline instance.
  """
    quantizer = pipelines_common.Quantizer(steps_per_quarter=4)
    melody_extractor = melody_pipelines.MelodyExtractor(
        min_bars=7,
        max_steps=512,
        min_unique_pitches=5,
        gap_bars=1.0,
        ignore_polyphonic_notes=False)
    id_pipeline = pipelines_common.IDPipeline()
    encoder_pipeline = EncoderPipeline(config)
    partitioner = pipelines_common.RandomPartition(
        tf.train.SequenceExample, ['eval_melodies', 'training_melodies'],
        [eval_ratio])

    dag = {
        quantizer: dag_pipeline.DagInput(music_pb2.NoteSequence),
        melody_extractor: quantizer,
        id_pipeline: melody_extractor,
        encoder_pipeline: id_pipeline,
        partitioner: encoder_pipeline,
        dag_pipeline.DagOutput(): partitioner
    }

    return dag_pipeline.DAGPipeline(dag), id_pipeline
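
The IDPipeline above is a pass-through stage; returning it alongside the DAGPipeline gives the caller a handle to that point in the graph. A plausible minimal identity stage, assuming the Pipeline interface shown in Code Example #15 (not the actual pipelines_common implementation):

class IDPipeline(pipeline.Pipeline):
    """Identity stage: emits each input unchanged."""

    def __init__(self):
        super(IDPipeline, self).__init__(
            input_type=melodies_lib.Melody,
            output_type=melodies_lib.Melody)

    def transform(self, data):
        return [data]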
Code Example #10
def midi_files_to_sequence_proto(midi_files, batch_size, input_size):
    # Convert each MIDI file to a NoteSequence, skipping files that fail to
    # parse.
    all_sequences = [
        sequence for sequence in (
            midi_file_to_sequence_proto(midi_file, batch_size, input_size)
            for midi_file in midi_files)
        if sequence is not None
    ]
    examples = []

    quantizer = pipelines_common.Quantizer(steps_per_quarter=4)
    melody_extractor = melody_pipelines.MelodyExtractor(
        min_bars=7,
        min_unique_pitches=5,
        gap_bars=1.0,
        ignore_polyphonic_notes=False)
    one_hot_encoding = magenta.music.OneHotEventSequenceEncoderDecoder(
        magenta.music.MelodyOneHotEncoding(0, 128))

    for note_sequence in all_sequences:
        # MelodyExtractor operates on quantized sequences, so quantize first.
        quantized = quantizer.transform(note_sequence)[0]
        for melody in melody_extractor.transform(quantized):
            # Squash the melody into the [0, 128) pitch range with no
            # transposition, then one-hot encode it.
            melody.squash(0, 128, 0)
            examples.append(one_hot_encoding.encode(melody))

    print("done with a batch!!!")
    return get_padded_batch(examples, batch_size, input_size)
Code Example #11
def get_pipeline(config, steps_per_quarter, min_steps, max_steps, eval_ratio):
    """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An EventSequenceRnnConfig.
    steps_per_quarter: How many steps per quarter to use when quantizing.
    min_steps: Minimum number of steps for an extracted sequence.
    max_steps: Maximum number of steps for an extracted sequence.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
    quantizer = pipelines_common.Quantizer(steps_per_quarter=steps_per_quarter)
    # Transpose up to a major third in either direction. Because our current
    # dataset is Bach chorales, transposing more than a major third in either
    # direction probably doesn't make sense (e.g., because it is likely to
    # exceed normal singing range).
    transposition_range = range(-4, 5)
    transposition_pipeline_train = sequences_lib.TranspositionPipeline(
        transposition_range, name='TranspositionPipelineTrain')
    transposition_pipeline_eval = sequences_lib.TranspositionPipeline(
        transposition_range, name='TranspositionPipelineEval')
    poly_extractor_train = PolyphonicSequenceExtractor(
        min_steps=min_steps, max_steps=max_steps, name='PolyExtractorTrain')
    poly_extractor_eval = PolyphonicSequenceExtractor(min_steps=min_steps,
                                                      max_steps=max_steps,
                                                      name='PolyExtractorEval')
    encoder_pipeline_train = encoder_decoder.EncoderPipeline(
        polyphony_lib.PolyphonicSequence,
        config.encoder_decoder,
        name='EncoderPipelineTrain')
    encoder_pipeline_eval = encoder_decoder.EncoderPipeline(
        polyphony_lib.PolyphonicSequence,
        config.encoder_decoder,
        name='EncoderPipelineEval')
    partitioner = pipelines_common.RandomPartition(
        music_pb2.NoteSequence, ['eval_poly_tracks', 'training_poly_tracks'],
        [eval_ratio])

    dag = {
        quantizer: dag_pipeline.DagInput(music_pb2.NoteSequence),
        partitioner: quantizer,
        transposition_pipeline_train: partitioner['training_poly_tracks'],
        transposition_pipeline_eval: partitioner['eval_poly_tracks'],
        poly_extractor_train: transposition_pipeline_train,
        poly_extractor_eval: transposition_pipeline_eval,
        encoder_pipeline_train: poly_extractor_train,
        encoder_pipeline_eval: poly_extractor_eval,
        dag_pipeline.DagOutput('training_poly_tracks'): encoder_pipeline_train,
        dag_pipeline.DagOutput('eval_poly_tracks'): encoder_pipeline_eval
    }
    return dag_pipeline.DAGPipeline(dag)
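
With transposition_range = range(-4, 5), each partitioned sequence fans out into nine transposed copies, shifted by -4 through +4 semitones. A hypothetical illustration of that fan-out, using plain pitch lists in place of PolyphonicSequence objects:

transposition_range = range(-4, 5)

def transpose_all(pitches):
    # One transposed copy per amount: nine copies for range(-4, 5).
    return [[pitch + amount for pitch in pitches]
            for amount in transposition_range]

print(transpose_all([60, 64, 67])[0])  # [56, 60, 63]: down a major third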
Code Example #12
def get_pipeline(config, steps_per_quarter, min_steps, max_steps, eval_ratio):
    """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An EventSequenceRnnConfig.
    steps_per_quarter: How many steps per quarter to use when quantizing.
    min_steps: Minimum number of steps for an extracted sequence.
    max_steps: Maximum number of steps for an extracted sequence.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
    # Transpose up to a major third in either direction. Because our current
    # dataset is Bach chorales, transposing more than a major third in either
    # direction probably doesn't make sense (e.g., because it is likely to
    # exceed normal singing range).
    transposition_range = range(-4, 5)

    partitioner = pipelines_common.RandomPartition(
        music_pb2.NoteSequence, ['eval_poly_tracks', 'training_poly_tracks'],
        [eval_ratio])
    dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

    for mode in ['eval', 'training']:
        time_change_splitter = pipelines_common.TimeChangeSplitter(
            name='TimeChangeSplitter_' + mode)
        quantizer = pipelines_common.Quantizer(
            steps_per_quarter=steps_per_quarter, name='Quantizer_' + mode)
        transposition_pipeline = sequences_lib.TranspositionPipeline(
            transposition_range, name='TranspositionPipeline_' + mode)
        poly_extractor = PolyphonicSequenceExtractor(
            min_steps=min_steps, max_steps=max_steps,
            name='PolyExtractor_' + mode)
        encoder_pipeline = encoder_decoder.EncoderPipeline(
            polyphony_lib.PolyphonicSequence,
            config.encoder_decoder,
            name='EncoderPipeline_' + mode)

        dag[time_change_splitter] = partitioner[mode + '_poly_tracks']
        dag[quantizer] = time_change_splitter
        dag[transposition_pipeline] = quantizer
        dag[poly_extractor] = transposition_pipeline
        dag[encoder_pipeline] = poly_extractor
        dag[dag_pipeline.DagOutput(mode + '_poly_tracks')] = encoder_pipeline

    return dag_pipeline.DAGPipeline(dag)
Code Example #13
def get_pipeline(config, eval_ratio):
    """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An ImprovRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
    all_transpositions = config.transpose_to_key is None
    quantizer = pipelines_common.Quantizer(steps_per_quarter=4)
    lead_sheet_extractor_train = lead_sheet_pipelines.LeadSheetExtractor(
        min_bars=7,
        max_steps=512,
        min_unique_pitches=3,
        gap_bars=1.0,
        ignore_polyphonic_notes=False,
        all_transpositions=all_transpositions,
        name='LeadSheetExtractorTrain')
    lead_sheet_extractor_eval = lead_sheet_pipelines.LeadSheetExtractor(
        min_bars=7,
        max_steps=512,
        min_unique_pitches=3,
        gap_bars=1.0,
        ignore_polyphonic_notes=False,
        all_transpositions=all_transpositions,
        name='LeadSheetExtractorEval')
    encoder_pipeline_train = EncoderPipeline(config,
                                             name='EncoderPipelineTrain')
    encoder_pipeline_eval = EncoderPipeline(config, name='EncoderPipelineEval')
    partitioner = pipelines_common.RandomPartition(
        music_pb2.NoteSequence, ['eval_lead_sheets', 'training_lead_sheets'],
        [eval_ratio])

    dag = {
        quantizer: dag_pipeline.DagInput(music_pb2.NoteSequence),
        partitioner: quantizer,
        lead_sheet_extractor_train: partitioner['training_lead_sheets'],
        lead_sheet_extractor_eval: partitioner['eval_lead_sheets'],
        encoder_pipeline_train: lead_sheet_extractor_train,
        encoder_pipeline_eval: lead_sheet_extractor_eval,
        dag_pipeline.DagOutput('training_lead_sheets'): encoder_pipeline_train,
        dag_pipeline.DagOutput('eval_lead_sheets'): encoder_pipeline_eval
    }
    return dag_pipeline.DAGPipeline(dag)
Code Example #14
    def testMelodyRNNPipeline(self):
        note_sequence = magenta.common.testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 120}""")
        magenta.music.testing_lib.add_track_to_sequence(
            note_sequence, 0, [(12, 100, 0.00, 2.0), (11, 55, 2.1, 5.0),
                               (40, 45, 5.1, 8.0), (55, 120, 8.1, 11.0),
                               (53, 99, 11.1, 14.1)])
        magenta.music.testing_lib.add_chords_to_sequence(
            note_sequence, [('N.C.', 0.0), ('Am9', 5.0), ('D7', 10.0)])

        quantizer = pipelines_common.Quantizer(steps_per_quarter=4)
        lead_sheet_extractor = lead_sheet_pipelines.LeadSheetExtractor(
            min_bars=7,
            min_unique_pitches=5,
            gap_bars=1.0,
            ignore_polyphonic_notes=False,
            all_transpositions=False)
        conditional_encoding = magenta.music.ConditionalEventSequenceEncoderDecoder(
            magenta.music.OneHotEventSequenceEncoderDecoder(
                magenta.music.MajorMinorChordOneHotEncoding()),
            magenta.music.OneHotEventSequenceEncoderDecoder(
                magenta.music.MelodyOneHotEncoding(self.config.min_note,
                                                   self.config.max_note)))
        quantized = quantizer.transform(note_sequence)[0]
        lead_sheet = lead_sheet_extractor.transform(quantized)[0]
        lead_sheet.squash(self.config.min_note, self.config.max_note,
                          self.config.transpose_to_key)
        encoded = conditional_encoding.encode(lead_sheet.chords,
                                              lead_sheet.melody)
        expected_result = {
            'training_lead_sheets': [encoded],
            'eval_lead_sheets': []
        }

        pipeline_inst = improv_rnn_create_dataset.get_pipeline(self.config,
                                                               eval_ratio=0.0)
        result = pipeline_inst.transform(note_sequence)
        self.assertEqual(expected_result, result)
Code Example #15
    def __init__(self, melody_encoder_decoder, eval_ratio):
        self.training_set_name = 'training_melodies'
        self.eval_set_name = 'eval_melodies'
        super(MelodyRNNPipeline, self).__init__(
            input_type=music_pb2.NoteSequence,
            output_type={
                self.training_set_name: tf.train.SequenceExample,
                self.eval_set_name: tf.train.SequenceExample
            })
        self.eval_ratio = eval_ratio
        self.quantizer = pipelines_common.Quantizer(steps_per_beat=4)
        self.melody_extractor = pipelines_common.MonophonicMelodyExtractor(
            min_bars=7,
            min_unique_pitches=5,
            gap_bars=1.0,
            ignore_polyphonic_notes=False)
        self.encoder_unit = EncoderPipeline(melody_encoder_decoder)
        self.stats_dict = {}
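
This is the older, class-based form of the dataset pipeline; its transform method (not shown here) chains the units built in __init__ by hand. A sketch of that chaining, inferred from how the same units are called in Code Example #7 and using a hypothetical partition helper for the train/eval split:

    def transform(self, note_sequence):
        # Hypothetical chaining of the units constructed in __init__ above.
        quantized = self.quantizer.transform(note_sequence)[0]
        melodies = self.melody_extractor.transform(quantized)
        examples = [self.encoder_unit.transform(melody)[0]
                    for melody in melodies]
        train, evals = partition(examples, self.eval_ratio)  # hypothetical
        return {self.training_set_name: train, self.eval_set_name: evals}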
Code Example #16
def get_pipeline(config, eval_ratio):
    """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An ImprovRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
    all_transpositions = config.transpose_to_key is None
    partitioner = pipelines_common.RandomPartition(
        music_pb2.NoteSequence, ['eval_lead_sheets', 'training_lead_sheets'],
        [eval_ratio])
    dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

    for mode in ['eval', 'training']:
        time_change_splitter = pipelines_common.TimeChangeSplitter(
            name='TimeChangeSplitter_' + mode)
        quantizer = pipelines_common.Quantizer(
            steps_per_quarter=config.steps_per_quarter,
            name='Quantizer_' + mode)
        lead_sheet_extractor = lead_sheet_pipelines.LeadSheetExtractor(
            min_bars=7,
            max_steps=512,
            min_unique_pitches=3,
            gap_bars=1.0,
            ignore_polyphonic_notes=False,
            all_transpositions=all_transpositions,
            name='LeadSheetExtractor_' + mode)
        encoder_pipeline = EncoderPipeline(config,
                                           name='EncoderPipeline_' + mode)

        dag[time_change_splitter] = partitioner[mode + '_lead_sheets']
        dag[quantizer] = time_change_splitter
        dag[lead_sheet_extractor] = quantizer
        dag[encoder_pipeline] = lead_sheet_extractor
        dag[dag_pipeline.DagOutput(mode + '_lead_sheets')] = encoder_pipeline

    return dag_pipeline.DAGPipeline(dag)
Code Example #17
def get_pipeline(config, steps_per_quarter, min_steps, max_steps, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An EventSequenceRnnConfig.
    steps_per_quarter: How many steps per quarter to use when quantizing.
    min_steps: Minimum number of steps for an extracted sequence.
    max_steps: Maximum number of steps for an extracted sequence.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  quantizer = pipelines_common.Quantizer(steps_per_quarter=steps_per_quarter)
  poly_extractor_train = PolyphonicSequenceExtractor(
      min_steps=min_steps, max_steps=max_steps, name='PolyExtractorTrain')
  poly_extractor_eval = PolyphonicSequenceExtractor(
      min_steps=min_steps, max_steps=max_steps, name='PolyExtractorEval')
  encoder_pipeline_train = encoder_decoder.EncoderPipeline(
      polyphony_lib.PolyphonicSequence, config.encoder_decoder,
      name='EncoderPipelineTrain')
  encoder_pipeline_eval = encoder_decoder.EncoderPipeline(
      polyphony_lib.PolyphonicSequence, config.encoder_decoder,
      name='EncoderPipelineEval')
  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_poly_tracks', 'training_poly_tracks'],
      [eval_ratio])

  dag = {quantizer: dag_pipeline.DagInput(music_pb2.NoteSequence),
         partitioner: quantizer,
         poly_extractor_train: partitioner['training_poly_tracks'],
         poly_extractor_eval: partitioner['eval_poly_tracks'],
         encoder_pipeline_train: poly_extractor_train,
         encoder_pipeline_eval: poly_extractor_eval,
         dag_pipeline.DagOutput('training_poly_tracks'): encoder_pipeline_train,
         dag_pipeline.DagOutput('eval_poly_tracks'): encoder_pipeline_eval}
  return dag_pipeline.DAGPipeline(dag)
Code Example #18
def get_pipeline(config, eval_ratio):
    """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A MelodyRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
    partitioner = pipelines_common.RandomPartition(
        music_pb2.NoteSequence, ['eval_melodies', 'training_melodies'],
        [eval_ratio])
    dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

    for mode in ['eval', 'training']:
        time_change_splitter = pipelines_common.TimeChangeSplitter(
            name='TimeChangeSplitter_' + mode)
        quantizer = pipelines_common.Quantizer(steps_per_quarter=4,
                                               name='Quantizer_' + mode)
        melody_extractor = melody_pipelines.MelodyExtractor(
            min_bars=7,
            max_steps=512,
            min_unique_pitches=5,
            gap_bars=1.0,
            ignore_polyphonic_notes=False,
            name='MelodyExtractor_' + mode)
        encoder_pipeline = EncoderPipeline(config,
                                           name='EncoderPipeline_' + mode)

        dag[time_change_splitter] = partitioner[mode + '_melodies']
        dag[quantizer] = time_change_splitter
        dag[melody_extractor] = quantizer
        dag[encoder_pipeline] = melody_extractor
        dag[dag_pipeline.DagOutput(mode + '_melodies')] = encoder_pipeline

    return dag_pipeline.DAGPipeline(dag)
Code Example #19
def get_pipeline(config, min_steps, max_steps, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An EventSequenceRnnConfig.
    min_steps: Minimum number of steps for an extracted sequence.
    max_steps: Maximum number of steps for an extracted sequence.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_pianoroll_tracks', 'training_pianoroll_tracks'],
      [eval_ratio])
  dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

  for mode in ['eval', 'training']:
    time_change_splitter = pipelines_common.TimeChangeSplitter(
        name='TimeChangeSplitter_' + mode)
    quantizer = pipelines_common.Quantizer(
        steps_per_quarter=config.steps_per_quarter, name='Quantizer_' + mode)
    pianoroll_extractor = PianorollSequenceExtractor(
        min_steps=min_steps, max_steps=max_steps,
        name='PianorollExtractor_' + mode)
    encoder_pipeline = mm.EncoderPipeline(
        mm.PianorollSequence, config.encoder_decoder,
        name='EncoderPipeline_' + mode)

    dag[time_change_splitter] = partitioner[mode + '_pianoroll_tracks']
    dag[quantizer] = time_change_splitter
    dag[pianoroll_extractor] = quantizer
    dag[encoder_pipeline] = pianoroll_extractor
    dag[dag_pipeline.DagOutput(mode + '_pianoroll_tracks')] = encoder_pipeline

  return dag_pipeline.DAGPipeline(dag)