def get_pipeline(config, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An ImprovRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  # Transpose extracted lead sheets to every key only when no fixed target
  # key is configured.
  transpose_all = config.transpose_to_key is None

  partition = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_lead_sheets', 'training_lead_sheets'],
      [eval_ratio])
  dag = {partition: dag_pipeline.DagInput(music_pb2.NoteSequence)}

  # Build an identical processing chain for each split; only the names and
  # the partition output they consume differ.
  for split in ('eval', 'training'):
    splitter = note_sequence_pipelines.TimeChangeSplitter(
        name='TimeChangeSplitter_' + split)
    quantize = note_sequence_pipelines.Quantizer(
        steps_per_quarter=config.steps_per_quarter,
        name='Quantizer_' + split)
    extractor = lead_sheet_pipelines.LeadSheetExtractor(
        min_bars=7, max_steps=512, min_unique_pitches=3, gap_bars=1.0,
        ignore_polyphonic_notes=False, all_transpositions=transpose_all,
        name='LeadSheetExtractor_' + split)
    encoder = EncoderPipeline(config, name='EncoderPipeline_' + split)

    # Chain: partition -> split on time changes -> quantize -> extract
    # lead sheets -> encode.
    dag.update({
        splitter: partition[split + '_lead_sheets'],
        quantize: splitter,
        extractor: quantize,
        encoder: extractor,
        dag_pipeline.DagOutput(split + '_lead_sheets'): encoder,
    })

  return dag_pipeline.DAGPipeline(dag)
Example #2
0
def get_pipeline(config, eval_ratio):
    """Returns the Pipeline instance which creates the RNN dataset.

    Args:
      config: An ImprovRnnConfig object.
      eval_ratio: Fraction of input to set aside for evaluation set.

    Returns:
      A pipeline.Pipeline instance.
    """
    transpose_all = config.transpose_to_key is None

    # NOTE(review): steps_per_quarter is hard-coded to 4 here instead of
    # being read from config — confirm this is intentional.
    quantize = pipelines_common.Quantizer(steps_per_quarter=4)

    def _make_extractor(name):
        # Both splits use identical extraction settings; only the pipeline
        # name differs.
        return lead_sheet_pipelines.LeadSheetExtractor(
            min_bars=7,
            max_steps=512,
            min_unique_pitches=3,
            gap_bars=1.0,
            ignore_polyphonic_notes=False,
            all_transpositions=transpose_all,
            name=name)

    train_extractor = _make_extractor('LeadSheetExtractorTrain')
    eval_extractor = _make_extractor('LeadSheetExtractorEval')
    train_encoder = EncoderPipeline(config, name='EncoderPipelineTrain')
    eval_encoder = EncoderPipeline(config, name='EncoderPipelineEval')
    split = pipelines_common.RandomPartition(
        music_pb2.NoteSequence,
        ['eval_lead_sheets', 'training_lead_sheets'],
        [eval_ratio])

    # Wire: input -> quantize -> random partition -> per-split extract ->
    # per-split encode -> per-split output.
    dag = {
        quantize: dag_pipeline.DagInput(music_pb2.NoteSequence),
        split: quantize,
        train_extractor: split['training_lead_sheets'],
        eval_extractor: split['eval_lead_sheets'],
        train_encoder: train_extractor,
        eval_encoder: eval_extractor,
        dag_pipeline.DagOutput('training_lead_sheets'): train_encoder,
        dag_pipeline.DagOutput('eval_lead_sheets'): eval_encoder,
    }
    return dag_pipeline.DAGPipeline(dag)
    def testMelodyRNNPipeline(self):
        """End-to-end check: the pipeline's output matches a hand-built encoding."""
        sequence = magenta.common.testing_lib.parse_test_proto(
            note_seq.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 120}""")
        notes = [(12, 100, 0.00, 2.0), (11, 55, 2.1, 5.0), (40, 45, 5.1, 8.0),
                 (55, 120, 8.1, 11.0), (53, 99, 11.1, 14.1)]
        note_seq.testing_lib.add_track_to_sequence(sequence, 0, notes)
        chords = [('N.C.', 0.0), ('Am9', 5.0), ('D7', 10.0)]
        note_seq.testing_lib.add_chords_to_sequence(sequence, chords)

        # Reproduce the pipeline's processing steps by hand.
        quantize = note_sequence_pipelines.Quantizer(steps_per_quarter=4)
        extract = lead_sheet_pipelines.LeadSheetExtractor(
            min_bars=7,
            min_unique_pitches=5,
            gap_bars=1.0,
            ignore_polyphonic_notes=False,
            all_transpositions=False)
        encoding = note_seq.ConditionalEventSequenceEncoderDecoder(
            note_seq.OneHotEventSequenceEncoderDecoder(
                note_seq.MajorMinorChordOneHotEncoding()),
            note_seq.OneHotEventSequenceEncoderDecoder(
                note_seq.MelodyOneHotEncoding(self.config.min_note,
                                              self.config.max_note)))

        lead_sheet = extract.transform(quantize.transform(sequence)[0])[0]
        lead_sheet.squash(self.config.min_note, self.config.max_note,
                          self.config.transpose_to_key)
        expected_encoded = pipelines_common.make_sequence_example(
            *encoding.encode(lead_sheet.chords, lead_sheet.melody))
        # With eval_ratio=0.0 everything lands in the training split.
        expected = {
            'training_lead_sheets': [expected_encoded],
            'eval_lead_sheets': []
        }

        pipeline_inst = improv_rnn_pipeline.get_pipeline(self.config,
                                                         eval_ratio=0.0)
        self.assertEqual(expected, pipeline_inst.transform(sequence))
Example #4
0
 def testLeadSheetExtractor(self):
     """Extractor should yield one lead sheet per monophonic track."""
     sequence = common_testing_lib.parse_test_proto(
         music_pb2.NoteSequence, """
      time_signatures: {
        numerator: 4
        denominator: 4}
      tempos: {
        qpm: 60}""")
     music_testing_lib.add_track_to_sequence(
         sequence, 0, [(12, 100, 2, 4), (11, 1, 6, 7)])
     music_testing_lib.add_track_to_sequence(
         sequence, 1, [(12, 127, 2, 4), (14, 50, 6, 8)])
     music_testing_lib.add_chords_to_sequence(
         sequence, [('Cm7', 2), ('F9', 4), ('G7b9', 6)])
     quantized = sequences_lib.quantize_note_sequence(
         sequence, steps_per_quarter=1)

     # One (melody, chords) event pair is expected per input track.
     melody_tracks = [
         [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 11],
         [NO_EVENT, NO_EVENT, 12, NO_EVENT, NOTE_OFF, NO_EVENT, 14, NO_EVENT],
     ]
     chord_tracks = [
         [NO_CHORD, NO_CHORD, 'Cm7', 'Cm7', 'F9', 'F9', 'G7b9'],
         [NO_CHORD, NO_CHORD, 'Cm7', 'Cm7', 'F9', 'F9', 'G7b9', 'G7b9'],
     ]
     expected = [
         lead_sheets_lib.LeadSheet(
             melodies_lib.Melody(melody_events,
                                 steps_per_quarter=1,
                                 steps_per_bar=4),
             chords_lib.ChordProgression(chord_events,
                                         steps_per_quarter=1,
                                         steps_per_bar=4))
         for melody_events, chord_events in zip(melody_tracks, chord_tracks)
     ]

     unit = lead_sheet_pipelines.LeadSheetExtractor(
         min_bars=1,
         min_unique_pitches=1,
         gap_bars=1,
         all_transpositions=False)
     self._unit_transform_test(unit, quantized, expected)