Example #1

def get_pipeline(config, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An ImprovRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  all_transpositions = config.transpose_to_key is None
  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_lead_sheets', 'training_lead_sheets'],
      [eval_ratio])
  dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

  for mode in ['eval', 'training']:
    time_change_splitter = note_sequence_pipelines.TimeChangeSplitter(
        name='TimeChangeSplitter_' + mode)
    quantizer = note_sequence_pipelines.Quantizer(
        steps_per_quarter=config.steps_per_quarter, name='Quantizer_' + mode)
    lead_sheet_extractor = lead_sheet_pipelines.LeadSheetExtractor(
        min_bars=7, max_steps=512, min_unique_pitches=3, gap_bars=1.0,
        ignore_polyphonic_notes=False, all_transpositions=all_transpositions,
        name='LeadSheetExtractor_' + mode)
    encoder_pipeline = EncoderPipeline(config, name='EncoderPipeline_' + mode)

    dag[time_change_splitter] = partitioner[mode + '_lead_sheets']
    dag[quantizer] = time_change_splitter
    dag[lead_sheet_extractor] = quantizer
    dag[encoder_pipeline] = lead_sheet_extractor
    dag[dag_pipeline.DagOutput(mode + '_lead_sheets')] = encoder_pipeline

  return dag_pipeline.DAGPipeline(dag)
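A DAG like this is normally driven by a create-dataset script. The following is a minimal sketch of such a driver, assuming an ImprovRnnConfig bound to config and the standard magenta.pipelines.pipeline helpers (tf_record_iterator, run_pipeline_serial); the TFRecord paths are placeholders:

from magenta.pipelines import pipeline

# Hypothetical paths: a TFRecord of NoteSequence protos in, dataset shards out.
input_tfrecord = '/tmp/notesequences.tfrecord'
output_dir = '/tmp/improv_rnn/sequence_examples'

# config: assumed ImprovRnnConfig, as in the function above.
pipeline_instance = get_pipeline(config, eval_ratio=0.1)
pipeline.run_pipeline_serial(
    pipeline_instance,
    pipeline.tf_record_iterator(input_tfrecord, pipeline_instance.input_type),
    output_dir)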
Example #2

    def testMelodyRNNPipeline(self):
        note_sequence = magenta.common.testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 120}""")
        magenta.music.testing_lib.add_track_to_sequence(
            note_sequence, 0, [(12, 100, 0.00, 2.0), (11, 55, 2.1, 5.0),
                               (40, 45, 5.1, 8.0), (55, 120, 8.1, 11.0),
                               (53, 99, 11.1, 14.1)])

        quantizer = note_sequence_pipelines.Quantizer(steps_per_quarter=4)
        melody_extractor = melody_pipelines.MelodyExtractor(
            min_bars=7,
            min_unique_pitches=5,
            gap_bars=1.0,
            ignore_polyphonic_notes=False)
        one_hot_encoding = magenta.music.OneHotEventSequenceEncoderDecoder(
            magenta.music.MelodyOneHotEncoding(self.config.min_note,
                                               self.config.max_note))
        quantized = quantizer.transform(note_sequence)[0]
        melody = melody_extractor.transform(quantized)[0]
        melody.squash(self.config.min_note, self.config.max_note,
                      self.config.transpose_to_key)
        one_hot = one_hot_encoding.encode(melody)
        expected_result = {'training_melodies': [one_hot], 'eval_melodies': []}

        pipeline_inst = melody_rnn_pipeline.get_pipeline(self.config,
                                                         eval_ratio=0.0)
        result = pipeline_inst.transform(note_sequence)
        self.assertEqual(expected_result, result)
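The test drives the DAG with a single transform call. For many sequences at once, pipeline.load_pipeline runs the same DAG in memory and aggregates every output; a sketch, assuming the pipeline_inst above and an iterable of NoteSequence protos named note_sequences:

from magenta.pipelines import pipeline

# note_sequences: assumed iterable of NoteSequence protos.
results = pipeline.load_pipeline(pipeline_inst, iter(note_sequences))
# results maps each DagOutput name to the accumulated encoded examples, e.g.
# {'training_melodies': [...], 'eval_melodies': [...]}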
Example #3

def get_pipeline(config, eval_ratio):
    """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A DrumsRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
    partitioner = pipelines_common.RandomPartition(
        music_pb2.NoteSequence,
        ['eval_drum_tracks', 'training_drum_tracks'],
        [eval_ratio])
    dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

    for mode in ['eval', 'training']:
        time_change_splitter = note_sequence_pipelines.TimeChangeSplitter(
            name='TimeChangeSplitter_' + mode)
        quantizer = note_sequence_pipelines.Quantizer(
            steps_per_quarter=config.steps_per_quarter, name='Quantizer_' + mode)
        drums_extractor = drum_pipelines.DrumsExtractor(
            min_bars=7, max_steps=512, gap_bars=1.0, name='DrumsExtractor_' + mode)
        encoder_pipeline = event_sequence_pipeline.EncoderPipeline(
            magenta.music.DrumTrack, config.encoder_decoder,
            name='EncoderPipeline_' + mode)

        dag[time_change_splitter] = partitioner[mode + '_drum_tracks']
        dag[quantizer] = time_change_splitter
        dag[drums_extractor] = quantizer
        dag[encoder_pipeline] = drums_extractor
        dag[dag_pipeline.DagOutput(mode + '_drum_tracks')] = encoder_pipeline

    return dag_pipeline.DAGPipeline(dag)
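Unlike Example #1, which uses a model-specific EncoderPipeline, this DAG uses the generic one from event_sequence_pipeline, parameterized by the event type and the config's encoder/decoder. Per drum track it is equivalent to calling the encoder directly, as the test below does; a sketch, assuming the magenta.music exports already used in these examples:

import magenta.music as mm

# Build a tiny drum track by hand; each event is a frozenset of MIDI drum pitches.
drums = mm.DrumTrack(
    [frozenset([36]), frozenset(), frozenset([38, 42]), frozenset()])
encoder_decoder = mm.OneHotEventSequenceEncoderDecoder(
    mm.MultiDrumOneHotEncoding())
example = encoder_decoder.encode(drums)  # a tf.train.SequenceExample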
Example #4

    def testDrumsRNNPipeline(self):
        note_sequence = magenta.common.testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 120}""")
        magenta.music.testing_lib.add_track_to_sequence(
            note_sequence,
            0, [(36, 100, 0.00, 2.0), (40, 55, 2.1, 5.0), (44, 80, 3.6, 5.0),
                (41, 45, 5.1, 8.0), (64, 100, 6.6, 10.0), (55, 120, 8.1, 11.0),
                (39, 110, 9.6, 9.7), (53, 99, 11.1, 14.1),
                (51, 40, 12.6, 13.0), (55, 100, 14.1, 15.0),
                (54, 90, 15.6, 17.0), (60, 100, 17.1, 18.0)],
            is_drum=True)

        quantizer = note_sequence_pipelines.Quantizer(steps_per_quarter=4)
        drums_extractor = drum_pipelines.DrumsExtractor(min_bars=7,
                                                        gap_bars=1.0)
        one_hot_encoding = magenta.music.OneHotEventSequenceEncoderDecoder(
            magenta.music.MultiDrumOneHotEncoding())
        quantized = quantizer.transform(note_sequence)[0]
        drums = drums_extractor.transform(quantized)[0]
        one_hot = one_hot_encoding.encode(drums)
        expected_result = {
            'training_drum_tracks': [one_hot],
            'eval_drum_tracks': []
        }

        pipeline_inst = drums_rnn_pipeline.get_pipeline(self.config,
                                                        eval_ratio=0.0)
        result = pipeline_inst.transform(note_sequence)
        self.assertEqual(expected_result, result)
Example #5

def get_pipeline(config, min_events, max_events, eval_ratio):
    """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A PerformanceRnnConfig.
    min_events: Minimum number of events for an extracted sequence.
    max_events: Maximum number of events for an extracted sequence.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
    # Stretch by -5%, -2.5%, 0%, 2.5%, and 5%.
    stretch_factors = [0.95, 0.975, 1.0, 1.025, 1.05]

    # Transpose no more than a major third.
    transposition_range = range(-3, 4)

    partitioner = pipelines_common.RandomPartition(
        music_pb2.NoteSequence, ['eval_performances', 'training_performances'],
        [eval_ratio])
    dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

    for mode in ['eval', 'training']:
        sustain_pipeline = note_sequence_pipelines.SustainPipeline(
            name='SustainPipeline_' + mode)
        stretch_pipeline = note_sequence_pipelines.StretchPipeline(
            stretch_factors, name='StretchPipeline_' + mode)
        splitter = note_sequence_pipelines.Splitter(hop_size_seconds=30.0,
                                                    name='Splitter_' + mode)
        quantizer = note_sequence_pipelines.Quantizer(
            steps_per_second=config.steps_per_second, name='Quantizer_' + mode)
        transposition_pipeline = note_sequence_pipelines.TranspositionPipeline(
            transposition_range, name='TranspositionPipeline_' + mode)
        perf_extractor = PerformanceExtractor(
            min_events=min_events,
            max_events=max_events,
            num_velocity_bins=config.num_velocity_bins,
            name='PerformanceExtractor_' + mode)
        encoder_pipeline = EncoderPipeline(config,
                                           name='EncoderPipeline_' + mode)

        dag[sustain_pipeline] = partitioner[mode + '_performances']
        if mode == 'eval':
            # No stretching in eval.
            dag[splitter] = sustain_pipeline
        else:
            dag[stretch_pipeline] = sustain_pipeline
            dag[splitter] = stretch_pipeline
        dag[quantizer] = splitter
        if mode == 'eval':
            # No transposition in eval.
            dag[perf_extractor] = quantizer
        else:
            dag[transposition_pipeline] = quantizer
            dag[perf_extractor] = transposition_pipeline
        dag[encoder_pipeline] = perf_extractor
        dag[dag_pipeline.DagOutput(mode + '_performances')] = encoder_pipeline

    return dag_pipeline.DAGPipeline(dag)
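Note how the eval branch skips both augmentation stages while the training branch compounds them. Assuming each augmentation pipeline emits one copy per factor (as the stretch and transposition pipelines here do), a quick check of the training fan-out:

# Each training sequence is stretched 5 ways; every quantized window that
# survives splitting is then transposed 7 ways.
stretch_factors = [0.95, 0.975, 1.0, 1.025, 1.05]
transposition_range = range(-3, 4)
print(len(stretch_factors) * len(list(transposition_range)))  # 35; eval keeps 1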
Example #6

def get_pipeline(config, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A MelodyRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_melodies', 'training_melodies'],
      [eval_ratio])
  dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

  for mode in ['eval', 'training']:
    time_change_splitter = note_sequence_pipelines.TimeChangeSplitter(
        name='TimeChangeSplitter_' + mode)
    quantizer = note_sequence_pipelines.Quantizer(
        steps_per_quarter=config.steps_per_quarter, name='Quantizer_' + mode)
    melody_extractor = melody_pipelines.MelodyExtractor(
        min_bars=7, max_steps=512, min_unique_pitches=5,
        gap_bars=1.0, ignore_polyphonic_notes=True,
        name='MelodyExtractor_' + mode)
    encoder_pipeline = EncoderPipeline(config, name='EncoderPipeline_' + mode)

    dag[time_change_splitter] = partitioner[mode + '_melodies']
    dag[quantizer] = time_change_splitter
    dag[melody_extractor] = quantizer
    dag[encoder_pipeline] = melody_extractor
    dag[dag_pipeline.DagOutput(mode + '_melodies')] = encoder_pipeline

  return dag_pipeline.DAGPipeline(dag)
Example #7

def get_pipeline(config, eval_ratio=0.0):
    """Returns the Pipeline instance which creates the RNN dataset."""
    partitioner = pipelines_common.RandomPartition(
        music_pb2.NoteSequence, ['eval_melodies', 'training_melodies'],
        [eval_ratio])
    dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

    for mode in ['eval', 'training']:
        time_change_splitter = note_sequence_pipelines.TimeChangeSplitter(
            name='TimeChangeSplitter_' + mode)
        repeat_sequence = RepeatSequence(min_duration=16,
                                         name='RepeatSequence_' + mode)
        transposition_pipeline = note_sequence_pipelines.TranspositionPipeline(
            (0, ), name='TranspositionPipeline_' + mode)
        quantizer = note_sequence_pipelines.Quantizer(
            steps_per_quarter=config.steps_per_quarter,
            name='Quantizer_' + mode)
        melody_extractor = melody_pipelines.MelodyExtractor(
            min_bars=7,
            max_steps=512,
            min_unique_pitches=5,
            gap_bars=1.0,
            ignore_polyphonic_notes=True,
            name='MelodyExtractor_' + mode)
        encoder_pipeline = EncoderPipeline(config,
                                           name='EncoderPipeline_' + mode)

        dag[time_change_splitter] = partitioner[mode + '_melodies']
        dag[repeat_sequence] = time_change_splitter
        dag[quantizer] = repeat_sequence
        dag[transposition_pipeline] = quantizer
        dag[melody_extractor] = transposition_pipeline
        dag[encoder_pipeline] = melody_extractor
        dag[dag_pipeline.DagOutput(mode + '_melodies')] = encoder_pipeline

    return dag_pipeline.DAGPipeline(dag)
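RepeatSequence is not one of Magenta's stock note_sequence_pipelines; it presumably tiles short inputs before extraction so they can satisfy min_bars. A hypothetical sketch of such a pipeline, assuming the NoteSequencePipeline base-class interface used elsewhere in these examples and treating min_duration as seconds:

from magenta.pipelines import note_sequence_pipelines
from magenta.protobuf import music_pb2


class RepeatSequence(note_sequence_pipelines.NoteSequencePipeline):
    """Hypothetical: tiles a NoteSequence until it lasts min_duration seconds."""

    def __init__(self, min_duration, name=None):
        super(RepeatSequence, self).__init__(name=name)
        self._min_duration = min_duration

    def transform(self, note_sequence):
        if not note_sequence.notes or note_sequence.total_time <= 0:
            return [note_sequence]
        repeated = music_pb2.NoteSequence()
        repeated.CopyFrom(note_sequence)
        period = note_sequence.total_time
        while repeated.total_time < self._min_duration:
            # Append one more copy of the original notes, shifted to the end.
            offset = repeated.total_time
            for note in note_sequence.notes:
                new_note = repeated.notes.add()
                new_note.CopyFrom(note)
                new_note.start_time += offset
                new_note.end_time += offset
            repeated.total_time = offset + period
        return [repeated]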
Example #8

def get_pipeline(config, min_steps, max_steps, eval_ratio):
    """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An EventSequenceRnnConfig.
    min_steps: Minimum number of steps for an extracted sequence.
    max_steps: Maximum number of steps for an extracted sequence.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
    # Transpose up to a major third in either direction.
    # Because our current dataset is Bach chorales, transposing more than a major
    # third in either direction probably doesn't make sense (e.g., because it is
    # likely to exceed normal singing range).
    transposition_range = range(-4, 5)

    partitioner = pipelines_common.RandomPartition(
        music_pb2.NoteSequence, ['eval_poly_tracks', 'training_poly_tracks'],
        [eval_ratio])
    dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

    for mode in ['eval', 'training']:
        time_change_splitter = note_sequence_pipelines.TimeChangeSplitter(
            name='TimeChangeSplitter_' + mode)
        quantizer = note_sequence_pipelines.Quantizer(
            steps_per_quarter=config.steps_per_quarter,
            name='Quantizer_' + mode)
        transposition_pipeline = note_sequence_pipelines.TranspositionPipeline(
            transposition_range, name='TranspositionPipeline_' + mode)
        poly_extractor = PolyphonicSequenceExtractor(min_steps=min_steps,
                                                     max_steps=max_steps,
                                                     name='PolyExtractor_' +
                                                     mode)
        encoder_pipeline = event_sequence_pipeline.EncoderPipeline(
            polyphony_lib.PolyphonicSequence,
            config.encoder_decoder,
            name='EncoderPipeline_' + mode)

        dag[time_change_splitter] = partitioner[mode + '_poly_tracks']
        dag[quantizer] = time_change_splitter
        dag[transposition_pipeline] = quantizer
        dag[poly_extractor] = transposition_pipeline
        dag[encoder_pipeline] = poly_extractor
        dag[dag_pipeline.DagOutput(mode + '_poly_tracks')] = encoder_pipeline

    return dag_pipeline.DAGPipeline(dag)
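The comment's reasoning in numbers: a major third spans 4 semitones, and range(-4, 5) is half-open, so it enumerates every shift from a major third down to a major third up, yielding 9 transposed copies of each partitioned sequence:

transposition_range = list(range(-4, 5))
assert transposition_range == [-4, -3, -2, -1, 0, 1, 2, 3, 4]
assert len(transposition_range) == 9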
Example #9

    def testMelodyRNNPipeline(self):
        note_sequence = magenta.common.testing_lib.parse_test_proto(
            note_seq.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 120}""")
        note_seq.testing_lib.add_track_to_sequence(note_sequence, 0,
                                                   [(12, 100, 0.00, 2.0),
                                                    (11, 55, 2.1, 5.0),
                                                    (40, 45, 5.1, 8.0),
                                                    (55, 120, 8.1, 11.0),
                                                    (53, 99, 11.1, 14.1)])
        note_seq.testing_lib.add_chords_to_sequence(note_sequence,
                                                    [('N.C.', 0.0),
                                                     ('Am9', 5.0),
                                                     ('D7', 10.0)])

        quantizer = note_sequence_pipelines.Quantizer(steps_per_quarter=4)
        lead_sheet_extractor = lead_sheet_pipelines.LeadSheetExtractor(
            min_bars=7,
            min_unique_pitches=5,
            gap_bars=1.0,
            ignore_polyphonic_notes=False,
            all_transpositions=False)
        conditional_encoding = note_seq.ConditionalEventSequenceEncoderDecoder(
            note_seq.OneHotEventSequenceEncoderDecoder(
                note_seq.MajorMinorChordOneHotEncoding()),
            note_seq.OneHotEventSequenceEncoderDecoder(
                note_seq.MelodyOneHotEncoding(self.config.min_note,
                                              self.config.max_note)))
        quantized = quantizer.transform(note_sequence)[0]
        lead_sheet = lead_sheet_extractor.transform(quantized)[0]
        lead_sheet.squash(self.config.min_note, self.config.max_note,
                          self.config.transpose_to_key)
        encoded = pipelines_common.make_sequence_example(
            *conditional_encoding.encode(lead_sheet.chords, lead_sheet.melody))
        expected_result = {
            'training_lead_sheets': [encoded],
            'eval_lead_sheets': []
        }

        pipeline_inst = improv_rnn_pipeline.get_pipeline(self.config,
                                                         eval_ratio=0.0)
        result = pipeline_inst.transform(note_sequence)
        self.assertEqual(expected_result, result)
Example #10

def get_pipeline(config, min_steps, max_steps, eval_ratio):
    """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An EventSequenceRnnConfig.
    min_steps: Minimum number of steps for an extracted sequence.
    max_steps: Maximum number of steps for an extracted sequence.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
    # Transpose up to a major third in either direction.
    transposition_range = list(range(-4, 5))

    partitioner = pipelines_common.RandomPartition(
        music_pb2.NoteSequence,
        ['eval_pianoroll_tracks', 'training_pianoroll_tracks'], [eval_ratio])
    dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

    for mode in ['eval', 'training']:
        time_change_splitter = note_sequence_pipelines.TimeChangeSplitter(
            name='TimeChangeSplitter_' + mode)
        quantizer = note_sequence_pipelines.Quantizer(
            steps_per_quarter=config.steps_per_quarter,
            name='Quantizer_' + mode)
        transposition_pipeline = note_sequence_pipelines.TranspositionPipeline(
            transposition_range, name='TranspositionPipeline_' + mode)
        pianoroll_extractor = PianorollSequenceExtractor(
            min_steps=min_steps,
            max_steps=max_steps,
            name='PianorollExtractor_' + mode)
        encoder_pipeline = event_sequence_pipeline.EncoderPipeline(
            mm.PianorollSequence,
            config.encoder_decoder,
            name='EncoderPipeline_' + mode)

        dag[time_change_splitter] = partitioner[mode + '_pianoroll_tracks']
        dag[quantizer] = time_change_splitter
        dag[transposition_pipeline] = quantizer
        dag[pianoroll_extractor] = transposition_pipeline
        dag[encoder_pipeline] = pianoroll_extractor
        dag[dag_pipeline.DagOutput(mode +
                                   '_pianoroll_tracks')] = encoder_pipeline

    return dag_pipeline.DAGPipeline(dag)
Example #11

  def testQuantizer(self):
    steps_per_quarter = 4
    note_sequence = common_testing_lib.parse_test_proto(
        music_pb2.NoteSequence,
        """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 60}""")
    testing_lib.add_track_to_sequence(
        note_sequence, 0,
        [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),
         (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])
    expected_quantized_sequence = sequences_lib.quantize_note_sequence(
        note_sequence, steps_per_quarter)

    unit = note_sequence_pipelines.Quantizer(steps_per_quarter)
    self._unit_transform_test(unit, note_sequence,
                              [expected_quantized_sequence])