Example #1
    def testSustainPipeline(self):
        note_sequence = common_testing_lib.parse_test_proto(
            music_pb2.NoteSequence, """
        time_signatures: {
          numerator: 4
          denominator: 4}
        tempos: {
          qpm: 60}""")
        # Each note tuple is (pitch, velocity, start_time, end_time).
        testing_lib.add_track_to_sequence(note_sequence, 0,
                                          [(11, 55, 0.22, 0.50),
                                           (40, 45, 2.50, 3.50),
                                           (55, 120, 4.0, 4.01)])
        # Control number 64 is the MIDI sustain pedal (127 = pedal down,
        # 0 = pedal up).
        testing_lib.add_control_changes_to_sequence(note_sequence, 0,
                                                    [(0.0, 64, 127),
                                                     (0.75, 64, 0),
                                                     (2.0, 64, 127),
                                                     (3.0, 64, 0),
                                                     (3.75, 64, 127),
                                                     (4.5, 64, 127),
                                                     (4.8, 64, 0),
                                                     (4.9, 64, 127),
                                                     (6.0, 64, 0)])
        # The pipeline output should match applying the sustain control
        # changes directly.
        expected_sequence = sequences_lib.apply_sustain_control_changes(
            note_sequence)

        unit = note_sequence_pipelines.SustainPipeline()
        self._unit_transform_test(unit, note_sequence, [expected_sequence])
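
The pipeline under test is a thin wrapper around sequences_lib.apply_sustain_control_changes, which is also how the test builds expected_sequence. Below is a minimal standalone sketch of that equivalence; the module paths (magenta.protobuf.music_pb2, magenta.pipelines.note_sequence_pipelines, magenta.music.sequences_lib) and the Pipeline.transform call are assumptions based on the pre-note-seq Magenta layout, not something shown in the test above.

# Sketch only: module paths below assume the pre-note-seq Magenta layout.
from magenta.music import sequences_lib
from magenta.pipelines import note_sequence_pipelines
from magenta.protobuf import music_pb2

sequence = music_pb2.NoteSequence()
sequence.tempos.add(qpm=60)
# One short note, released while the sustain pedal (CC 64) is still down.
sequence.notes.add(pitch=60, velocity=80, start_time=0.0, end_time=0.5)
sequence.control_changes.add(time=0.25, control_number=64, control_value=127)
sequence.control_changes.add(time=2.0, control_number=64, control_value=0)
sequence.total_time = 2.0

# Pipeline.transform returns a list of output NoteSequences.
sustained = note_sequence_pipelines.SustainPipeline().transform(sequence)[0]
# Applying the sustain control changes directly gives the same result the
# test compares against.
assert sustained == sequences_lib.apply_sustain_control_changes(sequence)
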
Example #2
def get_pipeline(config, min_events, max_events, eval_ratio):
    """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A PerformanceRnnConfig.
    min_events: Minimum number of events for an extracted sequence.
    max_events: Maximum number of events for an extracted sequence.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
    # Stretch by -5%, -2.5%, 0%, 2.5%, and 5%.
    stretch_factors = [0.95, 0.975, 1.0, 1.025, 1.05]

    # Transpose by up to 3 semitones (a minor third) in either direction.
    transposition_range = range(-3, 4)

    # Randomly split incoming NoteSequences into eval and training sets.
    partitioner = pipelines_common.RandomPartition(
        music_pb2.NoteSequence, ['eval_performances', 'training_performances'],
        [eval_ratio])
    dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

    for mode in ['eval', 'training']:
        sustain_pipeline = note_sequence_pipelines.SustainPipeline(
            name='SustainPipeline_' + mode)
        stretch_pipeline = note_sequence_pipelines.StretchPipeline(
            stretch_factors, name='StretchPipeline_' + mode)
        splitter = note_sequence_pipelines.Splitter(hop_size_seconds=30.0,
                                                    name='Splitter_' + mode)
        quantizer = note_sequence_pipelines.Quantizer(
            steps_per_second=config.steps_per_second, name='Quantizer_' + mode)
        transposition_pipeline = note_sequence_pipelines.TranspositionPipeline(
            transposition_range, name='TranspositionPipeline_' + mode)
        perf_extractor = PerformanceExtractor(
            min_events=min_events,
            max_events=max_events,
            num_velocity_bins=config.num_velocity_bins,
            name='PerformanceExtractor_' + mode)
        encoder_pipeline = EncoderPipeline(config,
                                           name='EncoderPipeline_' + mode)

        # Wire the per-mode chain: partition -> sustain -> (stretch) ->
        # split -> quantize -> (transpose) -> extract -> encode.
        dag[sustain_pipeline] = partitioner[mode + '_performances']
        if mode == 'eval':
            # No stretching in eval.
            dag[splitter] = sustain_pipeline
        else:
            dag[stretch_pipeline] = sustain_pipeline
            dag[splitter] = stretch_pipeline
        dag[quantizer] = splitter
        if mode == 'eval':
            # No transposition in eval.
            dag[perf_extractor] = quantizer
        else:
            dag[transposition_pipeline] = quantizer
            dag[perf_extractor] = transposition_pipeline
        dag[encoder_pipeline] = perf_extractor
        dag[dag_pipeline.DagOutput(mode + '_performances')] = encoder_pipeline

    return dag_pipeline.DAGPipeline(dag)
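
For reference, a hedged sketch of how this factory might be driven end to end with Magenta's serial pipeline runner. The 'performance' config key, the event bounds, and the file paths are illustrative placeholders, and the performance_model / pipeline module paths are assumptions based on the Magenta 1.x layout rather than anything shown above.

# Sketch only: module paths, the config key, and all paths/values below are
# placeholders, not taken from the function above.
from magenta.models.performance_rnn import performance_model
from magenta.pipelines import pipeline
from magenta.protobuf import music_pb2

config = performance_model.default_configs['performance']
dataset_pipeline = get_pipeline(
    config, min_events=32, max_events=512, eval_ratio=0.1)

# Reads serialized NoteSequences and writes one TFRecord of encoded
# SequenceExamples per DagOutput name ('training_performances' and
# 'eval_performances').
pipeline.run_pipeline_serial(
    dataset_pipeline,
    pipeline.tf_record_iterator('/tmp/notesequences.tfrecord',
                                music_pb2.NoteSequence),
    '/tmp/performance_rnn/sequence_examples')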