def get_pipeline(config, steps_per_quarter, min_steps, max_steps, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An EventSequenceRnnConfig.
    steps_per_quarter: How many steps per quarter to use when quantizing.
    min_steps: Minimum number of steps for an extracted sequence.
    max_steps: Maximum number of steps for an extracted sequence.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  quantizer = pipelines_common.Quantizer(steps_per_quarter=steps_per_quarter)
  # Transpose up to a major third in either direction.
  # Because our current dataset is Bach chorales, transposing more than a major
  # third in either direction probably doesn't make sense (e.g., because it is
  # likely to exceed normal singing range).
  transposition_range = range(-4, 5)
  transposition_pipeline_train = sequences_lib.TranspositionPipeline(
      transposition_range, name='TranspositionPipelineTrain')
  transposition_pipeline_eval = sequences_lib.TranspositionPipeline(
      transposition_range, name='TranspositionPipelineEval')
  poly_extractor_train = PolyphonicSequenceExtractor(
      min_steps=min_steps, max_steps=max_steps, name='PolyExtractorTrain')
  poly_extractor_eval = PolyphonicSequenceExtractor(
      min_steps=min_steps, max_steps=max_steps, name='PolyExtractorEval')
  encoder_pipeline_train = encoder_decoder.EncoderPipeline(
      polyphony_lib.PolyphonicSequence, config.encoder_decoder,
      name='EncoderPipelineTrain')
  encoder_pipeline_eval = encoder_decoder.EncoderPipeline(
      polyphony_lib.PolyphonicSequence, config.encoder_decoder,
      name='EncoderPipelineEval')
  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_poly_tracks', 'training_poly_tracks'],
      [eval_ratio])

  dag = {
      quantizer: dag_pipeline.DagInput(music_pb2.NoteSequence),
      partitioner: quantizer,
      transposition_pipeline_train: partitioner['training_poly_tracks'],
      transposition_pipeline_eval: partitioner['eval_poly_tracks'],
      poly_extractor_train: transposition_pipeline_train,
      poly_extractor_eval: transposition_pipeline_eval,
      encoder_pipeline_train: poly_extractor_train,
      encoder_pipeline_eval: poly_extractor_eval,
      dag_pipeline.DagOutput('training_poly_tracks'): encoder_pipeline_train,
      dag_pipeline.DagOutput('eval_poly_tracks'): encoder_pipeline_eval
  }
  return dag_pipeline.DAGPipeline(dag)
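# The DAGPipeline returned above is normally driven by a small create-dataset
# script. The sketch below is illustrative only: it assumes Magenta's
# pipeline.run_pipeline_serial / pipeline.tf_record_iterator helpers and the
# polyphony_model.default_configs['polyphony'] config key; the file paths and
# step bounds are hypothetical placeholders, not values from this module.
def _example_create_polyphony_dataset():
  from magenta.models.polyphony_rnn import polyphony_model
  from magenta.pipelines import pipeline

  pipeline_instance = get_pipeline(
      config=polyphony_model.default_configs['polyphony'],  # assumed config key
      steps_per_quarter=4,
      min_steps=80,    # e.g. 5 bars at 16 steps per 4/4 bar
      max_steps=512,   # e.g. 32 bars
      eval_ratio=0.1)
  # Reads NoteSequence protos from a TFRecord, runs the DAG, and writes one
  # TFRecord of SequenceExamples per DagOutput name into the output directory.
  pipeline.run_pipeline_serial(
      pipeline_instance,
      pipeline.tf_record_iterator('/tmp/notesequences.tfrecord',
                                  pipeline_instance.input_type),
      '/tmp/polyphony_rnn/sequence_examples')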
def get_pipeline(config, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A DrumsRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_drum_tracks', 'training_drum_tracks'],
      [eval_ratio])

  dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}
  for mode in ['eval', 'training']:
    time_change_splitter = pipelines_common.TimeChangeSplitter(
        name='TimeChangeSplitter_' + mode)
    quantizer = pipelines_common.Quantizer(
        steps_per_quarter=config.steps_per_quarter, name='Quantizer_' + mode)
    drums_extractor = drum_pipelines.DrumsExtractor(
        min_bars=7, max_steps=512, gap_bars=1.0, name='DrumsExtractor_' + mode)
    encoder_pipeline = encoder_decoder.EncoderPipeline(
        magenta.music.DrumTrack, config.encoder_decoder,
        name='EncoderPipeline_' + mode)

    dag[time_change_splitter] = partitioner[mode + '_drum_tracks']
    dag[quantizer] = time_change_splitter
    dag[drums_extractor] = quantizer
    dag[encoder_pipeline] = drums_extractor
    dag[dag_pipeline.DagOutput(mode + '_drum_tracks')] = encoder_pipeline
  return dag_pipeline.DAGPipeline(dag)
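# For intuition about the RandomPartition stage feeding this DAG: each input
# NoteSequence is routed to exactly one of the named outputs, with the eval
# branch receiving roughly eval_ratio of the inputs. The helper below is a
# standalone, simplified illustration of that idea; it is NOT Magenta's
# RandomPartition and makes no claim about its hashing or determinism.
def _illustrate_random_partition(items, eval_ratio, seed=0):
  import random
  rng = random.Random(seed)
  split = {'eval': [], 'training': []}
  for item in items:
    split['eval' if rng.random() < eval_ratio else 'training'].append(item)
  return split

# Example: _illustrate_random_partition(range(10), eval_ratio=0.2) puts roughly
# 2 items in 'eval' and 8 in 'training'.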
def get_pipeline(config, min_events, max_events, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A PerformanceRnnConfig.
    min_events: Minimum number of events for an extracted sequence.
    max_events: Maximum number of events for an extracted sequence.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  # Stretch by -5%, -2.5%, 0%, 2.5%, and 5%.
  stretch_factors = [0.95, 0.975, 1.0, 1.025, 1.05]

  # Transpose no more than a major third.
  transposition_range = range(-3, 4)

  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_performances', 'training_performances'],
      [eval_ratio])

  dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}
  for mode in ['eval', 'training']:
    sustain_pipeline = note_sequence_pipelines.SustainPipeline(
        name='SustainPipeline_' + mode)
    stretch_pipeline = note_sequence_pipelines.StretchPipeline(
        stretch_factors, name='StretchPipeline_' + mode)
    splitter = note_sequence_pipelines.Splitter(
        hop_size_seconds=30.0, name='Splitter_' + mode)
    quantizer = note_sequence_pipelines.Quantizer(
        steps_per_second=config.steps_per_second, name='Quantizer_' + mode)
    transposition_pipeline = note_sequence_pipelines.TranspositionPipeline(
        transposition_range, name='TranspositionPipeline_' + mode)
    perf_extractor = PerformanceExtractor(
        min_events=min_events, max_events=max_events,
        num_velocity_bins=config.num_velocity_bins,
        name='PerformanceExtractor_' + mode)
    encoder_pipeline = encoder_decoder.EncoderPipeline(
        performance_lib.Performance, config.encoder_decoder,
        name='EncoderPipeline_' + mode)

    dag[sustain_pipeline] = partitioner[mode + '_performances']
    dag[stretch_pipeline] = sustain_pipeline
    dag[splitter] = stretch_pipeline
    dag[quantizer] = splitter
    dag[transposition_pipeline] = quantizer
    dag[perf_extractor] = transposition_pipeline
    dag[encoder_pipeline] = perf_extractor
    dag[dag_pipeline.DagOutput(mode + '_performances')] = encoder_pipeline
  return dag_pipeline.DAGPipeline(dag)
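# The stretch and transposition stages act as data augmentation: by the time a
# sequence reaches the extractor it has fanned out into one copy per
# (stretch factor, transposition amount) pair. The arithmetic below is
# illustrative only; actual counts are lower when transposed notes fall outside
# the valid pitch range or the extractor rejects a segment.
def _augmentation_fan_out():
  stretch_factors = [0.95, 0.975, 1.0, 1.025, 1.05]
  transposition_range = range(-3, 4)  # -3..3 semitones
  return len(stretch_factors) * len(transposition_range)  # 35 variants per input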
def get_pipeline(config, steps_per_quarter, min_steps, max_steps, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An EventSequenceRnnConfig.
    steps_per_quarter: How many steps per quarter to use when quantizing.
    min_steps: Minimum number of steps for an extracted sequence.
    max_steps: Maximum number of steps for an extracted sequence.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  quantizer = pipelines_common.Quantizer(steps_per_quarter=steps_per_quarter)
  poly_extractor_train = PolyphonicSequenceExtractor(
      min_steps=min_steps, max_steps=max_steps, name='PolyExtractorTrain')
  poly_extractor_eval = PolyphonicSequenceExtractor(
      min_steps=min_steps, max_steps=max_steps, name='PolyExtractorEval')
  encoder_pipeline_train = encoder_decoder.EncoderPipeline(
      polyphony_lib.PolyphonicSequence, config.encoder_decoder,
      name='EncoderPipelineTrain')
  encoder_pipeline_eval = encoder_decoder.EncoderPipeline(
      polyphony_lib.PolyphonicSequence, config.encoder_decoder,
      name='EncoderPipelineEval')
  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_poly_tracks', 'training_poly_tracks'],
      [eval_ratio])

  dag = {
      quantizer: dag_pipeline.DagInput(music_pb2.NoteSequence),
      partitioner: quantizer,
      poly_extractor_train: partitioner['training_poly_tracks'],
      poly_extractor_eval: partitioner['eval_poly_tracks'],
      encoder_pipeline_train: poly_extractor_train,
      encoder_pipeline_eval: poly_extractor_eval,
      dag_pipeline.DagOutput('training_poly_tracks'): encoder_pipeline_train,
      dag_pipeline.DagOutput('eval_poly_tracks'): encoder_pipeline_eval
  }
  return dag_pipeline.DAGPipeline(dag)
def get_pipeline(config, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A DrumsRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  quantizer = pipelines_common.Quantizer(steps_per_quarter=4)
  drums_extractor_train = drum_pipelines.DrumsExtractor(
      min_bars=7, max_steps=512, gap_bars=1.0, name='DrumsExtractorTrain')
  drums_extractor_eval = drum_pipelines.DrumsExtractor(
      min_bars=7, max_steps=512, gap_bars=1.0, name='DrumsExtractorEval')
  encoder_pipeline_train = encoder_decoder.EncoderPipeline(
      magenta.music.DrumTrack, config.encoder_decoder,
      name='EncoderPipelineTrain')
  encoder_pipeline_eval = encoder_decoder.EncoderPipeline(
      magenta.music.DrumTrack, config.encoder_decoder,
      name='EncoderPipelineEval')
  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_drum_tracks', 'training_drum_tracks'],
      [eval_ratio])

  dag = {
      quantizer: dag_pipeline.DagInput(music_pb2.NoteSequence),
      partitioner: quantizer,
      drums_extractor_train: partitioner['training_drum_tracks'],
      drums_extractor_eval: partitioner['eval_drum_tracks'],
      encoder_pipeline_train: drums_extractor_train,
      encoder_pipeline_eval: drums_extractor_eval,
      dag_pipeline.DagOutput('training_drum_tracks'): encoder_pipeline_train,
      dag_pipeline.DagOutput('eval_drum_tracks'): encoder_pipeline_eval
  }
  return dag_pipeline.DAGPipeline(dag)
def get_pipeline(config, min_steps, max_steps, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An EventSequenceRnnConfig.
    min_steps: Minimum number of steps for an extracted sequence.
    max_steps: Maximum number of steps for an extracted sequence.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  # Transpose up to a major third in either direction.
  # Because our current dataset is Bach chorales, transposing more than a major
  # third in either direction probably doesn't make sense (e.g., because it is
  # likely to exceed normal singing range).
  transposition_range = range(-4, 5)

  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_poly_tracks', 'training_poly_tracks'],
      [eval_ratio])

  dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}
  for mode in ['eval', 'training']:
    time_change_splitter = note_sequence_pipelines.TimeChangeSplitter(
        name='TimeChangeSplitter_' + mode)
    quantizer = note_sequence_pipelines.Quantizer(
        steps_per_quarter=config.steps_per_quarter, name='Quantizer_' + mode)
    transposition_pipeline = note_sequence_pipelines.TranspositionPipeline(
        transposition_range, name='TranspositionPipeline_' + mode)
    poly_extractor = PolyphonicSequenceExtractor(
        min_steps=min_steps, max_steps=max_steps, name='PolyExtractor_' + mode)
    encoder_pipeline = encoder_decoder.EncoderPipeline(
        polyphony_lib.PolyphonicSequence, config.encoder_decoder,
        name='EncoderPipeline_' + mode)

    dag[time_change_splitter] = partitioner[mode + '_poly_tracks']
    dag[quantizer] = time_change_splitter
    dag[transposition_pipeline] = quantizer
    dag[poly_extractor] = transposition_pipeline
    dag[encoder_pipeline] = poly_extractor
    dag[dag_pipeline.DagOutput(mode + '_poly_tracks')] = encoder_pipeline
  return dag_pipeline.DAGPipeline(dag)
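# For a sense of scale for min_steps/max_steps: with config.steps_per_quarter
# set to 4 and 4/4 time, one bar is 16 quantized steps. The values below are
# hypothetical examples; the real bounds are whatever the caller passes in.
def _steps_to_bars_example():
  steps_per_quarter = 4                     # assumed example value
  steps_per_bar = steps_per_quarter * 4     # 16 steps per 4/4 bar
  min_steps, max_steps = 80, 512            # hypothetical caller arguments
  return min_steps // steps_per_bar, max_steps // steps_per_bar  # (5, 32)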