def get_pipeline(config, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A DrumsRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_drum_tracks', 'training_drum_tracks'],
      [eval_ratio])
  dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

  # Build an identical processing chain for each partition: split on time
  # signature changes, quantize, extract drum tracks, then encode for the
  # model.
  for mode in ('eval', 'training'):
    splitter = note_sequence_pipelines.TimeChangeSplitter(
        name='TimeChangeSplitter_' + mode)
    quantizer = note_sequence_pipelines.Quantizer(
        steps_per_quarter=config.steps_per_quarter, name='Quantizer_' + mode)
    extractor = drum_pipelines.DrumsExtractor(
        min_bars=7, max_steps=512, gap_bars=1.0,
        name='DrumsExtractor_' + mode)
    encoder = event_sequence_pipeline.EncoderPipeline(
        magenta.music.DrumTrack, config.encoder_decoder,
        name='EncoderPipeline_' + mode)

    dag.update({
        splitter: partitioner[mode + '_drum_tracks'],
        quantizer: splitter,
        extractor: quantizer,
        encoder: extractor,
        dag_pipeline.DagOutput(mode + '_drum_tracks'): encoder,
    })
  return dag_pipeline.DAGPipeline(dag)
def get_pipeline(config, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An ImprovRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  # When no target key is configured, extract every transposition of each
  # lead sheet.
  all_transpositions = config.transpose_to_key is None

  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_lead_sheets', 'training_lead_sheets'],
      [eval_ratio])
  dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

  for mode in ('eval', 'training'):
    # Chain for this partition: split on time changes, quantize, extract
    # lead sheets, encode.
    splitter = note_sequence_pipelines.TimeChangeSplitter(
        name='TimeChangeSplitter_' + mode)
    quantizer = note_sequence_pipelines.Quantizer(
        steps_per_quarter=config.steps_per_quarter, name='Quantizer_' + mode)
    extractor = lead_sheet_pipelines.LeadSheetExtractor(
        min_bars=7, max_steps=512, min_unique_pitches=3, gap_bars=1.0,
        ignore_polyphonic_notes=False, all_transpositions=all_transpositions,
        name='LeadSheetExtractor_' + mode)
    encoder = EncoderPipeline(config, name='EncoderPipeline_' + mode)

    dag.update({
        splitter: partitioner[mode + '_lead_sheets'],
        quantizer: splitter,
        extractor: quantizer,
        encoder: extractor,
        dag_pipeline.DagOutput(mode + '_lead_sheets'): encoder,
    })
  return dag_pipeline.DAGPipeline(dag)
def get_pipeline(config, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A MelodyRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_melodies', 'training_melodies'],
      [eval_ratio])
  dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

  for mode in ('eval', 'training'):
    # Chain for this partition: split on time changes, quantize, extract
    # monophonic melodies, encode.
    splitter = note_sequence_pipelines.TimeChangeSplitter(
        name='TimeChangeSplitter_' + mode)
    quantizer = note_sequence_pipelines.Quantizer(
        steps_per_quarter=config.steps_per_quarter, name='Quantizer_' + mode)
    extractor = melody_pipelines.MelodyExtractor(
        min_bars=7, max_steps=512, min_unique_pitches=5, gap_bars=1.0,
        ignore_polyphonic_notes=True, name='MelodyExtractor_' + mode)
    encoder = EncoderPipeline(config, name='EncoderPipeline_' + mode)

    dag.update({
        splitter: partitioner[mode + '_melodies'],
        quantizer: splitter,
        extractor: quantizer,
        encoder: extractor,
        dag_pipeline.DagOutput(mode + '_melodies'): encoder,
    })
  return dag_pipeline.DAGPipeline(dag)
def get_pipeline(config, eval_ratio=0.0):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: A config object with a `steps_per_quarter` attribute
        (presumably a melody RNN config — confirm against callers).
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_melodies', 'training_melodies'],
      [eval_ratio])
  dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

  for mode in ('eval', 'training'):
    # Chain for this partition: split on time changes, repeat short
    # sequences up to a minimum duration, quantize, transpose (identity
    # transposition only), extract melodies, encode.
    splitter = note_sequence_pipelines.TimeChangeSplitter(
        name='TimeChangeSplitter_' + mode)
    repeater = RepeatSequence(min_duration=16, name='RepeatSequence_' + mode)
    quantizer = note_sequence_pipelines.Quantizer(
        steps_per_quarter=config.steps_per_quarter, name='Quantizer_' + mode)
    transposer = note_sequence_pipelines.TranspositionPipeline(
        (0,), name='TranspositionPipeline_' + mode)
    extractor = melody_pipelines.MelodyExtractor(
        min_bars=7, max_steps=512, min_unique_pitches=5, gap_bars=1.0,
        ignore_polyphonic_notes=True, name='MelodyExtractor_' + mode)
    encoder = EncoderPipeline(config, name='EncoderPipeline_' + mode)

    dag.update({
        splitter: partitioner[mode + '_melodies'],
        repeater: splitter,
        quantizer: repeater,
        transposer: quantizer,
        extractor: transposer,
        encoder: extractor,
        dag_pipeline.DagOutput(mode + '_melodies'): encoder,
    })
  return dag_pipeline.DAGPipeline(dag)
def get_pipeline(config, min_steps, max_steps, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An EventSequenceRnnConfig.
    min_steps: Minimum number of steps for an extracted sequence.
    max_steps: Maximum number of steps for an extracted sequence.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  # Transpose up to a major third in either direction.
  # Because our current dataset is Bach chorales, transposing more than a major
  # third in either direction probably doesn't make sense (e.g., because it is
  # likely to exceed normal singing range).
  # Materialize the range as a list so the transposition amounts form a
  # reusable sequence under Python 3 (a bare `range` is a lazy object);
  # this also matches the pianoroll pipeline's convention.
  transposition_range = list(range(-4, 5))

  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_poly_tracks', 'training_poly_tracks'],
      [eval_ratio])
  dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

  for mode in ['eval', 'training']:
    # Per-partition chain: split on time changes, quantize, transpose,
    # extract polyphonic sequences, encode.
    time_change_splitter = note_sequence_pipelines.TimeChangeSplitter(
        name='TimeChangeSplitter_' + mode)
    quantizer = note_sequence_pipelines.Quantizer(
        steps_per_quarter=config.steps_per_quarter, name='Quantizer_' + mode)
    transposition_pipeline = note_sequence_pipelines.TranspositionPipeline(
        transposition_range, name='TranspositionPipeline_' + mode)
    poly_extractor = PolyphonicSequenceExtractor(
        min_steps=min_steps, max_steps=max_steps, name='PolyExtractor_' + mode)
    encoder_pipeline = event_sequence_pipeline.EncoderPipeline(
        polyphony_lib.PolyphonicSequence, config.encoder_decoder,
        name='EncoderPipeline_' + mode)

    dag[time_change_splitter] = partitioner[mode + '_poly_tracks']
    dag[quantizer] = time_change_splitter
    dag[transposition_pipeline] = quantizer
    dag[poly_extractor] = transposition_pipeline
    dag[encoder_pipeline] = poly_extractor
    dag[dag_pipeline.DagOutput(mode + '_poly_tracks')] = encoder_pipeline
  return dag_pipeline.DAGPipeline(dag)
def get_pipeline(config, min_steps, max_steps, eval_ratio):
  """Returns the Pipeline instance which creates the RNN dataset.

  Args:
    config: An EventSequenceRnnConfig.
    min_steps: Minimum number of steps for an extracted sequence.
    max_steps: Maximum number of steps for an extracted sequence.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  # Transpose up to a major third in either direction.
  transposition_range = list(range(-4, 5))

  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_pianoroll_tracks', 'training_pianoroll_tracks'],
      [eval_ratio])
  dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

  for mode in ('eval', 'training'):
    # Per-partition chain: split on time changes, quantize, transpose,
    # extract pianoroll sequences, encode.
    splitter = note_sequence_pipelines.TimeChangeSplitter(
        name='TimeChangeSplitter_' + mode)
    quantizer = note_sequence_pipelines.Quantizer(
        steps_per_quarter=config.steps_per_quarter, name='Quantizer_' + mode)
    transposer = note_sequence_pipelines.TranspositionPipeline(
        transposition_range, name='TranspositionPipeline_' + mode)
    extractor = PianorollSequenceExtractor(
        min_steps=min_steps, max_steps=max_steps,
        name='PianorollExtractor_' + mode)
    encoder = event_sequence_pipeline.EncoderPipeline(
        mm.PianorollSequence, config.encoder_decoder,
        name='EncoderPipeline_' + mode)

    dag.update({
        splitter: partitioner[mode + '_pianoroll_tracks'],
        quantizer: splitter,
        transposer: quantizer,
        extractor: transposer,
        encoder: extractor,
        dag_pipeline.DagOutput(mode + '_pianoroll_tracks'): encoder,
    })
  return dag_pipeline.DAGPipeline(dag)
def testTimeChangeSplitter(self):
  """TimeChangeSplitter should match split_note_sequence_on_time_changes."""
  # A sequence with a time-signature change at t=2.0; the splitter is
  # expected to produce exactly the subsequences the library helper does.
  sequence = common_testing_lib.parse_test_proto(
      music_pb2.NoteSequence,
      """
      time_signatures: {
        time: 2.0
        numerator: 3
        denominator: 4}
      tempos: {
        qpm: 60}""")
  notes = [
      (12, 100, 0.01, 10.0),
      (11, 55, 0.22, 0.50),
      (40, 45, 2.50, 3.50),
      (55, 120, 4.0, 4.01),
      (52, 99, 4.75, 5.0),
  ]
  testing_lib.add_track_to_sequence(sequence, 0, notes)

  expected = sequences_lib.split_note_sequence_on_time_changes(sequence)
  splitter = note_sequence_pipelines.TimeChangeSplitter()
  self._unit_transform_test(splitter, sequence, expected)