def get_pipeline(config, eval_ratio):
  """Builds the DAG pipeline that turns NoteSequences into RNN training data.

  The DAG quantizes incoming NoteSequences, randomly partitions them into
  training/eval sets, extracts drum tracks from each partition, and encodes
  the tracks for the model.

  Args:
    config: A DrumsRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  quantize = pipelines_common.Quantizer(steps_per_quarter=4)
  splitter = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_drum_tracks', 'training_drum_tracks'],
      [eval_ratio])
  extract_train = drum_pipelines.DrumsExtractor(
      min_bars=7, max_steps=512, gap_bars=1.0, name='DrumsExtractorTrain')
  extract_eval = drum_pipelines.DrumsExtractor(
      min_bars=7, max_steps=512, gap_bars=1.0, name='DrumsExtractorEval')
  encode_train = EncoderPipeline(config, name='EncoderPipelineTrain')
  encode_eval = EncoderPipeline(config, name='EncoderPipelineEval')

  # Wire the stages together: input -> quantize -> partition, then a
  # parallel extract/encode chain per partition.
  dag = {}
  dag[quantize] = dag_pipeline.Input(music_pb2.NoteSequence)
  dag[splitter] = quantize
  dag[extract_train] = splitter['training_drum_tracks']
  dag[extract_eval] = splitter['eval_drum_tracks']
  dag[encode_train] = extract_train
  dag[encode_eval] = extract_eval
  dag[dag_pipeline.Output('training_drum_tracks')] = encode_train
  dag[dag_pipeline.Output('eval_drum_tracks')] = encode_eval
  return dag_pipeline.DAGPipeline(dag)
def testDrumsExtractor(self):
  """DrumsExtractor keeps only the drum track of a quantized sequence."""
  note_sequence = common_testing_lib.parse_test_proto(
      music_pb2.NoteSequence,
      """
      time_signatures: {
        numerator: 4
        denominator: 4}
      tempos: {
        qpm: 60}""")
  # Track 0 is a drum track; track 1 is melodic and must be ignored.
  testing_lib.add_track_to_sequence(
      note_sequence, 0,
      [(12, 100, 2, 4), (11, 1, 6, 7), (12, 1, 6, 8)],
      is_drum=True)
  testing_lib.add_track_to_sequence(
      note_sequence, 1, [(12, 127, 2, 4), (14, 50, 6, 8)])
  quantized = sequences_lib.quantize_note_sequence(
      note_sequence, steps_per_quarter=1)

  event_lists = [
      [NO_DRUMS, NO_DRUMS, DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
       DRUMS(11, 12)]]
  expected_tracks = [
      drums_lib.DrumTrack(events, steps_per_quarter=1, steps_per_bar=4)
      for events in event_lists]

  unit = drum_pipelines.DrumsExtractor(min_bars=1, gap_bars=1)
  self._unit_transform_test(unit, quantized, expected_tracks)
def get_pipeline(config, eval_ratio):
  """Builds the DAG pipeline that turns NoteSequences into RNN training data.

  Input NoteSequences are randomly partitioned into training/eval sets; each
  partition is split on time-signature/tempo changes, quantized, mined for
  drum tracks, and encoded for the model.

  Args:
    config: A DrumsRnnConfig object.
    eval_ratio: Fraction of input to set aside for evaluation set.

  Returns:
    A pipeline.Pipeline instance.
  """
  partitioner = pipelines_common.RandomPartition(
      music_pb2.NoteSequence,
      ['eval_drum_tracks', 'training_drum_tracks'],
      [eval_ratio])
  dag = {partitioner: dag_pipeline.DagInput(music_pb2.NoteSequence)}

  for mode in ('eval', 'training'):
    # The per-partition stages, in the order data flows through them.
    stages = [
        note_sequence_pipelines.TimeChangeSplitter(
            name='TimeChangeSplitter_' + mode),
        note_sequence_pipelines.Quantizer(
            steps_per_quarter=config.steps_per_quarter,
            name='Quantizer_' + mode),
        drum_pipelines.DrumsExtractor(
            min_bars=7, max_steps=512, gap_bars=1.0,
            name='DrumsExtractor_' + mode),
        event_sequence_pipeline.EncoderPipeline(
            magenta.music.DrumTrack, config.encoder_decoder,
            name='EncoderPipeline_' + mode),
    ]
    # Chain each stage onto the previous one, starting from the partition.
    upstream = partitioner[mode + '_drum_tracks']
    for stage in stages:
      dag[stage] = upstream
      upstream = stage
    dag[dag_pipeline.DagOutput(mode + '_drum_tracks')] = upstream

  return dag_pipeline.DAGPipeline(dag)
def testDrumsRNNPipeline(self):
  """End-to-end check: get_pipeline output matches manual encoding steps."""
  note_sequence = magenta.common.testing_lib.parse_test_proto(
      music_pb2.NoteSequence,
      """
      time_signatures: {
        numerator: 4
        denominator: 4}
      tempos: {
        qpm: 120}""")
  magenta.music.testing_lib.add_track_to_sequence(
      note_sequence, 0,
      [(36, 100, 0.00, 2.0), (40, 55, 2.1, 5.0), (44, 80, 3.6, 5.0),
       (41, 45, 5.1, 8.0), (64, 100, 6.6, 10.0), (55, 120, 8.1, 11.0),
       (39, 110, 9.6, 9.7), (53, 99, 11.1, 14.1), (51, 40, 12.6, 13.0),
       (55, 100, 14.1, 15.0), (54, 90, 15.6, 17.0), (60, 100, 17.1, 18.0)],
      is_drum=True)

  # Reproduce the pipeline's transformation by hand: quantize, extract the
  # drum track, and one-hot encode it.
  quantized = note_sequence_pipelines.Quantizer(
      steps_per_quarter=4).transform(note_sequence)[0]
  drums = drum_pipelines.DrumsExtractor(
      min_bars=7, gap_bars=1.0).transform(quantized)[0]
  encoder = magenta.music.OneHotEventSequenceEncoderDecoder(
      magenta.music.MultiDrumOneHotEncoding())
  one_hot = encoder.encode(drums)

  # With eval_ratio=0.0 everything lands in the training partition.
  pipeline_inst = drums_rnn_pipeline.get_pipeline(
      self.config, eval_ratio=0.0)
  result = pipeline_inst.transform(note_sequence)
  self.assertEqual(
      {'training_drum_tracks': [one_hot], 'eval_drum_tracks': []},
      result)
def testDrumsExtractor(self):
  """DrumsExtractor pulls only the drum track from a QuantizedSequence."""
  quantized_sequence = sequences_lib.QuantizedSequence()
  quantized_sequence.steps_per_quarter = 1
  # Track 0 is the drum track; track 1 is melodic and should be skipped.
  testing_lib.add_quantized_track_to_sequence(
      quantized_sequence, 0,
      [(12, 100, 2, 4), (11, 1, 6, 7), (12, 1, 6, 8)],
      is_drum=True)
  testing_lib.add_quantized_track_to_sequence(
      quantized_sequence, 1, [(12, 127, 2, 4), (14, 50, 6, 8)])

  event_lists = [
      [NO_DRUMS, NO_DRUMS, DRUMS(12), NO_DRUMS, NO_DRUMS, NO_DRUMS,
       DRUMS(11, 12)]]
  expected_tracks = [
      drums_lib.DrumTrack(events, steps_per_quarter=1, steps_per_bar=4)
      for events in event_lists]

  unit = drum_pipelines.DrumsExtractor(min_bars=1, gap_bars=1)
  self._unit_transform_test(unit, quantized_sequence, expected_tracks)