def testMelodyRNNPipeline(self):
  """Checks that get_pipeline() reproduces a hand-computed melody encoding.

  Builds a NoteSequence, runs the quantize -> extract -> squash -> encode
  stages manually, and asserts the assembled pipeline yields the identical
  SequenceExample partition.
  """
  sequence = magenta.common.testing_lib.parse_test_proto(
      music_pb2.NoteSequence,
      """ time_signatures: { numerator: 4 denominator: 4} tempos: { qpm: 120}""")
  # One monophonic track long enough to satisfy the min_bars threshold.
  magenta.music.testing_lib.add_track_to_sequence(
      sequence, 0,
      [(12, 100, 0.00, 2.0), (11, 55, 2.1, 5.0), (40, 45, 5.1, 8.0),
       (55, 120, 8.1, 11.0), (53, 99, 11.1, 14.1)])

  # Mirror the pipeline's internal stages by hand.
  quantizer = note_sequence_pipelines.Quantizer(steps_per_quarter=4)
  extractor = melody_pipelines.MelodyExtractor(
      min_bars=7, min_unique_pitches=5, gap_bars=1.0,
      ignore_polyphonic_notes=False)
  encoder_decoder = magenta.music.OneHotEventSequenceEncoderDecoder(
      magenta.music.MelodyOneHotEncoding(self.config.min_note,
                                         self.config.max_note))

  quantized_sequence = quantizer.transform(sequence)[0]
  extracted_melody = extractor.transform(quantized_sequence)[0]
  extracted_melody.squash(self.config.min_note, self.config.max_note,
                          self.config.transpose_to_key)
  expected_example = pipelines_common.make_sequence_example(
      *encoder_decoder.encode(extracted_melody))
  expected_result = {'training_melodies': [expected_example],
                     'eval_melodies': []}

  pipeline_inst = melody_rnn_pipeline.get_pipeline(self.config, eval_ratio=0.0)
  self.assertEqual(expected_result, pipeline_inst.transform(sequence))
def testDrumsRNNPipeline(self):
  """Checks that get_pipeline() reproduces a hand-computed drums encoding.

  Builds a drum NoteSequence, runs the quantize -> extract -> encode stages
  manually, and asserts the assembled pipeline yields the identical
  SequenceExample partition.
  """
  sequence = magenta.common.testing_lib.parse_test_proto(
      music_pb2.NoteSequence,
      """ time_signatures: { numerator: 4 denominator: 4} tempos: { qpm: 120}""")
  # Overlapping drum hits spanning enough bars for the min_bars threshold.
  magenta.music.testing_lib.add_track_to_sequence(
      sequence, 0,
      [(36, 100, 0.00, 2.0), (40, 55, 2.1, 5.0), (44, 80, 3.6, 5.0),
       (41, 45, 5.1, 8.0), (64, 100, 6.6, 10.0), (55, 120, 8.1, 11.0),
       (39, 110, 9.6, 9.7), (53, 99, 11.1, 14.1), (51, 40, 12.6, 13.0),
       (55, 100, 14.1, 15.0), (54, 90, 15.6, 17.0), (60, 100, 17.1, 18.0)],
      is_drum=True)

  # Mirror the pipeline's internal stages by hand.
  quantizer = note_sequence_pipelines.Quantizer(steps_per_quarter=4)
  extractor = drum_pipelines.DrumsExtractor(min_bars=7, gap_bars=1.0)
  encoder_decoder = magenta.music.OneHotEventSequenceEncoderDecoder(
      magenta.music.MultiDrumOneHotEncoding())

  quantized_sequence = quantizer.transform(sequence)[0]
  drum_track = extractor.transform(quantized_sequence)[0]
  expected_example = pipelines_common.make_sequence_example(
      *encoder_decoder.encode(drum_track))
  expected_result = {'training_drum_tracks': [expected_example],
                     'eval_drum_tracks': []}

  pipeline_inst = drums_rnn_pipeline.get_pipeline(self.config, eval_ratio=0.0)
  self.assertEqual(expected_result, pipeline_inst.transform(sequence))
def transform(self, input_object):
  """Encodes a performance into one or more SequenceExamples.

  When control signals are configured, each signal is extracted from the
  performance and the per-step values are zipped together as conditioning
  input. With optional conditioning, two encodings are produced — one with
  the conditioning disabled and one with it enabled. Without control
  signals the performance is encoded unconditionally.
  """
  performance = input_object
  if not self._control_signals:
    # No conditioning: encode the event sequence on its own.
    encodings = [self._encoder_decoder.encode(performance)]
  else:
    # One extracted sequence per control signal, zipped step-wise.
    per_signal = [signal.extract(performance)
                  for signal in self._control_signals]
    controls = list(zip(*per_signal))
    if self._optional_conditioning:
      # Emit both variants: conditioning disabled and enabled.
      encodings = []
      for disable in (False, True):
        flagged_controls = list(
            zip([disable] * len(controls), controls))
        encodings.append(
            self._encoder_decoder.encode(flagged_controls, performance))
    else:
      encodings = [self._encoder_decoder.encode(controls, performance)]
  return [
      pipelines_common.make_sequence_example(*enc) for enc in encodings
  ]
def transform(self, input_object):
  """Squashes a melody into the configured note range and encodes it.

  Returns a one-element list containing the melody's SequenceExample.
  Note: squash() mutates the input melody in place.
  """
  melody = input_object
  melody.squash(self._min_note, self._max_note, self._transpose_to_key)
  encoding = self._melody_encoder_decoder.encode(melody)
  return [pipelines_common.make_sequence_example(*encoding)]
def testMelodyRNNPipeline(self):
  """Checks that get_pipeline() reproduces a hand-computed lead-sheet encoding.

  Builds a NoteSequence with both a melody track and chord annotations,
  runs the quantize -> extract -> squash -> conditional-encode stages
  manually, and asserts the assembled pipeline yields the identical
  SequenceExample partition.
  """
  sequence = magenta.common.testing_lib.parse_test_proto(
      note_seq.NoteSequence,
      """ time_signatures: { numerator: 4 denominator: 4} tempos: { qpm: 120}""")
  note_seq.testing_lib.add_track_to_sequence(
      sequence, 0,
      [(12, 100, 0.00, 2.0), (11, 55, 2.1, 5.0), (40, 45, 5.1, 8.0),
       (55, 120, 8.1, 11.0), (53, 99, 11.1, 14.1)])
  note_seq.testing_lib.add_chords_to_sequence(
      sequence, [('N.C.', 0.0), ('Am9', 5.0), ('D7', 10.0)])

  # Mirror the pipeline's internal stages by hand.
  quantizer = note_sequence_pipelines.Quantizer(steps_per_quarter=4)
  extractor = lead_sheet_pipelines.LeadSheetExtractor(
      min_bars=7, min_unique_pitches=5, gap_bars=1.0,
      ignore_polyphonic_notes=False, all_transpositions=False)
  # Chords condition the melody encoding.
  conditional_encoding = note_seq.ConditionalEventSequenceEncoderDecoder(
      note_seq.OneHotEventSequenceEncoderDecoder(
          note_seq.MajorMinorChordOneHotEncoding()),
      note_seq.OneHotEventSequenceEncoderDecoder(
          note_seq.MelodyOneHotEncoding(self.config.min_note,
                                        self.config.max_note)))

  quantized_sequence = quantizer.transform(sequence)[0]
  extracted_lead_sheet = extractor.transform(quantized_sequence)[0]
  extracted_lead_sheet.squash(self.config.min_note, self.config.max_note,
                              self.config.transpose_to_key)
  expected_example = pipelines_common.make_sequence_example(
      *conditional_encoding.encode(extracted_lead_sheet.chords,
                                   extracted_lead_sheet.melody))
  expected_result = {'training_lead_sheets': [expected_example],
                     'eval_lead_sheets': []}

  pipeline_inst = improv_rnn_pipeline.get_pipeline(self.config,
                                                   eval_ratio=0.0)
  self.assertEqual(expected_result, pipeline_inst.transform(sequence))
def transform(self, lead_sheet):
  """Encodes a lead sheet (chords conditioning melody) as SequenceExamples.

  The lead sheet is first squashed into the configured note range (in
  place). Lead sheets whose chords cannot be encoded are skipped — the
  failure is logged and counted in pipeline statistics instead of raising.
  Returns a list with zero or one SequenceExample.
  """
  lead_sheet.squash(self._min_note, self._max_note, self._transpose_to_key)
  stats = []
  encoded = []
  try:
    encoded.append(
        self._conditional_encoder_decoder.encode(lead_sheet.chords,
                                                 lead_sheet.melody))
  except note_seq.ChordEncodingError as e:
    # Chord type not representable by the chord encoding.
    tf.logging.warning('Skipped lead sheet: %s', e)
    stats.append(statistics.Counter('chord_encoding_exception', 1))
  except note_seq.ChordSymbolError as e:
    # Chord symbol could not be parsed at all.
    tf.logging.warning('Skipped lead sheet: %s', e)
    stats.append(statistics.Counter('chord_symbol_exception', 1))
  self._set_stats(stats)
  return [
      pipelines_common.make_sequence_example(*enc) for enc in encoded
  ]
def transform(self, input_object):
  """Encodes an event sequence into a one-element SequenceExample list."""
  event_sequence = input_object
  inputs_and_labels = self._encoder_decoder.encode(event_sequence)
  return [pipelines_common.make_sequence_example(*inputs_and_labels)]
def transform(self, seq):
  """Encodes an event sequence into a one-element SequenceExample list."""
  inputs_and_labels = self._encoder_decoder.encode(seq)
  return [pipelines_common.make_sequence_example(*inputs_and_labels)]