def testVocabSize(self):
    """A composite encoder reports one vocab size per sub-encoder, in order."""
    composite = music_encoders.CompositeScoreEncoder([
        music_encoders.TextChordsEncoder(steps_per_quarter=4),
        music_encoders.TextMelodyEncoder(
            steps_per_quarter=4, min_pitch=21, max_pitch=108),
    ])
    # [chords vocab, melody vocab]
    self.assertEqual([51, 92], composite.vocab_size)
def feature_encoders(self, data_dir):
    """Return the feature-encoder dict for this problem.

    Args:
      data_dir: Unused; accepted for interface compatibility.

    Returns:
      Dict with a 'targets' performance encoder and, when score components
      exist, an 'inputs' score encoder.
    """
    del data_dir  # unused
    encoders = {'targets': self.performance_encoder()}
    score_encoders = self.score_encoders()
    if not score_encoders:
        return encoders
    if len(score_encoders) == 1:
        # A single score component: use its encoder directly.
        _, encoders['inputs'] = score_encoders[0]
    else:
        # Multiple score components: wrap them in a composite encoder,
        # which is only used for inference.
        encoders['inputs'] = music_encoders.CompositeScoreEncoder(
            [encoder for _, encoder in score_encoders])
    return encoders
def testEncodeNoteSequence(self):
    """Chord and melody event ids are produced in lockstep, one pair per step."""
    encoder = music_encoders.CompositeScoreEncoder([
        music_encoders.TextChordsEncoder(steps_per_quarter=4),
        music_encoders.TextMelodyEncoder(
            steps_per_quarter=4, min_pitch=21, max_pitch=108),
    ])

    sequence = music_pb2.NoteSequence()
    sequence.tempos.add(qpm=60)
    testing_lib.add_chords_to_sequence(
        sequence, [('C', 0.5), ('Dm', 1.0)])
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(60, 127, 0.0, 0.25), (62, 127, 0.25, 0.75), (64, 127, 1.25, 2.0)])

    chord_ids, melody_ids = zip(*encoder.encode_note_sequence(sequence))

    # Two steps of no-chord, two of C major, four of D minor.
    self.assertEqual([2, 2, 3, 3, 17, 17, 17, 17], list(chord_ids))
    # ON(60), ON(62), HOLD(62), OFF(62), REST, ON(64), HOLD(64), HOLD(64).
    self.assertEqual([43, 45, 2, 3, 2, 47, 2, 2], list(melody_ids))