def tensor_to_score(self, tensor_score):
    """Convert a lead encoded as a tensor of note indexes into a music21 score.

    Performs a run-length decode: each tick holds either a new note symbol or
    the slur symbol, which extends the currently sounding note.

    :param tensor_score: tensor of note indexes, one entry per tick
    :return: music21.stream.Score containing a single Part
    """
    slur_index = self.note2index_dicts[self.NOTES][SLUR_SYMBOL]
    index2note = self.index2note_dicts[self.NOTES]

    score = music21.stream.Score()
    part = music21.stream.Part()

    # Note currently being held, and the duration accumulated for it so far.
    current_note = music21.note.Rest()
    current_dur = 0

    for tick, symbol_index in enumerate(tensor_score.numpy().flatten()):
        tick_dur = self.tick_durations[tick % self.subdivision]
        if symbol_index == slur_index:
            # Slur symbol: sustain the held note through this tick.
            current_dur += tick_dur
            continue
        # New onset: flush the previous note (if any), then start a fresh one.
        if current_dur > 0:
            current_note.duration = music21.duration.Duration(current_dur)
            part.append(current_note)
        current_note = standard_note(index2note[symbol_index])
        current_dur = tick_dur

    # Flush the final held note.
    current_note.duration = music21.duration.Duration(current_dur)
    part.append(current_note)
    score.insert(part)
    return score
def tensor_to_score(self, tensor_score):
    """Decode a multi-voice index tensor into a music21 score, one Part per voice.

    :param tensor_score: (num_voices, length)
    :return: music21 score object
    """
    score = music21.stream.Score()
    for voice_index, (voice, index2note, note2index) in enumerate(
            zip(tensor_score, self.index2note_dicts, self.note2index_dicts)):
        slur_index = note2index[SLUR_SYMBOL]
        part = stream.Part(id='part' + str(voice_index))

        # Run-length decoding state: the sounding note and its tick count.
        held_note = music21.note.Rest()
        held_ticks = 0

        for symbol_index in (n.item() for n in voice):
            if symbol_index == slur_index:
                # Slur: prolong the sounding note by one tick.
                held_ticks += 1
                continue
            # Onset of a new note: emit the previous one first.
            if held_ticks > 0:
                held_note.duration = music21.duration.Duration(
                    held_ticks / self.subdivision)
                part.append(held_note)
            held_note = standard_note(index2note[symbol_index])
            held_ticks = 1

        # Emit the last sounding note of this voice.
        held_note.duration = music21.duration.Duration(
            held_ticks / self.subdivision)
        part.append(held_note)
        score.insert(part)
    return score
def compute_voice_ranges(self):
    """Compute the (min_midi, max_midi) pitch range of each voice.

    Scans every note symbol in each voice's vocabulary, keeping only pitched
    notes (rests/meta symbols are skipped via ``n.isNote``), and stores one
    ``(min_midi, max_midi)`` tuple per voice in ``self.voice_ranges``.

    Requires ``self.index2note_dicts`` and ``self.note2index_dicts`` to be
    already computed.
    """
    assert self.index2note_dicts is not None
    assert self.note2index_dicts is not None
    self.voice_ranges = []
    print('Computing voice ranges')
    # Iterate the list directly (not enumerate) so tqdm knows the total and
    # can display progress; the voice index itself was never used.
    for note2index in tqdm(self.note2index_dicts):
        notes = [standard_note(note_string) for note_string in note2index]
        midi_pitches = [n.pitch.midi for n in notes if n.isNote]
        min_midi, max_midi = min(midi_pitches), max(midi_pitches)
        self.voice_ranges.append((min_midi, max_midi))
def tensor_to_score(self, tensor_score, fermata_tensor=None):
    """Decode a multi-voice index tensor into a music21 score, one Part per voice.

    Parts are named Soprano/Alto/Tenor/Bass when there are exactly 4 voices.

    :param tensor_score: (num_voices, length) tensor of note indexes
    :param fermata_tensor: optional; only row 0 is read — a value of 1 at a
        note-onset position marks the corresponding note with a fermata
        (fermatas are attached to every voice, driven by voice 0's onsets)
    :return: music21 score object
    """
    # Per-voice index of the slur (hold) symbol.
    slur_indexes = [
        note2index[SLUR_SYMBOL]
        for note2index in self.note2index_dicts
    ]
    score = music21.stream.Score()
    num_voices = tensor_score.size(0)
    # Use SATB part names only for 4-voice chorales.
    name_parts = (num_voices == 4)
    part_names = ['Soprano', 'Alto', 'Tenor', 'Bass']
    for voice_index, (voice, index2note, slur_index) in enumerate(
            zip(tensor_score, self.index2note_dicts, slur_indexes)):
        add_fermata = False
        if name_parts:
            part = stream.Part(id=part_names[voice_index],
                               partName=part_names[voice_index],
                               partAbbreviation=part_names[voice_index],
                               instrumentName=part_names[voice_index])
        else:
            part = stream.Part(id='part' + str(voice_index))
        # Run-length decoding state: ticks accumulated for the sounding note.
        dur = 0
        # Cursor into fermata_tensor; NOTE(review): it is only advanced on
        # slurs and on voice-0 onsets, so for voices other than 0 it drifts —
        # harmless here since it is only read when voice_index == 0, but
        # worth confirming against the fermata encoding.
        total_duration = 0
        f = music21.note.Rest()
        for note_index in [n.item() for n in voice]:
            # if it is a played note (i.e. not the slur/hold symbol)
            if not note_index == slur_indexes[voice_index]:
                # add previous note, tagging it with a fermata if one was
                # flagged at its onset
                if dur > 0:
                    f.duration = music21.duration.Duration(
                        dur / self.subdivision)
                    if add_fermata:
                        f.expressions.append(music21.expressions.Fermata())
                        add_fermata = False
                    part.append(f)
                dur = 1
                f = standard_note(index2note[note_index])
                # Fermata flags are read from voice 0 only; the decision for
                # the *current* note is latched and applied when it is flushed.
                if fermata_tensor is not None and voice_index == 0:
                    if fermata_tensor[0, total_duration] == 1:
                        add_fermata = True
                    else:
                        add_fermata = False
                    total_duration += 1
            else:
                # Slur: sustain the sounding note one more tick.
                dur += 1
                total_duration += 1
        # add last note (and its pending fermata, if any)
        f.duration = music21.duration.Duration(dur / self.subdivision)
        if add_fermata:
            f.expressions.append(music21.expressions.Fermata())
            add_fermata = False
        part.append(f)
        score.insert(part)
    return score
def tensor_to_score(self, tensor_score, tensor_chords,
                    realize_chords=False,
                    add_chord_symbols=False):
    """Convert a leadsheet (melody tensor + chord tensors) to a music21 score.

    :param tensor_score: tensor of melody note indexes, one entry per tick
    :param tensor_chords: pair (chord_root_tensor, chord_name_tensor), one
        entry per beat
    :param realize_chords: if True, append an extra Part containing realized
        (voiced) chords
    :param add_chord_symbols: if True, insert chord symbols into the melody
        Part at their beat offsets
    :return: music21.stream.Score
    """
    score = music21.stream.Score()
    part = music21.stream.Part()
    # LEAD: run-length decode the melody (slur symbol extends the held note).
    dur = 0
    f = music21.note.Rest()
    tensor_score_np = tensor_score.numpy().flatten()
    slur_index = self.symbol2index_dicts[self.NOTES][SLUR_SYMBOL]
    for tick_index, note_index in enumerate(tensor_score_np):
        note_index = note_index.item()
        # if it is a played note
        if not note_index == slur_index:
            # add previous note
            if dur > 0:
                f.duration = music21.duration.Duration(dur)
                part.append(f)
            # TODO two types of tick_durations
            dur = self.tick_durations[tick_index % self.subdivision]
            f = standard_note(
                self.index2symbol_dicts[self.NOTES][note_index])
        else:
            dur += self.tick_durations[tick_index % self.subdivision]
    # add last note
    f.duration = music21.duration.Duration(dur)
    part.append(f)

    # CHORD SYMBOLS: insert a symbol at each beat carrying a real chord
    # (skipping slur/start/end/pad/no-chord meta symbols in either stream).
    if add_chord_symbols:
        chordroot2index = self.symbol2index_dicts[self.CHORD_ROOT]
        start_index = chordroot2index[START_SYMBOL]
        end_index = chordroot2index[END_SYMBOL]
        slur_index = chordroot2index[SLUR_SYMBOL]
        pad_index = chordroot2index[PAD_SYMBOL]
        nc_index = chordroot2index[NC]

        chordtype2index = self.symbol2index_dicts[self.CHORD_NAME]
        type_start_index = chordtype2index[START_SYMBOL]
        type_end_index = chordtype2index[END_SYMBOL]
        type_slur_index = chordtype2index[SLUR_SYMBOL]
        type_pad_index = chordtype2index[PAD_SYMBOL]
        type_nc_index = chordtype2index[NC]

        tensor_chords_root, tensor_chords_name = tensor_chords
        tensor_chords_root_np = tensor_chords_root.numpy().flatten()
        tensor_chords_name_np = tensor_chords_name.numpy().flatten()
        for beat_index, (chord_root_index, chord_type_index) \
                in enumerate(zip(tensor_chords_root_np,
                                 tensor_chords_name_np)):
            chord_root_index = chord_root_index.item()
            chord_type_index = chord_type_index.item()
            # if it is a played chord
            # todo check also chord_type_index!
            if (chord_root_index not in [
                slur_index, start_index, end_index, pad_index, nc_index
            ] and chord_type_index not in [
                type_slur_index, type_start_index, type_end_index,
                type_pad_index, type_nc_index
            ]):
                # add chord symbol at its beat offset
                jazz_chord = self.get_jazzchord_from_index(
                    chord_root_index, chord_type_index)
                part.insert(beat_index, jazz_chord)
        score.append(part)
    else:
        score.append(part)

    # REALIZED CHORDS: optional extra Part with voiced chords, run-length
    # decoded over beats (slur symbol extends the sounding chord).
    if realize_chords:
        chordroot2index = self.symbol2index_dicts[self.CHORD_ROOT]
        start_index = chordroot2index[START_SYMBOL]
        end_index = chordroot2index[END_SYMBOL]
        slur_index = chordroot2index[SLUR_SYMBOL]
        pad_index = chordroot2index[PAD_SYMBOL]
        nc_index = chordroot2index[NC]

        chords_part = music21.stream.Part()
        dur = 0
        c = music21.note.Rest()
        tensor_chords_root, tensor_chords_name = tensor_chords
        tensor_chords_root_np = tensor_chords_root.numpy().flatten()
        tensor_chords_name_np = tensor_chords_name.numpy().flatten()
        for (beat_index, (chord_root_index, chord_type_index)) \
                in enumerate(zip(tensor_chords_root_np,
                                 tensor_chords_name_np)):
            chord_root_index = chord_root_index.item()
            chord_type_index = chord_type_index.item()
            # if it is a played note
            if chord_root_index not in [
                slur_index, start_index, end_index, pad_index, nc_index
            ]:
                # add previous note
                if dur > 0:
                    c.duration = music21.duration.Duration(dur)
                    chords_part.append(c)
                dur = 1
                # Best-effort voicing: fall back to a rest if realization
                # fails. Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit — narrowed to Exception.
                try:
                    jazz_chord = self.get_jazzchord_from_index(
                        chord_root_index, chord_type_index)
                    voicing_pitch_list = jazz_chord.get_pitchlist_from_chord()
                    c = music21.chord.Chord(
                        [p.transpose(-12) for p in voicing_pitch_list])
                except Exception:
                    c = music21.note.Rest()
            else:
                dur += 1
        # add last note
        c.duration = music21.duration.Duration(dur)
        chords_part.append(c)
        score.append(chords_part)
    return score