    def part_to_tensor(self, part, part_id, offsetStart, offsetEnd):
        """
        Convert one part of a score into a sequence of note indices.

        :param part: music21 part to encode
        :param part_id: index of the part, used to pick its note<->index dicts
        :param offsetStart: start offset, in quarter lengths
        :param offsetEnd: end offset, in quarter lengths
        :return: torch LongTensor of shape (1, length)
        """
        list_notes_and_rests = list(
            part.flat.getElementsByOffset(
                offsetStart=offsetStart,
                offsetEnd=offsetEnd,
                classList=[music21.note.Note, music21.note.Rest]))
        list_note_strings = [
            n.nameWithOctave for n in list_notes_and_rests if n.isNote
        ]
        length = int((offsetEnd - offsetStart) * self.subdivision)  # in ticks

        # add entries to dictionaries if not present
        # should only be called by make_dataset when transposing
        note2index = self.note2index_dicts[part_id]
        index2note = self.index2note_dicts[part_id]
        for note_name in list_note_strings:
            if note_name not in note2index:
                new_index = len(note2index)
                index2note.update({new_index: note_name})
                note2index.update({note_name: new_index})
                print('Warning: Entry ' + str({new_index: note_name}) +
                      ' added to dictionaries')

        # construct sequence: one (note_index, is_articulated) pair per tick
        j = 0  # index into list_notes_and_rests
        i = 0  # current tick
        t = np.zeros((length, 2))
        is_articulated = True
        num_notes = len(list_notes_and_rests)
        while i < length:
            if j < num_notes - 1:
                # hold the current note until the next one's offset is reached
                next_offset = list_notes_and_rests[j + 1].offset
                if next_offset > i / self.subdivision + offsetStart:
                    t[i, :] = [
                        note2index[standard_name(list_notes_and_rests[j])],
                        is_articulated
                    ]
                    i += 1
                    is_articulated = False
                else:
                    j += 1
                    is_articulated = True
            else:
                t[i, :] = [
                    note2index[standard_name(list_notes_and_rests[j])],
                    is_articulated
                ]
                i += 1
                is_articulated = False
        # onsets keep their note index; held ticks map to SLUR_SYMBOL
        seq = t[:, 0] * t[:, 1] + (1 - t[:, 1]) * note2index[SLUR_SYMBOL]
        tensor = torch.from_numpy(seq).long()[None, :]
        return tensor
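A minimal, self-contained sketch of the final encoding step above: onsets keep their note index, while continuation ticks are replaced by the SLUR_SYMBOL index. The index values here are illustrative, not taken from the dataset.

import numpy as np
import torch

SLUR_INDEX = 3  # hypothetical index of SLUR_SYMBOL in note2index

# one [note_index, is_articulated] pair per tick:
# a C4 (index 0) held for three ticks, then a D4 (index 1) onset
t = np.array([[0., 1.], [0., 0.], [0., 0.], [1., 1.]])

# onsets keep their note index; continuation ticks become SLUR_INDEX
seq = t[:, 0] * t[:, 1] + (1 - t[:, 1]) * SLUR_INDEX
tensor = torch.from_numpy(seq).long()[None, :]
print(tensor)  # tensor([[0, 3, 3, 1]])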
Example #2
    def compute_index_dicts(self):
        print('Computing index dicts')
        self.index2note_dicts = [{} for _ in range(self.num_voices)]
        self.note2index_dicts = [{} for _ in range(self.num_voices)]

        # create and add additional symbols
        note_sets = [set() for _ in range(self.num_voices)]
        for note_set in note_sets:
            note_set.add(SLUR_SYMBOL)
            note_set.add(START_SYMBOL)
            note_set.add(END_SYMBOL)
            note_set.add(REST_SYMBOL)
            note_set.add(PAD_SYMBOL)

        # get all notes: used for computing pitch ranges
        for chorale in tqdm(self.iterator_gen()):
            for part_id, part in enumerate(chorale.parts[:self.num_voices]):
                for n in part.flat.notesAndRests:
                    note_sets[part_id].add(standard_name(n))

        # create tables
        # note: set iteration order is arbitrary, so indices vary between runs
        for note_set, index2note, note2index in zip(note_sets,
                                                    self.index2note_dicts,
                                                    self.note2index_dicts):
            for note_index, note in enumerate(note_set):
                index2note.update({note_index: note})
                note2index.update({note: note_index})
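The two tables are exact inverses of each other. Because the source enumerates a Python set, whose iteration order is not stable across interpreter runs, the indices are only consistent if the dicts are persisted and reloaded. A small sketch with made-up symbols, using sorted() for reproducibility (a deviation from the source, not its behavior):

note_set = {'START', 'END', 'rest', 'C4', 'D4'}
index2note, note2index = {}, {}
for note_index, note in enumerate(sorted(note_set)):
    index2note[note_index] = note
    note2index[note] = note_index
# the tables invert each other
assert note2index[index2note[0]] == 0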
Example #3
    def compute_index_dicts(self):
        if os.path.exists(self.dict_path):
            print('Dictionaries already exist. Reading them now')
            with open(self.dict_path, 'r') as f:
                dicts = [line.rstrip('\n') for line in f]
            assert len(dicts) == 2  # must contain exactly 2 dictionaries
            # the file stores one Python literal per line
            self.index2note_dicts = eval(dicts[0])
            self.note2index_dicts = eval(dicts[1])
            return

        # self.compute_beatmarker_dicts()
        print('Computing note index dicts')
        self.index2note_dicts = [{} for _ in range(self.num_voices)]
        self.note2index_dicts = [{} for _ in range(self.num_voices)]

        # create and add additional symbols
        note_sets = [set() for _ in range(self.num_voices)]
        for note_set in note_sets:
            note_set.add(SLUR_SYMBOL)
            note_set.add(START_SYMBOL)
            note_set.add(END_SYMBOL)
            # note_set.add(PAD_SYMBOL)

        # get all notes: iterate through all scores and fill in the note sets
        count = 0
        for _, score in tqdm(enumerate(self.corpus_it_gen())):
            if count >= self.num_melodies:  # cap the number of scanned scores
                break
            count += 1
            # notes_and_chords(score) yields the lead and the chords as lists
            for part_id, part in enumerate(notes_and_chords(score)):
                for n in part:
                    note_sets[part_id].add(standard_name(n))

        # create tables
        for note_set, index2note, note2index in zip(note_sets,
                                                    self.index2note_dicts,
                                                    self.note2index_dicts):
            for note_index, note in enumerate(note_set):
                index2note.update({note_index: note})
                note2index.update({note: note_index})

        # write as text file for use later
        self.update_index_dicts()
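The reader at the top of this function expects dict_path to contain exactly two lines, each a Python literal that eval() turns back into a list of dicts. update_index_dicts is not shown here, so the writer below is an assumption about that file format, not the actual implementation:

def update_index_dicts_sketch(dict_path, index2note_dicts, note2index_dicts):
    # hypothetical writer matching the reader above:
    # one repr()'d list of dicts per line, read back with eval()
    with open(dict_path, 'w') as f:
        f.write(repr(index2note_dicts) + '\n')
        f.write(repr(note2index_dicts) + '\n')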
Example #4
    def notes_to_lead_tensor(self,
                             notes,
                             length: int,
                             update_dicts: bool = False):
        eps = 1e-4

        # LEAD: one (note_index, is_articulated) pair per tick
        j = 0  # index into notes
        i = 0  # current tick
        t = np.zeros((length, 2))
        is_articulated = True
        num_notes = len(notes)
        current_tick = 0  # offset reached so far

        note2index = self.symbol2index_dicts[self.NOTES]
        index2note = self.index2symbol_dicts[self.NOTES]
        while i < length:
            # update dicts when creating the dataset
            note_name = standard_name(notes[j])
            if update_dicts and note_name not in note2index:
                new_index = len(note2index)
                note2index[note_name] = new_index
                index2note[new_index] = note_name

            note_index = note2index[note_name]
            if j < num_notes - 1:
                # hold the current note until the next one's offset is reached
                if notes[j + 1].offset > current_tick + eps:
                    t[i, :] = [note_index, is_articulated]
                    i += 1
                    # advance by the duration of the tick just consumed
                    current_tick += self.tick_durations[
                        (i - 1) % len(self.tick_values)]
                    is_articulated = False
                else:
                    j += 1
                    is_articulated = True
            else:
                t[i, :] = [note_index, is_articulated]
                i += 1
                is_articulated = False
        # onsets keep their note index; held ticks map to SLUR_SYMBOL
        lead = t[:, 0] * t[:, 1] + (1 - t[:, 1]) * note2index[SLUR_SYMBOL]
        lead_tensor = torch.from_numpy(lead).long()[None, :]

        return lead_tensor
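current_tick is advanced by tick_durations[(i - 1) % len(tick_values)], which assumes tick_durations[k] is the gap between tick_values[k] and the following tick, wrapping around at the end of each beat. The grid below is an illustrative assumption (the actual values depend on how the dataset was configured):

from fractions import Fraction

# hypothetical grid subdividing one beat into ticks (here, triplets)
tick_values = [Fraction(0), Fraction(1, 3), Fraction(2, 3)]
# duration from each tick to the next, wrapping around to the next beat
tick_durations = [tick_values[k + 1] - tick_values[k]
                  for k in range(len(tick_values) - 1)]
tick_durations.append(1 - tick_values[-1])
assert sum(tick_durations) == 1
print(tick_durations)  # [Fraction(1, 3), Fraction(1, 3), Fraction(1, 3)]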
Example #5
    def chords_to_roots_and_types_tensors(self,
                                          chords,
                                          length,
                                          update_dicts: bool = False):
        # CHORDS
        j = 0
        i = 0
        t = np.zeros((length, 2))
        u = np.zeros((length, 2))
        is_articulated = True
        num_chords = len(chords)
        chordroot2index = self.symbol2index_dicts[self.CHORD_ROOT]
        index2chordroot = self.index2symbol_dicts[self.CHORD_ROOT]
        chordname2index = self.symbol2index_dicts[self.CHORD_NAME]
        index2chordname = self.index2symbol_dicts[self.CHORD_NAME]
        while i < length:
            # check if JazzChord
            if isinstance(chords[j], JazzChord):
                # update dicts when creating the dataset
                chord_root = standard_name(chords[j]).split(',')[0]
                chord_name = chords[j].chord_name
                if update_dicts and chord_root not in chordroot2index:
                    new_index = len(chordroot2index)
                    chordroot2index[chord_root] = new_index
                    index2chordroot[new_index] = chord_root
                chord_root_index = chordroot2index[chord_root]
                if update_dicts and chord_name not in chordname2index:
                    new_index = len(chordname2index)
                    chordname2index[chord_name] = new_index
                    index2chordname[new_index] = chord_name
                chord_name_index = chordname2index[chord_name]
            elif isinstance(chords[j], music21.expressions.TextExpression):
                content = chords[j].content
                if update_dicts and content not in chordroot2index:
                    new_index = len(chordroot2index)
                    chordroot2index[content] = new_index
                    index2chordroot[new_index] = content
                if update_dicts and content not in chordname2index:
                    new_index = len(chordname2index)
                    chordname2index[content] = new_index
                    index2chordname[new_index] = content
                chord_root_index = chordroot2index[content]
                chord_name_index = chordname2index[content]
            else:
                # guard: otherwise the indices below would be unbound
                raise ValueError(
                    f'Unexpected chord element of type {type(chords[j])}')

            if j < num_chords - 1:
                # hold the current chord until the next one's offset is reached
                if chords[j + 1].offset > i:
                    t[i, :] = [chord_root_index, is_articulated]
                    u[i, :] = [chord_name_index, is_articulated]
                    i += 1
                    is_articulated = False
                else:
                    j += 1
                    is_articulated = True
            else:
                t[i, :] = [chord_root_index, is_articulated]
                u[i, :] = [chord_name_index, is_articulated]
                i += 1
                is_articulated = False

        # TODO no SLUR_SYMBOL for chords?!
        # seq = t[:, 0] * t[:, 1] + (1 - t[:, 1]) * chordroot2index[SLUR_SYMBOL]
        seq = t[:, 0]
        chord_root_tensor = torch.from_numpy(seq).long()[None, :]
        # seq = u[:, 0] * u[:, 1] + (1 - u[:, 1]) * chordname2index[SLUR_SYMBOL]
        seq = u[:, 0]
        chord_name_tensor = torch.from_numpy(seq).long()[None, :]

        return chord_root_tensor, chord_name_tensor
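As the TODO notes, the articulation column is computed but then discarded: both outputs keep the raw index at every slot, so a held chord is indistinguishable from a re-struck one. A toy illustration of the resulting pair of tensors (all indices are made up):

import numpy as np
import torch

# hypothetical per-slot [index, is_articulated] pairs
t = np.array([[2., 1.], [2., 0.], [5., 1.]])  # chord roots
u = np.array([[0., 1.], [0., 0.], [1., 1.]])  # chord names

# unlike the lead encoding, held slots keep their chord index
# (no SLUR_SYMBOL substitution)
chord_root_tensor = torch.from_numpy(t[:, 0]).long()[None, :]
chord_name_tensor = torch.from_numpy(u[:, 0]).long()[None, :]
print(chord_root_tensor)  # tensor([[2, 2, 5]])
print(chord_name_tensor)  # tensor([[0, 0, 1]])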
Example #6
    def get_score_tensor(self, score):
        """
        Extract the lead tensor from the lead sheet
        :param score: music21 score object
        :return: lead_tensor
        """
        eps = 1e-4
        notes, _ = notes_and_chords(score)
        if not score_on_ticks(score, self.tick_values):
            raise LeadsheetParsingException(
                f'Score {score.metadata.title} has notes not on ticks')

        # add entries to dictionaries if not present
        # should only be called by make_tensor_dataset when transposing
        list_note_strings_and_pitches = [(n.nameWithOctave, n.pitch.midi)
                                         for n in notes if n.isNote]
        note2index = self.note2index_dicts[self.NOTES]
        index2note = self.index2note_dicts[self.NOTES]
        pitch_range = self.pitch_range
        min_pitch, max_pitch = pitch_range
        for note_name, pitch in list_note_strings_and_pitches:
            # collapse out-of-range pitches to a single symbol
            if pitch < min_pitch or pitch > max_pitch:
                note_name = OUT_OF_RANGE
            if note_name not in note2index:
                new_index = len(note2index)
                index2note.update({new_index: note_name})
                note2index.update({note_name: new_index})
                print('Warning: Entry ' + str({new_index: note_name}) +
                      ' added to dictionaries')
                # persist the updated dictionaries
                self.update_index_dicts()

        # construct sequence: one (note_index, is_articulated) pair per tick
        j = 0  # index into notes
        i = 0  # current tick
        length = int(score.highestTime * self.subdivision)
        t = np.zeros((length, 2))
        is_articulated = True
        num_notes = len(notes)
        current_tick = 0  # offset reached so far
        while i < length:
            if j < num_notes - 1:
                # hold the current note until the next one's offset is reached
                if notes[j + 1].offset > current_tick + eps:
                    t[i, :] = [
                        note2index[standard_name(notes[j])], is_articulated
                    ]
                    i += 1
                    # advance by the duration of the tick just consumed
                    current_tick += self.tick_durations[
                        (i - 1) % len(self.tick_values)]
                    is_articulated = False
                else:
                    j += 1
                    is_articulated = True
            else:
                t[i, :] = [note2index[standard_name(notes[j])], is_articulated]
                i += 1
                is_articulated = False
        # onsets keep their note index; held ticks map to SLUR_SYMBOL
        lead = t[:, 0] * t[:, 1] + (1 - t[:, 1]) * note2index[SLUR_SYMBOL]
        # convert to torch tensor
        lead_tensor = torch.from_numpy(lead).long()[None, :]
        return lead_tensor  # , chord_tensor
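The pitch-range clamp above means every out-of-range note collapses to a single OUT_OF_RANGE token before the index lookup. A minimal sketch of that mapping, with an assumed range and a placeholder symbol (the real one comes from the package's constants):

OUT_OF_RANGE = 'OOR'           # placeholder symbol
min_pitch, max_pitch = 60, 84  # hypothetical pitch_range (MIDI C4..C6)

def encoded_name(note_name, midi_pitch):
    # pitches outside the range all share a single OUT_OF_RANGE token
    if midi_pitch < min_pitch or midi_pitch > max_pitch:
        return OUT_OF_RANGE
    return note_name

print(encoded_name('C4', 60))  # 'C4'
print(encoded_name('A0', 21))  # 'OOR'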