Example #1
File: MEI2Solr.py Project: lexpar/Rodan
# Assumed import for the music21 helper used below:
from music21.interval import convertSemitoneToSpecifierGeneric

def getIntervals(semitones, pnames):
    """ Get quality (major, minor, etc.) invariant interval name and direction for example, an ascending 
        major second and an ascending minor second will both be encoded as 'u2'. the only tritone to occur is between 
        b and f, in the context of this application we will assume that the b will always be sung as b 
        flat. So a tritone found in the music is never encoded as a tritone in our database; it will instead always be 
        represented as either a fifth or a fourth, depending on inversion. If the one wishes to search for tritones, 
        they may use the semitones field.
    """
    intervals = []
    for z, interval in enumerate(semitones):
        if interval == 0:
            intervals.append('r')
        else:
            if interval > 0:
                direction = 'u'
            else:
                direction = 'd'
            # A tritone (+/-6 semitones) is respelled as a fourth or a fifth,
            # depending on whether the lower note is the (assumed) b flat.
            if interval == 6:
                if pnames[z] == 'b':
                    size = 5
                else:
                    size = 4
            elif interval == -6:
                if pnames[z] == 'b':
                    size = 4
                else:
                    size = 5
            else: 
                size = abs(int(convertSemitoneToSpecifierGeneric(interval)[1]))

            intervals.append("{0}{1}".format(direction, str(size)))

    return "_".join(intervals)
Example #2
    def transposed_score_and_metadata_tensors(self, chorale, semitone):
        '''
        Convert a chorale to a tuple (chorale_tensor, metadata_tensor);
        the original chorale is transposed by `semitone` semitones.
        '''
        # 1. Transpose:
        #    Compute the most "natural" interval given # of semi-tones
        #    - interval_type: "M"-major, "m"-minor, "A"-augmented, "P"-perfect, "d"-diminished
        #    - interval_step: 1st, 2nd, 3rd, 4th, ...
        #    ex) ~(4): C-C#, C#-D, D-D#, D#-E --> C-E --> ('M',3) : major 3rd
        interval_type, interval_step = interval.convertSemitoneToSpecifierGeneric(
            semitone)
        interval_name = interval_type + str(interval_step)
        trans_interval = interval.Interval(interval_name)

        chorale_transposed = chorale.transpose(trans_interval)
        # .highestTime: end time of the note with the highest offset (= last note);
        # usually equals the stream's .duration.quarterLength
        chorale_tensor = self.chorale_to_tensor(
            chorale_transposed,
            offsetStart=0.,
            offsetEnd=chorale_transposed.flat.highestTime)

        metadatas_tensor = self.metadata_to_tensor(chorale_transposed)

        return chorale_tensor, metadatas_tensor
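As a hedged sanity check of the conversion step above (plain music21, nothing assumed from this codebase): 4 semitones maps to ('M', 3), and the resulting interval transposes by 4 semitones.

from music21 import interval

specifier, generic = interval.convertSemitoneToSpecifierGeneric(4)
print(specifier, generic)  # M 3
print(interval.Interval(specifier + str(generic)).semitones)  # 4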
Example #4
from music21 import interval  # assumed import

def get_transpostion_interval_from_semitone(semi_tone):
    """
    Converts semi-tone to music21 interval
    :param semi_tone: int, -12 to +12
    :return: music21.Interval object
    """
    # compute the most "natural" interval given a number of semi-tones
    interval_type, interval_nature = interval.convertSemitoneToSpecifierGeneric(
        semi_tone)
    transposition_interval = interval.Interval(
        str(interval_nature) + interval_type)
    return transposition_interval
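A hedged usage sketch for the helper above. Note the string it builds puts the number first (e.g. '5P'); this still appears to parse because music21 extracts the digits from the interval string wherever they occur.

itv = get_transpostion_interval_from_semitone(7)  # +7 semitones
print(itv.semitones)  # expected: 7 (a perfect fifth)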
Example #5
def export_dataset(chorale_list, voice_ids=[SOP_INDEX], file_path=None):
    X = []  # note: unused in this example; inputs are written directly to file_path
    index2notes, note2indexes = create_index_dicts(chorale_list,
                                                   voice_ids=voice_ids)

    min_max_midi_pitches = np.array(
        list(map(lambda d: _min_max_midi_pitch(d.values()), index2notes)))
    min_midi_pitches = min_max_midi_pitches[:, 0]
    max_midi_pitches = min_max_midi_pitches[:, 1]
    with open(file_path, 'w') as f:
        for chorale_file in tqdm(chorale_list):
            try:
                chorale = converter.parse(chorale_file)

                midi_pitches = [[
                    n.pitch.midi for n in chorale.parts[voice_id].flat.notes
                ] for voice_id in voice_ids]
                min_midi_pitches_current = np.array(
                    [min(l) for l in midi_pitches])
                max_midi_pitches_current = np.array(
                    [max(l) for l in midi_pitches])
                min_transposition = max(min_midi_pitches -
                                        min_midi_pitches_current)
                max_transposition = min(max_midi_pitches -
                                        max_midi_pitches_current)
                all_transpositions = []
                for semi_tone in range(min_transposition,
                                       max_transposition + 1):
                    try:
                        # necessary, won't transpose correctly otherwise
                        interval_type, interval_nature = interval.convertSemitoneToSpecifierGeneric(
                            semi_tone)
                        transposition_interval = interval.Interval(
                            str(interval_nature) + interval_type)
                        chorale_tranposed = chorale.transpose(
                            transposition_interval)
                        inputs = chorale_to_inputs(chorale_tranposed,
                                                   voice_ids=voice_ids,
                                                   index2notes=index2notes,
                                                   note2indexes=note2indexes)
                        f.write(' '.join(
                            list(
                                map(lambda x: index2notes[SOP_INDEX][x],
                                    inputs[0]))))
                        f.write('\n')
                    except IndexError:
                        pass

            except (AttributeError, IndexError):
                pass
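The min/max transposition bounds above keep every transposed voice within the pitch range covered by the index dicts. A worked sketch with made-up numbers:

import numpy as np

# Made-up ranges: the index dicts cover MIDI 60..72, the current chorale
# spans 62..70, so any shift in [-2, +2] keeps every note representable.
min_midi_pitches = np.array([60])
max_midi_pitches = np.array([72])
min_midi_pitches_current = np.array([62])
max_midi_pitches_current = np.array([70])
print(max(min_midi_pitches - min_midi_pitches_current))  # -2
print(min(max_midi_pitches - max_midi_pitches_current))  # 2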
Example #6
    def transposed_score_and_metadata_tensors(self, score, semi_tone):
        """
        Convert chorale to a couple (chorale_tensor, metadata_tensor),
        the original chorale is transposed semi_tone number of semi-tones
        :param chorale: music21 object
        :param semi_tone:
        :return: couple of tensors
        """
        # transpose
        # compute the most "natural" interval given a number of semi-tones
        interval_type, interval_nature = interval.convertSemitoneToSpecifierGeneric(
            semi_tone)
        transposition_interval = interval.Interval(
            str(interval_nature) + interval_type)

        chorale_tranposed = score.transpose(transposition_interval)
        chorale_tensor = self.get_score_tensor(
            chorale_tranposed,
            offsetStart=0.,
            offsetEnd=chorale_tranposed.flat.highestTime)
        metadatas_transposed = self.get_metadata_tensor(chorale_tranposed)
        return chorale_tensor, metadatas_transposed
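A small sketch of the offsetEnd computation used above, on a corpus chorale that ships with music21:

from music21 import corpus

# .flat.highestTime is the end offset (in quarter lengths) of the last
# event in the flattened score.
s = corpus.parse('bwv66.6')
print(s.flat.highestTime)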
Example #7
def make_dataset(chorale_list,
                 dataset_name,
                 voice_ids=voice_ids_default,
                 transpose=False,
                 metadatas=None):
    X = []
    X_metadatas = []
    index2notes, note2indexes = create_index_dicts(chorale_list,
                                                   voice_ids=voice_ids)

    # todo clean this part
    min_max_midi_pitches = np.array(
        list(map(lambda d: _min_max_midi_pitch(d.values()), index2notes)))
    min_midi_pitches = min_max_midi_pitches[:, 0]
    max_midi_pitches = min_max_midi_pitches[:, 1]
    for chorale_file in tqdm(chorale_list):
        try:
            chorale = converter.parse(chorale_file)
            if transpose:
                midi_pitches = [[
                    n.pitch.midi for n in chorale.parts[voice_id].flat.notes
                ] for voice_id in voice_ids]
                min_midi_pitches_current = np.array(
                    [min(l) for l in midi_pitches])
                max_midi_pitches_current = np.array(
                    [max(l) for l in midi_pitches])
                min_transposition = max(min_midi_pitches -
                                        min_midi_pitches_current)
                max_transposition = min(max_midi_pitches -
                                        max_midi_pitches_current)
                for semi_tone in range(min_transposition,
                                       max_transposition + 1):
                    try:
                        # necessary, won't transpose correctly otherwise
                        interval_type, interval_nature = interval.convertSemitoneToSpecifierGeneric(
                            semi_tone)
                        transposition_interval = interval.Interval(
                            str(interval_nature) + interval_type)
                        chorale_tranposed = chorale.transpose(
                            transposition_interval)
                        inputs = chorale_to_inputs(chorale_tranposed,
                                                   voice_ids=voice_ids,
                                                   index2notes=index2notes,
                                                   note2indexes=note2indexes)
                        md = []
                        if metadatas:
                            for metadata in metadatas:
                                # todo add this
                                if metadata.is_global:
                                    pass
                                else:
                                    md.append(
                                        metadata.evaluate(chorale_tranposed))
                        X.append(inputs)
                        X_metadatas.append(md)
                    except KeyError:
                        print('KeyError: File ' + chorale_file + ' skipped')
                    except FloatingKeyException:
                        print('FloatingKeyException: File ' + chorale_file +
                              ' skipped')
            else:
                print("Warning: no transposition! shouldn't be used!")
                inputs = chorale_to_inputs(chorale,
                                           voice_ids=voice_ids,
                                           index2notes=index2notes,
                                           note2indexes=note2indexes)
                X.append(inputs)

        except (AttributeError, IndexError):
            pass

    dataset = (X, X_metadatas, voice_ids, index2notes, note2indexes, metadatas)
    pickle.dump(dataset, open(dataset_name, 'wb'), pickle.HIGHEST_PROTOCOL)
    print(str(len(X)) + ' files written in ' + dataset_name)
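A hedged usage sketch for reloading a dataset written by make_dataset above; 'bach_dataset.pickle' is a made-up file name.

import pickle

with open('bach_dataset.pickle', 'rb') as f:
    (X, X_metadatas, voice_ids,
     index2notes, note2indexes, metadatas) = pickle.load(f)
print(len(X), 'chorales;', len(index2notes[0]), 'symbols in the first voice map')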