Example #1
 def _get_noterest(self):
     """Used internally by get() to cache and retrieve results from the
     noterest.NoteRestIndexer."""
     if 'noterest' not in self._analyses:
         self._analyses['noterest'] = noterest.NoteRestIndexer(
             self._get_m21_nrc_objs_no_tied()).run()
     return self._analyses['noterest']
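The method above memoizes the indexer's result in the piece's _analyses dictionary, so repeated get() calls reuse the cached DataFrame instead of re-running the indexer. A minimal standalone sketch of the same caching pattern, with a hypothetical run_noterest callable standing in for noterest.NoteRestIndexer(...).run():

# Minimal sketch of the caching pattern used by _get_noterest() above.
# `run_noterest` is a hypothetical stand-in for building and running the
# NoteRestIndexer; only the memoization is illustrated here.
class CachedAnalyses:
    def __init__(self, run_noterest):
        self._analyses = {}            # analysis name -> cached result
        self._run_noterest = run_noterest

    def get_noterest(self):
        if 'noterest' not in self._analyses:          # compute once...
            self._analyses['noterest'] = self._run_noterest()
        return self._analyses['noterest']             # ...reuse thereafter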
Example #2
def all_melodies(file):
    """Map each 4-interval melodic n-gram in the piece to the notes on which
    it begins, gathered from all four voices."""
    file = music21.converter.parse(file)
    notes = noterest.NoteRestIndexer(file).run()
    h_ints = interval.HorizontalIntervalIndexer(notes).run()

    voices = ['0', '1', '2', '3']
    settings = {'n': 4, 'vertical': voices}
    ngrams = new_ngram.NewNGramIndexer([h_ints], settings).run()

    combined = pandas.concat([notes, ngrams], axis=1)

    voices_list = []
    for voice in voices:
        voice_dict = {}
        melodies = ngrams['new_ngram.NewNGramIndexer'][voice].dropna().tolist()
        for melody in melodies:
            # Offsets at which this voice states the n-gram...
            condition = combined['new_ngram.NewNGramIndexer'][voice] == melody
            # ...and the notes sounding in this voice at those offsets.
            location = combined['noterest.NoteRestIndexer'][voice]
            if melody not in voice_dict:
                voice_dict[melody] = location[condition].tolist()
        voices_list.append(voice_dict)

    # Merge the per-voice dictionaries into one melody -> starting-notes map.
    melo_dict = {}
    for dictionary in voices_list:
        for entry in dictionary:
            if entry in melo_dict:
                melo_dict[entry].extend(dictionary[entry])
            else:
                melo_dict[entry] = dictionary[entry]

    return melo_dict
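A hedged usage sketch for all_melodies(); the file path is a placeholder, and the snippet assumes the same music21/pandas and vis-style indexer imports the function above relies on:

# Hypothetical call: 'four_voice_piece.xml' is a placeholder path.
melodies = all_melodies('four_voice_piece.xml')
for ngram, starts in list(melodies.items())[:5]:
    # Each key is a 4-interval melodic n-gram; each value lists the notes
    # on which that figure begins, gathered from every voice.
    print(ngram, starts)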
Example #3
 def test_noterest_indexer_3(self):
     # When there are a bunch of notes
     expected = pandas.DataFrame(
         {'0': pandas.Series([u'C4' for _ in range(10)])})
     test_score = pandas.DataFrame(
         {'0': pandas.Series([note.Note('C4') for i in range(10)])})
     nr_indexer = noterest.NoteRestIndexer(test_score)
     actual = nr_indexer.run()['noterest.NoteRestIndexer']
     self.assertTrue(actual.equals(expected))
Example #4
 def test_noterest_indexer_1(self):
     # When the parts are empty
     expected = pandas.DataFrame({
         '0': pandas.Series(),
         '1': pandas.Series()
     })
     nr_indexer = noterest.NoteRestIndexer(expected)
     actual = nr_indexer.run()['noterest.NoteRestIndexer']
     self.assertTrue(actual.equals(expected))
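Following the two tests above, a hedged sketch of indexing a part that mixes notes and rests; it assumes the indexer renders music21 Rest objects as the string 'Rest' (the 'terminator': ['Rest'] setting in Example #8 below points the same way):

# Sketch only: a one-voice DataFrame mixing Note and Rest objects.
mixed_score = pandas.DataFrame(
    {'0': pandas.Series([note.Note('C4'), note.Rest(), note.Note('D4')])})
mixed_result = noterest.NoteRestIndexer(mixed_score).run()['noterest.NoteRestIndexer']
print(mixed_result['0'].tolist())  # expected something like ['C4', 'Rest', 'D4']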
Example #5
def from_melody(file, melody):
    """Return the notes on which the given 4-interval melodic n-gram begins,
    gathered from all four voices of the piece."""
    file = music21.converter.parse(file)

    notes = noterest.NoteRestIndexer(file).run()
    h_ints = interval.HorizontalIntervalIndexer(notes).run()

    voices = ['0', '1', '2', '3']
    settings = {'n': 4, 'vertical': voices}
    ngrams = new_ngram.NewNGramIndexer([h_ints], settings).run()

    combined = pandas.concat([notes, ngrams], axis=1)

    all_voices = []
    for voice in voices:
        # Offsets at which this voice states the requested n-gram...
        condition = combined['new_ngram.NewNGramIndexer'][voice] == melody
        # ...and the notes sounding in this voice at those offsets.
        location = combined['noterest.NoteRestIndexer'][voice]
        all_voices.extend(location[condition].tolist())
    return all_voices
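A hedged usage sketch for from_melody(); both the path and the melody string are placeholders, and the exact spelling of the intervals depends on the HorizontalIntervalIndexer settings in use:

# Hypothetical call: look up where a four-interval figure begins in any voice.
# The interval spelling below is illustrative only.
starting_notes = from_melody('four_voice_piece.xml', 'M2 M2 m2 P4')
print(starting_notes)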
Example #6
def find_permutations(file, motif):
    """For every permutation of the motif's intervals, print the notes at the
    offsets where that permutation occurs in the piece."""

    file = music21.converter.parse(file)
    notes = noterest.NoteRestIndexer(file).run()
    h_ints = interval.HorizontalIntervalIndexer(notes).run()

    voices = ['0', '1', '2', '3']
    settings = {'n': 4, 'vertical': voices}

    ngrams = new_ngram.NewNGramIndexer([h_ints], settings).run()

    # Build every distinct permutation of the motif as a space-joined n-gram string.
    perms = list(set(' '.join(p) for p in _permute(motif)))

    for motif in perms:
        # Mask the note columns wherever some voice states this permutation.
        condition = ngrams['new_ngram.NewNGramIndexer'] == motif
        print(notes['noterest.NoteRestIndexer'][condition].dropna(how='all'))
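find_permutations() depends on a _permute() helper that is not shown on this page. A minimal sketch of what such a helper might look like, assuming the motif is passed as a sequence of interval tokens and every ordering of them is wanted:

# Hypothetical sketch of the missing _permute() helper; the real project's
# implementation may differ.
from itertools import permutations

def _permute(motif):
    return list(permutations(motif))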
Example #7
def main():
    piece_path = "/home/amor/Code/vizitka/vis/tests/corpus/Kyrie.krn"
    # piece_path = "/home/amor/Code/vizitka/vis/tests/corpus/bach.xml"
    # piece_path = "/home/amor/Code/vizitka/vis/tests/corpus/bwv603.xml"
    # piece_path = '/home/amor/Code/vizitka/vis/tests/corpus/Reimenschnieder/1-026900B_.xml'
    #piece_path = '/home/amor/Code/vizitka/vis/tests/corpus/Jos2308.mei'
    # piece_path = '/home/amor/Code/vizitka/vis/tests/corpus/Sanctus.krn'
    ind_piece = IndexedPiece(piece_path)
    test_piece = ind_piece._import_score()
    test_parts = test_piece.parts

    # bwv603 = converter.parse(os.path.join(VIS_PATH, 'tests', 'corpus/bwv603.xml'))
    # test_part = [bwv603.parts[0], bwv603.parts[1], bwv603.parts[2], bwv603.parts[3]]

    setts = {'quality': True, 'simple or compound': 'simple'}
    horiz_setts = {'quality': False, 'simple or compound': 'compound'}

    t0 = time.time()
    actual = noterest.NoteRestIndexer(test_parts).run()

    # filter_setts = {'quarterLength': 2.0, 'method':None}
    # filtered_results = offset.FilterByOffsetIndexer(actual, filter_setts).run()
    # pdb.set_trace()
    dur_ind = meter.DurationIndexer(test_parts).run()
    bs_ind = meter.NoteBeatStrengthIndexer(test_parts).run()
    horiz = interval.HorizontalIntervalIndexer(actual, horiz_setts).run()
    # ind_piece._analyses['noterest'] = actual
    # h_df = ind_piece._get_h_ints(settings=horiz_setts)
    vert_ints = interval.IntervalIndexer(actual, setts).run()
    dissonances = dissonance.DissonanceIndexer(
        [bs_ind, dur_ind, horiz, vert_ints]).run()

    t1 = time.time()
    print('Time taken to run all indexers: ')
    print(t1 - t0)

    pdb.set_trace()
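The commented-out lines in main() hint at regularising the results with the offset filter; a hedged sketch of running that step on its own, reusing the settings shown in those comments (the file path is a placeholder):

# Sketch based on the commented-out FilterByOffsetIndexer lines above.
parts = IndexedPiece('piece.xml')._import_score().parts   # placeholder path
notes = noterest.NoteRestIndexer(parts).run()
filter_setts = {'quarterLength': 2.0, 'method': None}
filtered = offset.FilterByOffsetIndexer(notes, filter_setts).run()
print(filtered.head())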
Example #8
    'n': 3,
    'continuer': 'P1',
    'horizontal': 'lowest',
    'vertical': 'all',
    'terminator': ['Rest'],
    'open-ended': False,
    'brackets': False
}
w = 6  # w is for window
ends = []

for number, piece_path in enumerate(pieces):
    ind_piece = IndexedPiece(piece_path)
    piece = ind_piece._import_score()
    parts = piece.parts
    nr = noterest.NoteRestIndexer(parts).run()
    dr = meter.DurationIndexer(parts).run()
    ms = meter.MeasureIndexer(parts).run()
    bs = meter.NoteBeatStrengthIndexer(parts).run()
    hz = interval.HorizontalIntervalIndexer(nr, h_setts).run()
    hz2 = interval.HorizontalIntervalIndexer(nr, h_setts2).run()
    vt = interval.IntervalIndexer(nr, v_setts).run()
    # ng = new_ngram.NewNGramIndexer((vt, hz2), n_setts).run()
    ds = dissonance.DissonanceIndexer([bs, dr, hz, vt]).run()
    av = active_voices.ActiveVoicesIndexer(nr).run()
    av_sa = active_voices.ActiveVoicesIndexer(nr, {'show_all': True}).run()

    voice_dr_means = []
    # Attack-density analysis for each voice. The whole-piece analysis is probably what matters most, though.
    for x in range(len(nr.columns)):
        mask = nr.iloc[:, x].dropna()