def test_noterest_indexer_3(self):
    # When there are a bunch of notes
    expected = pandas.DataFrame({'0': pandas.Series([u'C4' for _ in range(10)])})
    test_score = pandas.DataFrame({'0': pandas.Series([note.Note('C4') for _ in range(10)])})
    nr_indexer = noterest.NoteRestIndexer(test_score)
    actual = nr_indexer.run()['noterest.NoteRestIndexer']
    self.assertTrue(actual.equals(expected))

def all_melodies(file):
    score = music21.converter.parse(file)
    notes = noterest.NoteRestIndexer(score).run()
    h_ints = interval.HorizontalIntervalIndexer(notes).run()
    voices = ['0', '1', '2', '3']
    settings = {'n': 4, 'vertical': voices}
    ngrams = new_ngram.NewNGramIndexer([h_ints], settings).run()
    combined = pandas.concat([notes, ngrams], axis=1)
    voices_list = []
    for voice in voices:
        voice_dict = {}
        for melody in ngrams['new_ngram.NewNGramIndexer'][voice].dropna().tolist():
            condition = combined.loc[:, 'new_ngram.NewNGramIndexer'][voice] == melody
            location = combined.loc[:, 'noterest.NoteRestIndexer'][voice]
            if melody not in voice_dict:
                voice_dict[melody] = location[condition].tolist()
        voices_list.append(voice_dict)
    # merge the per-voice dictionaries into one
    melo_dict = {}
    for dictionary in voices_list:
        for entry in dictionary:
            if entry in melo_dict:
                melo_dict[entry].extend(dictionary[entry])
            else:
                melo_dict[entry] = dictionary[entry]
    return melo_dict

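# Hedged usage sketch for all_melodies(): the path below is hypothetical, and the
# example assumes the same module-level imports the function itself relies on
# (music21, pandas, noterest, interval, new_ngram).
def _example_all_melodies():
    melodies = all_melodies('/path/to/some_piece.krn')  # hypothetical path
    # each key is a 4-event melodic n-gram; each value lists the notes at the
    # offsets where that melody begins, collected across all four voices
    for pattern in sorted(melodies)[:5]:
        print(pattern, melodies[pattern])
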
def v_notes(piece):
    the_score = music21.converter.parse(piece)
    the_notes = noterest.NoteRestIndexer(the_score).run()
    setts = {'quarterLength': 0.5, 'method': 'ffill'}
    off = offset.FilterByOffsetIndexer(the_notes, setts).run()
    all_notes = []
    for i in range(len(the_score.parts)):
        notes = off['offset.FilterByOffsetIndexer'][str(i)]
        part_notes = []
        for note in notes:
            note = str(note)
            if note == 'nan' or note == 'Rest':
                continue
            part_notes.append(note[:-1])  # drop the octave digit, keeping the pitch class
        all_notes.append(part_notes)
    # for each offset slice, keep the distinct pitch classes sounding together
    vert = []
    for note in zip(*all_notes):
        new = sorted(set(note), key=lambda pitch: pitch[0])  # order by letter name
        vert.append(' '.join(new))
    _make_graph(vert, 'vertnotes')

def ngram_count(piece, settings, n):
    ngrams = []
    ngram_freq = {}
    ind_piece = IndexedPiece(piece)
    the_score = music21.converter.parse(piece)
    the_notes = noterest.NoteRestIndexer(the_score).run()
    # copy the settings so the caller's dict is not mutated;
    # horiz_attach_later has to be True for the ngram indexer to work
    horiz_setts = dict(settings)
    horiz_setts['horiz_attach_later'] = True
    horiz = interval.HorizontalIntervalIndexer(the_notes, horiz_setts).run()
    vert = interval.IntervalIndexer(the_notes, settings).run()
    intls = pandas.concat([horiz, vert], axis=1)
    parts = len(the_score.parts)
    # get ngrams between all possible combinations of parts
    for x in range(parts):
        setts = {'mark singles': False, 'continuer': '1', 'n': n,
                 'horizontal': [('interval.HorizontalIntervalIndexer', str(x))]}
        for i in range(x + 1, parts):
            num = str(x) + ',' + str(i)
            setts['vertical'] = [('interval.IntervalIndexer', num)]
            ngram_test = ind_piece.get_data([ngram.NGramIndexer], setts, intls)
            ngrams.extend(ngram_test.values.tolist())
    # count the ngrams, skipping any that contain a rest
    for my_ngram in ngrams:
        my_ngram = ' '.join(my_ngram)
        if 'Rest' in my_ngram:
            continue
        if my_ngram in ngram_freq:
            ngram_freq[my_ngram] += 1
        else:
            ngram_freq[my_ngram] = 1
    return ngram_freq

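# Hedged usage sketch for ngram_count(): the path is hypothetical; the interval
# settings reuse the 'quality' / 'simple or compound' keys that appear elsewhere
# in this file, and n=3 is an arbitrary choice.
def _example_ngram_count():
    intl_settings = {'quality': True, 'simple or compound': 'simple'}
    freqs = ngram_count('/path/to/some_piece.xml', intl_settings, 3)
    # print the ten most frequent rest-free 3-grams
    for gram, count in sorted(freqs.items(), key=lambda item: item[1], reverse=True)[:10]:
        print(count, gram)
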
def parts(piece, title):
    the_score = music21.converter.parse(piece)
    num_parts = len(the_score.parts)
    the_notes = noterest.NoteRestIndexer(the_score).run()
    for i in range(num_parts):
        notes = the_notes['noterest.NoteRestIndexer'][str(i)]
        part_notes = []
        for note in notes:
            part_notes.append(str(note))
        # strip the 'nan' placeholders left where this part has no event
        while 'nan' in part_notes:
            part_notes.remove('nan')
        _make_graph(part_notes, title + '-part' + str(i))

def from_melody(file, melody):
    score = music21.converter.parse(file)
    notes = noterest.NoteRestIndexer(score).run()
    h_ints = interval.HorizontalIntervalIndexer(notes).run()
    voices = ['0', '1', '2', '3']
    settings = {'n': 4, 'vertical': voices}
    ngrams = new_ngram.NewNGramIndexer([h_ints], settings).run()
    combined = pandas.concat([notes, ngrams], axis=1)
    all_voices = []
    for voice in voices:
        condition = combined.loc[:, 'new_ngram.NewNGramIndexer'][voice] == melody
        location = combined.loc[:, 'noterest.NoteRestIndexer'][voice]
        all_voices.extend(location[condition].tolist())
    return all_voices

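# Hedged usage sketch for from_melody(): the path and the melody string are
# hypothetical. The melody must match the NewNGramIndexer output format used
# above (a space-separated sequence of four horizontal intervals); the exact
# token format (quality, brackets) depends on the indexer's default settings.
def _example_from_melody():
    starts = from_melody('/path/to/some_piece.krn', '2 2 -2 -2')  # hypothetical motif
    print(starts)  # the notes at the offsets where that melody begins, in any voice
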
def find_permutations(file, motif):
    score = music21.converter.parse(file)
    notes = noterest.NoteRestIndexer(score).run()
    h_ints = interval.HorizontalIntervalIndexer(notes).run()
    voices = ['0', '1', '2', '3']
    settings = {'n': 4, 'vertical': voices}
    ngrams = new_ngram.NewNGramIndexer([h_ints], settings).run()
    # build every permutation of the motif as a space-separated string
    perms = list(set(' '.join(p) for p in _permute(motif)))
    for motif in perms:
        condition = ngrams['new_ngram.NewNGramIndexer'] == motif
        print(notes[condition].dropna(how='all'))

def find_transformations(file, motif):
    score = music21.converter.parse(file)
    notes = noterest.NoteRestIndexer(score).run()
    h_ints = interval.HorizontalIntervalIndexer(notes).run()
    voices = ['0', '1', '2', '3']
    settings = {'n': 4, 'vertical': voices}
    motifs = new_ngram.NewNGramIndexer([h_ints], settings).run()
    # the motif itself
    condition = motifs['new_ngram.NewNGramIndexer'] == motif
    print(notes[condition].dropna(how='all'))
    # its transformation as computed by _find_inv()
    inv = _find_inv(motif)
    condition = motifs['new_ngram.NewNGramIndexer'] == inv
    print(notes[condition].dropna(how='all'))
    # its transformation as computed by _find_ret()
    ret = _find_ret(motif)
    condition = motifs['new_ngram.NewNGramIndexer'] == ret
    print(notes[condition].dropna(how='all'))

def whole_piece(piece, title):
    the_score = music21.converter.parse(piece)
    the_notes = noterest.NoteRestIndexer(the_score).run()
    all_notes = []
    for i in range(len(the_score.parts)):
        notes = the_notes['noterest.NoteRestIndexer'][str(i)]
        part_notes = []
        for note in notes:
            note = str(note)
            if note != 'nan':
                part_notes.append(note)
        all_notes.append(part_notes)
    _multi_color(all_notes, title)

def main():
    piece_path = "/home/amor/Code/vis-framework/vis/tests/corpus/Kyrie.krn"
    # piece_path = "/home/amor/Code/vis-framework/vis/tests/corpus/bach.xml"
    # piece_path = "/home/amor/Code/vis-framework/vis/tests/corpus/bwv603.xml"
    # piece_path = '/home/amor/Code/vis-framework/vis/tests/corpus/Reimenschnieder/1-026900B_.xml'
    # piece_path = '/home/amor/Code/vis-framework/vis/tests/corpus/Jos2308.mei'
    # piece_path = '/home/amor/Code/vis-framework/vis/tests/corpus/Sanctus.krn'
    ind_piece = IndexedPiece(piece_path)
    test_piece = ind_piece._import_score()
    test_parts = test_piece.parts
    # bwv603 = converter.parse(os.path.join(VIS_PATH, 'tests', 'corpus/bwv603.xml'))
    # test_part = [bwv603.parts[0], bwv603.parts[1], bwv603.parts[2], bwv603.parts[3]]
    setts = {'quality': True, 'simple or compound': 'simple'}
    horiz_setts = {'quality': False, 'simple or compound': 'compound'}
    t0 = time.time()
    actual = noterest.NoteRestIndexer(test_parts).run()
    # filter_setts = {'quarterLength': 2.0, 'method': None}
    # filtered_results = offset.FilterByOffsetIndexer(actual, filter_setts).run()
    # pdb.set_trace()
    dur_ind = meter.DurationIndexer(test_parts).run()
    bs_ind = meter.NoteBeatStrengthIndexer(test_parts).run()
    horiz = interval.HorizontalIntervalIndexer(actual, horiz_setts).run()
    # ind_piece._analyses['noterest'] = actual
    # h_df = ind_piece._get_h_ints(settings=horiz_setts)
    vert_ints = interval.IntervalIndexer(actual, setts).run()
    dissonances = dissonance.DissonanceIndexer([bs_ind, dur_ind, horiz, vert_ints]).run()
    t1 = time.time()
    print('Time taken to run all indexers: ')
    print(t1 - t0)
    pdb.set_trace()

def given_mel(melody, piece, off_setts, intl_setts, percent):
    result = 0
    the_score = music21.converter.parse(piece)
    the_notes = noterest.NoteRestIndexer(the_score).run()
    off = offset.FilterByOffsetIndexer(the_notes, off_setts).run()
    horiz = interval.HorizontalIntervalIndexer(off, intl_setts).run()
    for x in range(len(the_score.parts)):
        part_ints = horiz['interval.HorizontalIntervalIndexer', str(x)].tolist()
        part_ints = [str(intl) for intl in part_ints]
        # drop the 'nan' placeholders before comparing
        while 'nan' in part_ints:
            part_ints.remove('nan')
        result += _compare(melody, part_ints, percent)
    return result

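# Hedged usage sketch for given_mel(): the path is hypothetical, and the melody is
# guessed to be a list of interval strings (matching the part_ints that _compare
# receives); the offset and interval settings reuse keys seen elsewhere in this
# file. The return value's exact meaning depends on _compare.
def _example_given_mel():
    off_setts = {'quarterLength': 1.0, 'method': 'ffill'}
    intl_setts = {'quality': False, 'simple or compound': 'compound'}
    hits = given_mel(['2', '2', '-3'], '/path/to/some_piece.xml', off_setts, intl_setts, 0.8)
    print(hits)  # sum of _compare() results over all parts at an 80% threshold
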
def motivic_count(piece, intl_setts, settings):
    the_score = music21.converter.parse(piece)
    the_notes = noterest.NoteRestIndexer(the_score).run()
    horiz = interval.HorizontalIntervalIndexer(the_notes, intl_setts).run()
    int_dict = {}
    transpose_dict = {}
    for n in range(len(the_score.parts)):
        # find the notes in order to be able to find the transposition later
        my_notes = the_notes['noterest.NoteRestIndexer'][str(n)].tolist()
        intls = horiz['interval.HorizontalIntervalIndexer', str(n)].tolist()
        # convert each entry to a string
        intls = [str(intl) for intl in intls]
        my_notes = [str(note) for note in my_notes]
        # remove 'nan' from both lists
        while 'nan' in intls:
            intls.remove('nan')
        while 'nan' in my_notes:
            my_notes.remove('nan')
        last_pos = len(intls) - 1

        # add a motif to both dictionaries
        def add_to(the_motif, pos):
            notes = "'" + ' '.join(the_motif) + "'"
            if notes in int_dict:
                int_dict[notes][0] += 1
                transpose_dict[notes].append(my_notes[pos])
            else:
                int_dict[notes] = [1]
                transpose_dict[notes] = [my_notes[pos]]

        # create a motif of the given length starting on each possible note
        for i in range(last_pos - settings['length']):
            motif = [intls[i + x] for x in range(settings['length'])]
            if 'Rest' not in motif:
                add_to(motif, i)
    which_dict = {'count': int_dict,
                  'transpositions': transpose_dict,
                  'both': [int_dict, transpose_dict]}
    if settings['which'] in which_dict:
        return which_dict[settings['which']]
    return

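# Hedged usage sketch for motivic_count(): the path is hypothetical; the settings
# keys 'length' and 'which' come straight from the function body above, and the
# interval settings reuse keys seen elsewhere in this file.
def _example_motivic_count():
    intl_setts = {'quality': False, 'simple or compound': 'simple'}
    setts = {'length': 4, 'which': 'both'}
    counts, transpositions = motivic_count('/path/to/some_piece.krn', intl_setts, setts)
    # show the five most frequent motifs and the notes they start on
    for motif in sorted(counts, key=lambda m: counts[m][0], reverse=True)[:5]:
        print(motif, counts[motif][0], transpositions[motif])
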
av_setts = {'show_all': True}
v_setts = {'quality': True, 'simple or compound': 'simple', 'directed': True, 'mp': False}
h_setts = {'quality': False, 'horiz_attach_later': False, 'simple or compound': 'simple',
           'directed': True, 'mp': False}
n_setts = {'n': 5, 'continuer': 'P1', 'horizontal': 'lowest', 'vertical': [('0,4',)],
           'terminator': ['Rest'], 'open-ended': False, 'brackets': False}
n_setts_2 = {'n': 5, 'continuer': 'P1', 'vertical': 'all',
             'terminator': ['Rest'], 'open-ended': False, 'brackets': False}
n_setts_3 = {'n': 2, 'continuer': 'P1', 'vertical': [('0,4',)],
             'terminator': [], 'open-ended': False, 'brackets': False}
# pieces = (IndexedPiece(piece_path2), ind_piece)
# corpus = AggregatedPieces(pieces)
pdb.set_trace()

nr = noterest.NoteRestIndexer(parts).run()
dr = meter.DurationIndexer(parts).run()
ms = meter.MeasureIndexer(parts).run()
bs = meter.NoteBeatStrengthIndexer(parts).run()

t0 = time.time()
hz = interval.HorizontalIntervalIndexer(nr, h_setts).run()
hz.columns.set_levels(('Horiz_nsd',), level=0, inplace=True)
av = active_voices.ActiveVoicesIndexer(nr, av_setts).run()
av = pandas.concat([av] * 5, axis=1, ignore_index=True)
av.columns = [('av', '0'), ('av', '1'), ('av', '2'), ('av', '3'), ('av', '4')]
t1 = time.time()
print(str(t1 - t0))

vt = interval.IntervalIndexer(nr, v_setts).run()
ng = new_ngram.NewNGramIndexer((vt, hz), n_setts).run()
ng_2 = new_ngram.NewNGramIndexer((hz,), n_setts_2).run()

def _get_noterest(self):
    """Used internally by get_data() to cache and retrieve results from the
    noterest.NoteRestIndexer."""
    if 'noterest' not in self._analyses:
        self._analyses['noterest'] = noterest.NoteRestIndexer(
            self._get_m21_nrc_objs_no_tied()).run()
    return self._analyses['noterest']

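# Hedged sketch of the caching behaviour above: because the result is stored in
# self._analyses, repeated calls return the very same DataFrame object and the
# NoteRestIndexer only runs once per piece. 'ind_piece' is a hypothetical
# IndexedPiece instance.
def _example_noterest_cache(ind_piece):
    first = ind_piece._get_noterest()
    again = ind_piece._get_noterest()
    assert first is again  # served from the cache, not recomputed
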
def vertical(piece, pair, settings, title):
    ind_piece = IndexedPiece(piece)
    # get the notes
    the_score = music21.converter.parse(piece)
    the_notes = noterest.NoteRestIndexer(the_score).run()
    setts = {'quarterLength': 1.0, 'method': 'ffill'}
    off = offset.FilterByOffsetIndexer(the_notes, setts).run()
    vert = interval.IntervalIndexer(off, settings).run()
    my_pair = vert['interval.IntervalIndexer', pair]
    piece_range = int(the_notes.last_valid_index())
    # collect the interval sounding at each offset, skipping rests and duplicates
    intervals = []
    for x in range(piece_range):
        name = [str(my_pair.get(x))]
        new_name = []
        for note in name:
            if note != 'Rest' and note not in new_name:
                new_name.append(note)
        intervals.append(new_name)
    nodes = []
    for intl in intervals:
        if intl:
            nodes.append(' '.join(sorted(intl)))
    gr = nx.DiGraph()
    # count how often each node and each transition between nodes occurs
    node_freq = {}
    edge_freq = {}
    for i in range(len(nodes)):
        if nodes[i] in node_freq:
            node_freq[nodes[i]] += 1
        else:
            node_freq[nodes[i]] = 1
        if i + 1 < len(nodes):
            edge = nodes[i] + ' - ' + nodes[i + 1]
            if nodes[i] == nodes[i + 1]:
                pass
            elif edge in edge_freq:
                edge_freq[edge] += 1
            else:
                edge_freq[edge] = 1
    # add nodes and edges, skipping 'nan' entries, empty nodes, and self-loops
    for e in range(len(nodes) - 1):
        if 'nan' in nodes[e] or 'nan' in nodes[e + 1]:
            continue
        if not nodes[e] or not nodes[e + 1]:
            continue
        gr.add_node(nodes[e], frequency=node_freq[nodes[e]])
        gr.add_node(nodes[e + 1], frequency=node_freq[nodes[e + 1]])
        if nodes[e] != nodes[e + 1]:
            gr.add_edge(nodes[e], nodes[e + 1])
    # scale node sizes by frequency and colour edges by transition frequency
    sizes = [freq * 100 for freq in node_freq.values()]
    edges = gr.edges()
    weights = [edge_freq[edge[0] + ' - ' + edge[1]] for edge in edges]
    nx.draw_graphviz(gr, node_size=sizes, edge_color=weights, edge_cmap=plt.cm.Blues,
                     node_color='#A0CBE2', width=4, arrows=False)
    fig = plt.gcf()
    fig.set_size_inches(18.5, 13.5)
    plt.savefig('output/graphs/results/' + title + '.png', facecolor='#97b9c3',
                transparent=True)
    plt.clf()

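# Hedged usage sketch for vertical(): the path is hypothetical; the pair string
# follows the 'part,part' convention used elsewhere in this file (e.g. '0,3' for
# the outer voices of a four-voice piece), and the interval settings reuse the
# 'quality' / 'simple or compound' keys seen above. The PNG is written under
# output/graphs/results/.
def _example_vertical():
    intl_settings = {'quality': True, 'simple or compound': 'simple'}
    vertical('/path/to/some_piece.xml', '0,3', intl_settings, 'example-title')
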
def test_noterest_indexer_1(self):
    # When the parts are empty
    expected = pandas.DataFrame({'0': pandas.Series(), '1': pandas.Series()})
    nr_indexer = noterest.NoteRestIndexer(expected)
    actual = nr_indexer.run()['noterest.NoteRestIndexer']
    self.assertTrue(actual.equals(expected))