def generate_targets(self):
    """Build the (piece, piece, target) training triples from self.pair_dict.

    Pairs already present in pair_dict keep their stored target value; every
    other unordered combination of the file names appearing in pair_dict is
    added once with a target of 0.  The result is cached on self.targets.

    :return: list of (piece0, piece1, target) tuples
    """
    if self.targets:
        return self.targets
    # gather all file names appearing in any pair (keys are 2-tuples)
    names = set()
    for key in self.pair_dict:
        names.update(key)
    out_dict = self.pair_dict.copy()
    # every unordered combination not already labelled gets target 0
    # (only one direction per pair is stored, since the reverse is checked)
    for mid1 in names:
        for mid2 in names:
            if mid1 == mid2:
                continue
            if (mid1, mid2) in out_dict or (mid2, mid1) in out_dict:
                continue
            out_dict[(mid1, mid2)] = 0
    triples = []
    for (f0, f1), target in out_dict.items():
        # names carrying the prefix are pre-loaded in pieces_dict;
        # anything else is loaded from disk on demand
        p0 = self.pieces_dict[f0] if self.prefix in f0 else data.Piece(f0)
        p1 = self.pieces_dict[f1] if self.prefix in f1 else data.Piece(f1)
        triples.append((p0, p1, target))
    self.targets = triples
    return triples
def __init__(self, type, pieces):
    '''
    Build forward and reverse Markov models over a category of pieces.

    type is a str describing the category that the training pieces are in
    pieces is a set of the pieces in that category
    '''
    self.type = type
    self.pieces = set(pieces)
    mm = cmm.Markov()
    reverse_mm = cmm.Markov()
    c = patterns.fetch_classifier()
    # segmentation off by default, all_keys off by default
    # this should automatically transpose everything to C major
    segmentation = False
    all_keys = False
    for p in self.pieces:
        print(p)
        musicpiece = data.Piece(p)
        _mm = cmm.piece_to_markov_model(musicpiece, c, segmentation, all_keys)
        mm = mm.add_model(_mm)
        # reverse each state chain in place to build the backwards model
        # (the original kept an unused alias of state_chains here; dropped)
        _mm.state_chains = [chain[::-1] for chain in _mm.state_chains]
        reverse_mm = reverse_mm.add_model(_mm)
    self.mm = mm
    self.rev_mm = reverse_mm
def generate_targets_subset(self):
    """Produce the same list as generate_targets() but cap the negatives.

    The number of added target=0 pairs is limited to
    4 * len(pair_dict) ** 1.5, keeping zero-target elements (roughly
    linearly) proportional to the number of target=1 elements.
    The result is cached on self.targets.

    :return: list of (piece0, piece1, target) tuples
    """
    if self.targets:
        return self.targets
    # gather all file names appearing in any pair (keys are 2-tuples)
    names = set()
    for key in self.pair_dict:
        names.update(key)
    out_dict = self.pair_dict.copy()
    # cap on zero-target pairs; loop-invariant, so computed once
    limit = 4 * len(self.pair_dict) ** 1.5
    counter = 0
    for mid1 in names:
        for mid2 in names:
            if mid1 == mid2:
                continue
            if (mid1, mid2) in out_dict or (mid2, mid1) in out_dict:
                continue
            if counter <= limit:
                counter += 1
                out_dict[(mid1, mid2)] = 0
    triples = []
    for (f0, f1), target in out_dict.items():
        # names carrying the prefix are pre-loaded in pieces_dict;
        # anything else is loaded from disk on demand
        p0 = self.pieces_dict[f0] if self.prefix in f0 else data.Piece(f0)
        p1 = self.pieces_dict[f1] if self.prefix in f1 else data.Piece(f1)
        triples.append((p0, p1, target))
    self.targets = triples
    return triples
def add_piece(self, piece):
    '''
    Add a new piece to the model.

    :param piece: location of midi
    :return: None
    '''
    if piece in self.pieces:
        print(piece + " already in model.")
        return
    # record the piece so the duplicate guard above actually works
    # (the original never added it, so re-adding was silently possible)
    self.pieces.add(piece)
    c = patterns.fetch_classifier()
    segmentation = False
    # NOTE(review): __init__ builds the model with all_keys=False;
    # confirm True is intended here rather than an inconsistency.
    all_keys = True
    musicpiece = data.Piece(piece)
    _mm = cmm.piece_to_markov_model(musicpiece, c, segmentation, all_keys)
    self.mm = self.mm.add_model(_mm)
    # reverse each state chain to update the backwards model
    # (the original kept an unused alias of state_chains here; dropped)
    _mm.state_chains = [chain[::-1] for chain in _mm.state_chains]
    self.rev_mm = self.rev_mm.add_model(_mm)
] l.append(d) return l if __name__ == '__main__': svc = svm.SVC(kernel='rbf', C=10000) rforest = RandomForestClassifier(n_estimators=100) lr = linear_model.LogisticRegression(C=1) if len(sys.argv) == 1: max_ = 0 count, scores = 0, [] truth = chord_truths()[0] musicpiece = data.Piece(truth['piece']) while count < 30: #cc = chord_classifier(rforest) #cc.train() cc = fetch_classifier() allbars = cc.predict(musicpiece) s = 0 for i in range(len(truth['chords'])): if truth['chords'][i] == allbars[i]: s += 1 print('Correct Score: {}/{}'.format(s, len(truth['chords']))) count += 1 scores.append(s) print('Count =', count) #if s > max_ and s > 38:
def recommend(piece1, style, training, typ, num_recs=4, piece2=None):
    '''
    The handler for an API call.

    :param piece1: path of the (incomplete) midi piece
    :param style: style/category name, used in the recommender cache key
    :param training: iterable of training piece names
    :param typ: 'pre', 'bridge' or 'post' -- where generated material goes
    :param num_recs: number of candidate generations to attempt
    :param piece2: second midi path, required when typ == 'bridge'
    :return: None on success (writes 'rec.mid'); 0 on bad arguments
    '''
    # check if a recommender for this style/training set already exists
    # NOTE(review): hash() of a frozenset of strings is not stable across
    # interpreter runs (hash randomization) -- confirm the cache is meant
    # to be per-process only.
    name = ".cached/rec/rec-{}-{}.pkl".format(style, hash(frozenset(training)))
    if os.path.isfile(name):
        with open(name, "rb") as fh:
            rec = cPickle.load(fh)
    else:
        rec = Recommender(style, training)
        rec.save()
    # get the incomplete piece
    piece1 = data.Piece(piece1)
    # label the piece by chords and transpose its state chain to C
    use_chords = True
    key_sig, unshifted_state_chain = cmm.NoteState.piece_to_state_chain(
        piece1, use_chords)
    offset = cmm.get_key_offset(key_sig[0], 'C')
    state_chain1 = [s.transpose(offset) for s in unshifted_state_chain]
    if piece2 is not None:
        piece2 = data.Piece(piece2)
        key_sig, unshifted_state_chain = cmm.NoteState.piece_to_state_chain(
            piece2, use_chords)
        offset = cmm.get_key_offset(key_sig[0], 'C')
        state_chain2 = [s.transpose(offset) for s in unshifted_state_chain]
    # modes: preceding, bridging, and following
    # (bug fix: the original compared strings with 'is', which only worked
    # by accident of CPython string interning; '==' is correct)
    if typ == 'pre':
        seed = []
        end = [state_chain1[0]]
    elif piece2 is not None and typ == 'bridge':
        seed = [state_chain1[-1]]
        end = [state_chain2[0]]
    elif typ == 'post':
        seed = [state_chain1[-1]]
        end = []
    else:
        # 'bridge' requested without a second piece, or unknown typ
        print("Error: Second piece not given")
        return 0
    # generate new states from the seed several times and keep the
    # distinct results
    results = []
    for i in range(num_recs):
        res = rec.recommend(seed, 100, end)
        print([g.origin + ('-' if g.chord else '') + g.chord for g in res])
        if res not in results:
            results.append(res)
    # write out the 'best' result as a midi piece (for now, just pick the
    # first one)
    result = results[0]
    if typ == 'pre':
        result.extend(state_chain1)
        music = cmm.NoteState.state_chain_to_notes(result, piece1.bar)
    elif typ == 'post':
        state_chain1.extend(result)
        music = cmm.NoteState.state_chain_to_notes(state_chain1, piece1.bar)
    else:
        state_chain1.extend(result)
        state_chain1.extend(state_chain2)
        music = cmm.NoteState.state_chain_to_notes(state_chain1, piece1.bar)
    song = [piece1.meta]
    song.append([n.note_event() for n in music])
    midi.write('rec.mid', song)
def add_pair_by_bars(self, filename, b00, b01, b10, b11):
    """Load a midi file, cut two bar ranges from it, and register the two
    segments as a training pair via add_pair()."""
    source = data.Piece(filename)
    first = source.segment_by_bars(b00, b01)
    second = source.segment_by_bars(b10, b11)
    self.add_pair(first, second)
def get_patterns(filename, b0, b1):
    """Run pattern analysis on a midi file and attach labelled choices.

    :param filename: path of the midi file
    :param b0: start parameter passed through to analysis()
    :param b1: end parameter passed through to analysis()
    :return: the analysis object, with .chosenlabels filled in
    """
    # `c` is the module-level classifier set up in __main__
    musicpiece = data.Piece(filename)
    a = analysis(musicpiece, c, b0, b1)
    # pair each chosen section with its label
    # (the original also unpacked a.chosenscore into an unused local; dropped)
    a.chosenlabels = [(b, a.labelled_sections[b]) for b in a.chosen]
    return a
labelled_chosen = [(b, labelled_sections[b]) for b in chosen] ''' for c1 in chosen: label = labelled_sections[c1] start, dur = c1 segmentation(Piece, d, match, default_scoring_fn, start, dur, section_prefix=label, depth+1) ''' return chosenscore, chosen, score, labelled_sections, bestscore if __name__ == '__main__': c = fetch_classifier() musicpiece = data.Piece(sys.argv[1]) if len(sys.argv ) == 5: # midi-file, min_bars, start_bar_index, end_bar_index musicpiece = musicpiece.segment_by_bars(int(sys.argv[3]), int(sys.argv[4])) d = preprocess_segments(musicpiece, c) if len(sys.argv) == 6: # midi-file, b00, b01, b10, b11 b00, b01, b10, b11 = [int(n) for n in sys.argv[2:6]] def compare_bars(musicpiece, c, b00, b01, b10, b11): one = musicpiece.segment_by_bars(b00, b01) two = musicpiece.segment_by_bars(b10, b11) features = [one.compare_with(two)] similarity_score = c.predict_proba(features)[0][