def generate_targets(self):
    if self.targets:
        return self.targets
    l = []
    # gather all file names into one list
    for k in self.pair_dict.keys():
        l.append(k[0])
        l.append(k[1])
    l = list(set(l))
    out_dict = self.pair_dict.copy()
    # loop through every combination
    for mid1 in l:
        for mid2 in l:
            if mid1 == mid2:
                pass
            elif (mid1, mid2) in out_dict.keys():
                pass
            elif (mid2, mid1) in out_dict.keys():
                pass
            else:
                out_dict[(mid1, mid2)] = 0
    s = []
    for k, v in out_dict.iteritems():
        if self.prefix in k[0]:
            k0 = self.pieces_dict[k[0]]
        else:
            k0 = data.piece(k[0])
        if self.prefix in k[1]:
            k1 = self.pieces_dict[k[1]]
        else:
            k1 = data.piece(k[1])
        s.append((k0, k1, v))
    self.targets = s
    return s
def main():
    # initialize a PlaybackUtility for each midi file, put them into a list -> playback_utils
    playback_utils = []
    for f in ['mid/owl.mid', 'mid/lost.mid']:
        musicpiece = data.piece(f)
        pbu = playback.PlaybackUtility()
        pbu.add_notes(musicpiece.unified_track.notes)
        playback_utils.append(pbu)

    tempo_reciprocal = 3000  # 'speed' of playback. need to adjust this carefully
    playback.init_midi_channel()  # set up channel, and prompt MIDI device reset before continuing

    # loop
    loop = True
    piece_index = 0  # index of the piece currently playing
    start_time = time.clock()
    while loop:
        # read/poll the trigger file
        text = playback.read_trigger_file('trigger_file')
        if text:
            print 'read triggerfile:', text
            piece_index = (piece_index + 1) % 2  # switch pieces
        cur_time = time.clock()
        playback_pos = int((cur_time - start_time) * 1000000) / tempo_reciprocal
        # play those notes using the corresponding PlaybackUtility
        playback_utils[piece_index].run(playback_pos)
        if playback_utils[piece_index].isTerminated():
            loop = False
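# Note on the playback-position arithmetic above (an inference from the code,
# added for clarity): elapsed wall-clock time is converted to microseconds and
# divided by tempo_reciprocal, so with tempo_reciprocal = 3000 one second of
# real time advances playback_pos by roughly 1000000 / 3000 ~= 333 units, and
# a larger tempo_reciprocal presumably means slower playback.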
def addMidiSong(songmidi):
    def savestreamtomidi(filename, stream):
        filepath = os.path.join(segments_folder, filename)
        mf = midi.translate.streamToMidiFile(stream)
        mf.open(filepath, 'wb')
        mf.write()
        mf.close()

    transitions = _loadtransitiontable()
    musicpiece = data.piece(songmidi)
    segmented = experiments.analysis(musicpiece, patterns.fetch_classifier())
    chosenscore, chosen, labelled_sections = segmented.chosenscore, segmented.chosen, segmented.labelled_sections
    musicstream = converter.parse(songmidi)
    filename = os.path.splitext(os.path.basename(songmidi))[0]
    for i, segment_score in enumerate(chosenscore):
        # can't process the last segment (it has no following segment)
        if i >= len(chosenscore) - 1:
            continue
        start, duration = segment_score[0]
        first = musicstream.measures(start, start + duration)
        first_seg_str = "{}_{}_{}.mid".format(filename, start, start + duration)
        savestreamtomidi(first_seg_str, first)
        start, duration = chosenscore[i + 1][0]
        second = musicstream.measures(start, start + duration)
        second_seg_str = "{}_{}_{}.mid".format(filename, start, start + duration)
        savestreamtomidi(second_seg_str, second)
        # update transitions
        if first_seg_str in transitions:
            transitions[first_seg_str] += [second_seg_str]
        else:
            transitions[first_seg_str] = [second_seg_str]
    _savetransitiontable(transitions)
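# The transition table built above maps a segment file name (e.g. a made-up
# "owl_4_12.mid") to the list of segment file names observed to follow it.
# _loadtransitiontable/_savetransitiontable are not shown in this excerpt; a
# minimal sketch of what they might look like, assuming the table is persisted
# as JSON at a hypothetical `transitions_path` (both the path variable and the
# JSON format are assumptions, not the repo's actual implementation):
#
#     import json
#     import os
#
#     def _loadtransitiontable():
#         if not os.path.exists(transitions_path):
#             return {}
#         with open(transitions_path) as fp:
#             return json.load(fp)
#
#     def _savetransitiontable(transitions):
#         with open(transitions_path, 'w') as fp:
#             json.dump(transitions, fp, indent=2)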
def main():
    # initialize a PlaybackUtility for each midi file, put them into a list -> playback_utils
    playback_utils = []
    for f in ['mid/owl.mid', 'mid/lost.mid']:
        musicpiece = data.piece(f)
        pbu = playback.PlaybackUtility()
        pbu.add_notes(musicpiece.unified_track.notes)
        playback_utils.append(pbu)

    tempo_reciprocal = 3000  # 'speed' of playback. need to adjust this carefully
    playback.init_midi_channel()  # set up channel, and prompt MIDI device reset before continuing

    # loop
    loop = True
    piece_index = 0  # index of the piece currently playing
    start_time = time.clock()
    while loop:
        # read/poll the trigger file
        text = playback.read_trigger_file('trigger_file')
        if text:
            print('read triggerfile:', text)
            piece_index = (piece_index + 1) % 2  # switch pieces
        cur_time = time.clock()
        playback_pos = int((cur_time - start_time) * 1000000) / tempo_reciprocal
        # play those notes using the corresponding PlaybackUtility
        playback_utils[piece_index].run(playback_pos)
        if playback_utils[piece_index].isTerminated():
            loop = False
def generate_output():
    classifier = patterns.fetch_classifier()
    segmentation = False
    all_keys = False
    if len(sys.argv) == 4:
        # positional arguments: <midi-file> <start-bar> <end-bar>
        musicpiece = data.piece(sys.argv[1])
        musicpiece = musicpiece.segment_by_bars(int(sys.argv[2]), int(sys.argv[3]))
        mm = piece_to_markov_model(musicpiece, classifier, segmentation)
    else:
        pieces = ["mid/hilarity.mid", "mid/froglegs.mid", "mid/easywinners.mid"]
        mm = Markov()  # initialize an empty model
        # generate a model _mm for each piece then add them together
        for p in pieces:
            musicpiece = data.piece(p)
            _mm = piece_to_markov_model(musicpiece, classifier, segmentation, all_keys)
            mm = mm.add_model(_mm)
    song, gen, notes = generate_song(mm, musicpiece.meta, musicpiece.bar, segmentation)
    midi.write('output.mid', song)
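# Hypothetical invocations (the script name is illustrative; only the argument
# shape is taken from the code above):
#
#     python generate.py                         # combine the three hard-coded pieces
#     python generate.py mid/hilarity.mid 8 24   # model bars 8-24 of one piece only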
def generate_targets_subset(self):
    # produces the same list as generate_targets() but limits the number of
    # elements which have target=0 to be (linearly) proportional to the number
    # of elements which have target=1
    if self.targets:
        return self.targets
    l = []
    # gather all file names into one list
    for k in self.pair_dict.keys():
        l.append(k[0])
        l.append(k[1])
    l = list(set(l))
    out_dict = self.pair_dict.copy()
    # loop through every combination
    counter = 0
    for mid1 in l:
        for mid2 in l:
            if mid1 == mid2:
                pass
            elif (mid1, mid2) in out_dict.keys():
                pass
            elif (mid2, mid1) in out_dict.keys():
                pass
            else:
                if counter > 4 * len(self.pair_dict.keys()) ** 1.5:
                    pass
                else:
                    counter += 1
                    out_dict[(mid1, mid2)] = 0
    s = []
    for k, v in out_dict.iteritems():
        if self.prefix in k[0]:
            k0 = self.pieces_dict[k[0]]
        else:
            k0 = data.piece(k[0])
        if self.prefix in k[1]:
            k1 = self.pieces_dict[k[1]]
        else:
            k1 = data.piece(k[1])
        s.append((k0, k1, v))
    self.targets = s
    return s
        x[i] = 0 if (sc2[i].chord, sc2[i + 1].chord) in chord_transitions else 1
    fig = figure()
    x.shape = 1, len(x)
    axprops = dict(xticks=[], yticks=[])
    barprops = dict(aspect='auto', cmap=cm.binary, interpolation='bicubic')
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.1], **axprops)
    ax.imshow(x, **barprops)
    show()


if __name__ == '__main__':
    c = patterns.fetch_classifier()
    chord_transitions = set()
    for i in range(1, len(sys.argv) - 1):
        piece1 = data.piece(sys.argv[i])
        sc_ = cmm.NoteState.piece_to_state_chain(piece1, use_chords=True)
        schords = [s.chord for s in sc_]
        # also collect transposed copies of the chord sequence (shifted up and down)
        schords2 = []
        for i in range(1, 6):
            schords2 += [chords.translate(chords.untranslate(s.chord.split('m')[0]) + i)
                         + ('m' if 'm' in s.chord else '') for s in sc_]
        for i in range(1, 7):
            schords2 += [chords.translate(chords.untranslate(s.chord.split('m')[0]) - i)
                         + ('m' if 'm' in s.chord else '') for s in sc_]
        schords += schords2
        # assume chain length is 1
        chord_transitions = chord_transitions.union(
            {(schords[i], schords[i + 1]) for i in range(len(schords) - 1)})
    piece2 = data.piece(sys.argv[-1])
    mixture(chord_transitions, piece2)
def add_pair_by_bars(self, filename, b00, b01, b10, b11):
    p = data.piece(filename)
    p1 = p.segment_by_bars(b00, b01)
    p2 = p.segment_by_bars(b10, b11)
    self.add_pair(p1, p2)
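# Illustrative usage sketch (the owning collection object `pairs`, the file
# name and the bar indices are all made up for the example): register bars
# 0-8 and bars 8-16 of the same file as a training pair.
#
#     pairs.add_pair_by_bars('mid/hilarity.mid', 0, 8, 8, 16)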
def get_patterns(filename, b0, b1):
    musicpiece = data.piece(filename)
    a = analysis(musicpiece, c, b0, b1)
    chosenscore, chosen, labelled_sections = a.chosenscore, a.chosen, a.labelled_sections
    a.chosenlabels = [(b, labelled_sections[b]) for b in chosen]
    return a
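# Illustrative call (file name and bar range are made up). Note that
# get_patterns relies on a module-level classifier `c` (e.g. the result of
# fetch_classifier()) being in scope.
#
#     a = get_patterns('mid/hilarity.mid', 0, 32)
#     print a.chosenlabels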
                self.next_mm = None
                print 'replace model with [{}]'.format(self.get_model_name())
            # generate the next NoteState
            elem = self.mm.generate_next_state(self.buf)
            if elem != cmm.Markov.STOP_TOKEN:
                self.buf = self.mm.shift_buffer(self.buf, elem)
                yield elem


if __name__ == "__main__":
    # load MIDI files (from program arguments if provided)
    filenames = sys.argv[1:]
    if not filenames:
        #filenames = ["mid/easywinners.mid", "mid/froglegs.mid", "mid/hilarity.mid", "mid/sjeugen.mid"]
        filenames = ["mid/mario2.mid", "mid/mario3.mid"]
    musicpieces = {f: data.piece(f) for f in filenames}

    # train a markov model on each piece to make a pool of Markov models
    # pair them up with their MIDI filenames and put inside a dictionary
    # can turn on all_keys if desired
    markov_models = {f: cmm.piece_to_markov_model(piece, segmentation=False, all_keys=False)
                     for f, piece in musicpieces.iteritems()}

    # initialize the note state generator with a random initial markov model
    initial_markov = random.choice(markov_models.keys())
    dmnsg = DynamicMarkovNoteStateGenerator(markov_models[initial_markov], markov_models)
    nsgen = dmnsg.next_state_generator()

    # init some parameters
    tempo_reciprocal = 1500  # 'speed' of playback. need to adjust this carefully, and by trial and error
    bar = 1024  # used for generating the midi events

    playback.init_midi_channel()  # set up channel, and prompt MIDI device reset
                print 'replace model with [{}]'.format(self.get_model_name())
            # generate the next NoteState
            elem = self.mm.generate_next_state(self.buf)
            if elem != cmm.Markov.STOP_TOKEN:
                self.buf = self.mm.shift_buffer(self.buf, elem)
                yield elem


if __name__ == "__main__":
    # load MIDI files (from program arguments if provided)
    filenames = sys.argv[1:]
    if not filenames:
        #filenames = ["mid/easywinners.mid", "mid/froglegs.mid", "mid/hilarity.mid", "mid/sjeugen.mid"]
        filenames = ["mid/mario2.mid", "mid/mario3.mid"]
    musicpieces = {f: data.piece(f) for f in filenames}

    # train a markov model on each piece to make a pool of Markov models
    # pair them up with their MIDI filenames and put inside a dictionary
    # can turn on all_keys if desired
    markov_models = {f: cmm.piece_to_markov_model(piece, segmentation=False, all_keys=False)
                     for f, piece in musicpieces.iteritems()}

    # initialize the note state generator with a random initial markov model
    initial_markov = random.choice(markov_models.keys())
    dmnsg = DynamicMarkovNoteStateGenerator(markov_models[initial_markov], markov_models)
    nsgen = dmnsg.next_state_generator()
    plot = np.zeros((piece1.num_bars, piece2.num_bars), dtype=float)
    segment1 = [piece1.segment_by_bars(i, i + 1) for i in range(piece1.num_bars)]
    segment2 = [piece2.segment_by_bars(j, j + 1) for j in range(piece2.num_bars)]
    print "done 1/2"
    for i in range(piece1.num_bars):
        for j in range(piece2.num_bars):
            features = [segment1[i].compare_with(segment2[j])]
            score = c.predict_proba(features)[0][1]
            # bar pairs judged similar (score >= 0.5) map to 0; dissimilar pairs shade toward 1
            plot[i][j] = 0 if score >= 0.5 else 1 - score
            #plot[i][j] = 1 - score
    print "done 2/2"
    return np.fliplr(plot)


if __name__ == '__main__':
    c = patterns.fetch_classifier()
    if len(sys.argv) == 3:
        piece1 = data.piece(sys.argv[1])
        piece2 = data.piece(sys.argv[2])
    if len(sys.argv) == 2:
        piece1 = data.piece(sys.argv[1])
        piece2 = data.piece(sys.argv[1])
    plot = recurrence(c, piece1, piece2)
    io.imshow(plot)
    io.show()
    x.shape = 1, len(x)
    axprops = dict(xticks=[], yticks=[])
    barprops = dict(aspect='auto', cmap=cm.get_cmap('binary'), interpolation='bicubic')
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.1], **axprops)
    ax.imshow(x, **barprops)
    show()


if __name__ == '__main__':
    c = patterns.fetch_classifier()
    chord_transitions = set()
    for i in range(1, len(sys.argv) - 1):
        piece1 = data.piece(sys.argv[i])
        sc_ = cmm.NoteState.piece_to_state_chain(piece1, use_chords=True)
        schords = [s.chord for s in sc_]
        # also collect transposed copies of the chord sequence (shifted up and down)
        schords2 = []
        for i in range(1, 6):
            schords2 += [chords.translate(chords.untranslate(s.chord.split('m')[0]) + i)
                         + ('m' if 'm' in s.chord else '') for s in sc_]
        for i in range(1, 7):
            schords2 += [chords.translate(chords.untranslate(s.chord.split('m')[0]) - i)
                         + ('m' if 'm' in s.chord else '') for s in sc_]
    d['chords'] = ['C#m', 'C#m', 'Am', 'C#m', 'C#m', 'G#', 'C#m', 'Em', 'Em', 'Em', 'G', 'Cm',
                   'Bm', 'Bm', 'Bm', 'Em', 'B', 'Em', 'Bm', 'C#', 'Gm', 'F#m', 'F#m', 'C#m',
                   'F#m', 'G#', 'C#m', 'G#', 'G#', 'C#m', 'C#m', 'B#m', 'C#m', 'C#m', 'B#m', 'B#m',
                   'B#m', 'G#', 'G#', 'G#', 'G#', 'C#m', 'G#', 'C#m', 'Em', 'Em', 'B', 'Em',
                   'G#', 'Dm', 'C#m', 'F#m', 'F#m', 'C#m', 'F#m', 'C#m', 'Am', 'F#m', 'C#m', 'C#m',
                   'G#', 'C#m', 'B#m', 'C#m', 'B#m', 'C#m', 'C#m', 'C#m', 'C#m']
    l.append(d)
    return l


if __name__ == '__main__':
    svc = svm.SVC(kernel='rbf', C=10000)
    rforest = RandomForestClassifier(n_estimators=100)
    lr = linear_model.LogisticRegression(C=1)
    if len(sys.argv) == 1:
        max_ = 0
        count, scores = 0, []
        truth = chord_truths()[0]
        musicpiece = data.piece(truth['piece'])
        from sklearn.externals import joblib
        while count < 30:
            #cc = chord_classifier(rforest)
            #cc.train()
            cc = fetch_classifier()
            allbars = cc.predict(musicpiece)
            s = 0
            for i in range(len(truth['chords'])):
                if truth['chords'][i] == allbars[i]:
                    s += 1
            print('Correct Score: {}/{}'.format(s, len(truth['chords'])))
            count += 1
            scores.append(s)
            print('Count =', count)
    labelled_chosen = [(b, labelled_sections[b]) for b in chosen]
    '''
    for c1 in chosen:
        label = labelled_sections[c1]
        start, dur = c1
        segmentation(Piece, d, match, default_scoring_fn, start, dur, section_prefix=label, depth+1)
    '''
    return chosenscore, chosen, score, labelled_sections, bestscore


if __name__ == '__main__':
    c = fetch_classifier()
    musicpiece = data.piece(sys.argv[1])
    if len(sys.argv) == 5:
        # midi-file, min_bars, start_bar_index, end_bar_index
        musicpiece = musicpiece.segment_by_bars(int(sys.argv[3]), int(sys.argv[4]))
        d = preprocess_segments(musicpiece, c)
    if len(sys.argv) == 6:
        # midi-file, b00, b01, b10, b11
        b00, b01, b10, b11 = [int(n) for n in sys.argv[2:6]]

        def compare_bars(musicpiece, c, b00, b01, b10, b11):
            one = musicpiece.segment_by_bars(b00, b01)
            two = musicpiece.segment_by_bars(b10, b11)
            features = [one.compare_with(two)]
            similarity_score = c.predict_proba(features)[0][1]  # get similarity_score
            print "SIMPROB:", similarity_score
            headers = ['Feature' + str(i) for i in range(len(features[0]))]
            features.insert(0, headers)
    '''
    buf = mm.get_start_buffer()
    elem = mm.generate_next_state(buf)
    yield elem
    while elem != cmm.Markov.STOP_TOKEN:
        buf = mm.shift_buffer(buf, elem)
        elem = mm.generate_next_state(buf)  # generate another
        if elem != cmm.Markov.STOP_TOKEN:
            yield elem


if __name__ == "__main__":
    # load a MIDI file
    musicpiece = data.piece("mid/hilarity.mid")

    # train a markov model on this piece
    # nothing fancy, no segmentation or key shifts
    mm = cmm.piece_to_markov_model(musicpiece, segmentation=False, all_keys=False)

    # initialize the note state generator
    nsgen = NoteStateGenerator(mm)

    # init some parameters
    tempo_reciprocal = 1500  # 'speed' of playback. need to adjust this carefully, and by trial and error
    bar = 1024  # used for generating the midi events

    playback.init_midi_channel()  # set up channel, and prompt MIDI device reset