def generate_output(args):
    """Build a Markov model and write a generated song to output.mid.

    Two modes: when args.mid plus a start/end bar range are supplied, model
    just that excerpt of one piece; otherwise fold the whole Bach corpus
    into a single combined model.
    """
    classifier = patterns.fetch_classifier()
    segmentation = False
    all_keys = False

    single_piece = args.mid and args.start is not None and args.end is not None
    if single_piece:
        # Restrict the training material to the requested bar range.
        musicpiece = Piece(args.mid).segment_by_bars(args.start, args.end)
        model = piece_to_markov_model(musicpiece, classifier, segmentation)
    else:
        # Merge a per-piece model for every corpus file into one model.
        model = Markov()
        for path in glob.glob('./mid/Bach/*'):
            musicpiece = Piece(path)
            model = model.add_model(
                piece_to_markov_model(musicpiece, classifier, segmentation, all_keys))

    # meta/bar come from the (last) trained piece in either mode.
    song, gen, a = generate_song(model, musicpiece.meta, musicpiece.bar, segmentation)
    midi.write('output.mid', song)
def generate(self, output='output/output.mid', save=True):
    """Generate a song that can be serialized via the midi library.

    Returns the (hidden_chain, state_chain) pair produced by the HMM;
    when *save* is true the song is also written to *output*.
    """
    hidden = self.hmm.generate_hidden()
    states = self.hmm.generate(hidden, self.mm.markov)
    # Convert abstract note states back into concrete note events.
    note_track = [
        note.note_event()
        for note in NoteState.state_chain_to_notes(states, self.piece.bar)
    ]
    song = [self.piece.meta, note_track]
    if save:
        midi.write(output, song)
    return hidden, states
def use(model, filename="music.mid"):
    """Render *model* to a MIDI file via cmm's state map.

    model[0] is passed to cmm.use; model[1] supplies the two time-signature
    values for the header track.
    """
    import cmm
    import midi

    statemap = cmm.use(model[0])
    # Header track: fixed tick resolution, then the model's time signature.
    song = [[["ticks", 0, 256], ["time", 0, model[1][0], model[1][1]]]]
    for track_idx, states in enumerate(statemap):
        # One output track per statemap entry; st = (onset, offset, (pitch, ...)).
        track = [["note", st[0], st[1] - st[0], track_idx, st[2][0]]
                 for st in states]
        song.append(track)
    midi.write(filename, song)
def generate_output():
    """Train a Markov model and write the generated song to output.mid.

    With exactly three CLI arguments (<midi-file> <start-bar> <end-bar>),
    model that excerpt only; otherwise merge a built-in ragtime corpus.
    """
    classifier = patterns.fetch_classifier()
    segmentation = False
    all_keys = False

    if len(sys.argv) == 4:
        # positional arguments: <midi-file> <start-bar> <end-bar>
        start, end = int(sys.argv[2]), int(sys.argv[3])
        musicpiece = data.piece(sys.argv[1]).segment_by_bars(start, end)
        mm = piece_to_markov_model(musicpiece, classifier, segmentation)
    else:
        # Default corpus: three rags merged into one model.
        mm = Markov()
        for path in ["mid/hilarity.mid", "mid/froglegs.mid", "mid/easywinners.mid"]:
            musicpiece = data.piece(path)
            mm = mm.add_model(
                piece_to_markov_model(musicpiece, classifier, segmentation, all_keys))

    song, gen, notes = generate_song(mm, musicpiece.meta, musicpiece.bar, segmentation)
    midi.write('output.mid', song)
pieces = [args.input or "./mid/Bach/bwv803.mid"] # pieces = collect_all_midis('./mid/Bach') print(f'Generating from {pieces} with context length {args.context or 1}') hmm = HiddenMarkov(chain_length=args.context or 1) mm = Markov(chain_length=args.context or 1) for piece in pieces: musicpiece = Piece(piece) # here all_bars is the list of chord labels generated for each bar key_sig, state_chain, all_bars, observations = NoteState.piece_to_state_chain( musicpiece) hmm.add(all_bars) hmm.train_obs(observations, key_sig) mm.add(state_chain) print('Training complete.') hidden_chain = hmm.generate_hidden() if len(hidden_chain) < 100: print(f'result too short: {len(hidden_chain)}') note_chain = hmm.generate(hidden_chain, mm.markov) notes = NoteState.state_chain_to_notes(note_chain, musicpiece.bar) song = [musicpiece.meta] + [[n.note_event() for n in notes]] print(f'bar number: {hmm.temporary_bar_counter}') print(f'klist succeeded: {hmm.temporary_klist_counter} times') print(f'regen_success: {hmm.regen_success_counter}') midi.write(args.output or 'generated/output_hmm.mid', song)
# Ad-hoc round-trip check for the midi module: read a file, print the parsed
# result, write it back out, re-read it, and eyeball the two for differences.
import midi
import pprint

x = midi.FileReader()  # constructed but unused below; presumably for interactive poking
p = midi.read('a.mid')
print(p)
input()  # pause so the first dump can be inspected
midi.write('aa.mid', p)
p = midi.read('aa.mid')
print(p)
input()  # pause again before exiting
#midi.write('aaa.mid', p)
#p = midi.read('aaa.mid')
def recommend(piece1, style, training, typ, num_recs=4, piece2=None):
    '''The handler for an API call.

    piece1/piece2 -- paths to the piece(s) to extend
    style         -- style identifier used to key the cached recommender
    training      -- iterable of training-set identifiers (hashed for the cache key)
    typ           -- 'pre' (precede piece1), 'bridge' (connect piece1 to
                     piece2; requires piece2) or 'post' (follow piece1)
    num_recs      -- number of candidate generations to attempt

    Writes the chosen result to 'rec.mid' as a side effect; returns 0 on a
    bad mode / missing second piece.
    '''
    # check if a recommender for this (style, training-set) already exists
    name = ".cached/rec/rec-{}-{}.pkl".format(style, hash(frozenset(training)))
    if os.path.isfile(name):
        with open(name, "rb") as fh:
            rec = cPickle.load(fh)
    else:
        rec = Recommender(style, training)
        rec.save()

    # get the incomplete piece
    piece1 = data.Piece(piece1)

    # label the piece by chords, determine the length of the seed bars;
    # everything is transposed into C so the model is key-independent
    use_chords = True
    key_sig, unshifted_state_chain = cmm.NoteState.piece_to_state_chain(
        piece1, use_chords)
    offset = cmm.get_key_offset(key_sig[0], 'C')
    state_chain1 = [s.transpose(offset) for s in unshifted_state_chain]
    if piece2 is not None:
        piece2 = data.Piece(piece2)
        key_sig, unshifted_state_chain = cmm.NoteState.piece_to_state_chain(
            piece2, use_chords)
        offset = cmm.get_key_offset(key_sig[0], 'C')
        state_chain2 = [s.transpose(offset) for s in unshifted_state_chain]

    # modes: preceding, bridging, and following
    # BUG FIX: these comparisons previously used `is`, which tests object
    # identity and only happens to work when CPython interns the literals
    # (it emits a SyntaxWarning on 3.8+); `==` is the correct comparison.
    if typ == 'pre':
        seed = []
        end = [state_chain1[0]]
    elif piece2 is not None and typ == 'bridge':
        seed = [state_chain1[-1]]
        end = [state_chain2[0]]
    elif typ == 'post':
        seed = [state_chain1[-1]]
        end = []
    else:
        # this shouldn't happen
        print("Error: Second piece not given")
        return 0

    # generate new states by providing the seed bars
    # do this several times and see if we get a different result
    results = []
    for i in range(num_recs):
        res = rec.recommend(seed, 100, end)
        print([g.origin + ('-' if g.chord else '') + g.chord for g in res])
        if res not in results:
            results.append(res)

    # write out the 'best' result as a midi piece (for now, just pick the first one)
    result = results[0]
    if typ == 'pre':
        result.extend(state_chain1)
        music = cmm.NoteState.state_chain_to_notes(result, piece1.bar)
    elif typ == 'post':
        state_chain1.extend(result)
        music = cmm.NoteState.state_chain_to_notes(state_chain1, piece1.bar)
    else:
        # bridge: seed result sits between the two original chains
        state_chain1.extend(result)
        state_chain1.extend(state_chain2)
        music = cmm.NoteState.state_chain_to_notes(state_chain1, piece1.bar)
    song = [piece1.meta]
    song.append([n.note_event() for n in music])
    midi.write('rec.mid', song)
def generateNotes(riff, master, pMat, noteDict, model, filename):
    # Assemble a song by chaining randomly-chosen riffs until the probability
    # matrix's end state is reached, then flatten the notes into per-track
    # lists and write a MIDI file to *filename*.
    #
    # riff     -- riff boundary data consumed by generateRiffDictionary
    # master   -- master note list; each riff is the slice [start:end+1] of it
    # pMat     -- probability matrix over riff sections; index len(pMat)-1 is
    #             the terminal state (assumption from the while condition)
    # noteDict -- maps a note value to the sections that may follow it
    # model    -- model[0][1][2:4] supplies the time-signature event values
    # filename -- output MIDI path

    #generate dictionary of all possible riffs
    riffChoice = generateRiffDictionary(riff)
    #from the dictionary of riffs, get a random one for the sequence
    from random import choice
    noteOn = 0
    song = []
    listedSong = []
    listedTimes = []  # NOTE(review): never used below
    temp = []
    seq = [0]
    #choose a random sequence from the dictionary and put the chorded notes
    # in listedSong
    #for i in range(len(seq)):
    while seq[-1] != (len(pMat) - 1):
        selectedRiff = choice(riffChoice[seq[-1]])
        riffNotes = master[selectedRiff[0]:selectedRiff[1] + 1]
        if riffNotes == []:
            # empty riff: re-draw without advancing the sequence
            continue
        # subtracts first note from the noteOn value in order to set relative duration
        duration = riffNotes[0][0][1] - noteOn
        for k in range(len(riffNotes)):
            currentChord = []
            #adjusts duration from riff to be relative to new placement
            for j in range(len(riffNotes[k])):
                # re-base onset, compute duration = noteoff - noteon, keep track & pitch
                currentNote = ['note', riffNotes[k][j][1] - duration]
                currentNote.append(riffNotes[k][j][2] - riffNotes[k][j][1])
                currentNote.append(riffNotes[k][j][3])
                currentNote.append(riffNotes[k][j][4])
                currentChord.append(currentNote)
                # this finds the correct placement of the next note
                noteOn = currentNote[1] + currentNote[2]
                #noteOn = currentNote[1]
            listedSong.append(currentChord)
        prevseq = seq
        seq = getNextRiff(seq, pMat, noteDict, listedSong)
        # Attempt to not have repeated sections
        #if len(seq) > 2 and seq[-1] == seq[-2]:
        #seq.pop()
        #seq = getNextRiff(seq, pMat, noteDict, listedSong)
        if len(seq) == len(prevseq):
            # no progress made: discard the chord we just appended
            listedSong.pop()
        print 'New Sequence:', seq
    # append final riff
    # NOTE(review): this slice omits the '+ 1' used in the loop above — confirm
    # whether the final riff is intentionally one note shorter.
    selectedRiff = choice(riffChoice[len(pMat)])
    riffNotes = master[selectedRiff[0]:selectedRiff[1]]
    duration = riffNotes[0][0][1] - noteOn
    for k in range(len(riffNotes)):
        currentChord = []
        #adjusts duration from riff to be relative to new placement
        for j in range(len(riffNotes[k])):
            currentNote = ['note', riffNotes[k][j][1] - duration]
            currentNote.append(riffNotes[k][j][2] - riffNotes[k][j][1])
            currentNote.append(riffNotes[k][j][3])
            currentNote.append(riffNotes[k][j][4])
            currentChord.append(currentNote)
        listedSong.append(currentChord)
    #notes are ['note', noteon, duration, track, noteval]
    #right now the master track has the ['note', noteon, noteoff, track, noteval, duration]
    #remove the notes from the embedded list
    for i in range(len(listedSong)):
        for j in range(len(listedSong[i])):
            temp += [listedSong[i][j]]
    #sort the notes by track
    temp.sort(key=lambda x: int(x[3]))
    tracks = []
    #create empty arrays for the tracks
    for i in range(temp[-1][3] + 1):
        tracks.append([])
    #insert the notes into the right tracks
    for i in range(len(temp)):
        tracks[temp[i][3]].append(temp[i])
    #insert the tracks into the song
    for i in range(len(tracks)):
        song.append(tracks[i])
    #insert the header
    song.insert(
        0, [["ticks", 0, 256], ["time", 0, model[0][1][2], model[0][1][3]]])
    import midi
    midi.write(filename, song)
    return
def write(self, path=None):
    """Serialize self.midi to *path* (defaulting to self.path) and clear the dirty flag."""
    target = path or self.path
    midi.write(target, self.midi)
    self.unwritten = False
def generateNotes(riff, master, pMat, noteDict, model, filename,
                  dualInput=False, riff2=None, master2=None, pMat2=None,
                  noteDict2=None):
    # Dual-model variant of generateNotes: chains riffs drawn from one or two
    # source models (tracked as [model, section] pairs in seq), flattens the
    # notes into per-track lists and writes a MIDI file to *filename*.
    #
    # riff/master/pMat/noteDict -- primary model data (see single-model version)
    # dualInput                 -- when True, riff2/master2/pMat2/noteDict2
    #                              supply a second model to interleave with
    # model                     -- model[0][1][2:4] supplies the time signature
    from random import choice
    #generate dictionary of all possible riffs
    riffChoice = generateRiffDictionary(riff)
    if dualInput:
        riffChoice2 = generateRiffDictionary(riff2)
        # merge note dictionaries, keys are the last note
        # values are a list of tuples of (model, section)
        # eg: 72:[(1, 3), (2, 0)] refers to note 72 can be followed by
        # model 1 section 3 or model 2 section 0
        dualDict = {}
        for key in noteDict:
            if key not in dualDict:
                dualDict[key] = []
            for val in noteDict[key]:
                dualDict[key].append((1, val))
        for key in noteDict2:
            if key not in dualDict:
                dualDict[key] = []
            for val in noteDict2[key]:
                dualDict[key].append((2, val))
    #from the dictionary of riffs, get a random one for the sequence
    noteOn = 0
    song = []
    listedSong = []
    listedTimes = []  # NOTE(review): never used below
    temp = []
    seq = [[1, 0]]  # start in model 1, section 0
    #choose a random sequence from the dictionary and put the chorded notes
    # in listedSong
    # Loop until the active model reaches its terminal section. Note the
    # operator precedence: (model1 and not-at-end) or (model2 and not-at-end2).
    while (seq[-1][0] == 1 and seq[-1][1] != (len(pMat) - 1)) \
            or seq[-1][0] == 2 and seq[-1][1] != (len(pMat2) - 1):
        if seq[-1][0] == 2:
            selectedRiff = choice(riffChoice2[seq[-1][1]])
            riffNotes = master2[selectedRiff[0]:selectedRiff[1] + 1]
        else:
            selectedRiff = choice(riffChoice[seq[-1][1]])
            riffNotes = master[selectedRiff[0]:selectedRiff[1] + 1]
        if riffNotes == []:
            # empty riff: re-draw without advancing the sequence
            continue
        # subtracts first note from the noteOn value in order to set relative duration
        duration = riffNotes[0][0][1] - noteOn
        for k in range(len(riffNotes)):
            currentChord = []
            #adjusts duration from riff to be relative to new placement
            for j in range(len(riffNotes[k])):
                # re-base onset, compute duration = noteoff - noteon, keep track & pitch
                currentNote = ['note', riffNotes[k][j][1] - duration]
                currentNote.append(riffNotes[k][j][2] - riffNotes[k][j][1])
                currentNote.append(riffNotes[k][j][3])
                currentNote.append(riffNotes[k][j][4])
                currentChord.append(currentNote)
                # this finds the correct placement of the next note
                noteOn = currentNote[1] + currentNote[2]
                #noteOn = currentNote[1]
            listedSong.append(currentChord)
        prevseq = seq
        if dualInput:
            seq = getNextDualRiff(seq, dualDict, listedSong)
        else:
            seq = getNextRiff(seq, pMat, noteDict, listedSong)
        # Attempt to not have repeated sections
        #if len(seq) > 2 and seq[-1] == seq[-2]:
        #seq.pop()
        #seq = getNextRiff(seq, pMat, noteDict, listedSong)
        #if len(seq) == len(prevseq):
        #listedSong.pop()
        print 'New Sequence:', seq
    # append final riff
    # NOTE(review): the final riff always comes from the primary model's
    # riffChoice/pMat, even under dualInput — confirm this is intentional.
    selectedRiff = choice(riffChoice[len(pMat)])
    riffNotes = master[selectedRiff[0]:selectedRiff[1]]
    duration = riffNotes[0][0][1] - noteOn
    for k in range(len(riffNotes)):
        currentChord = []
        #adjusts duration from riff to be relative to new placement
        for j in range(len(riffNotes[k])):
            currentNote = ['note', riffNotes[k][j][1] - duration]
            currentNote.append(riffNotes[k][j][2] - riffNotes[k][j][1])
            currentNote.append(riffNotes[k][j][3])
            currentNote.append(riffNotes[k][j][4])
            currentChord.append(currentNote)
        listedSong.append(currentChord)
    #notes are ['note', noteon, duration, track, noteval]
    #right now the master track has the ['note', noteon, noteoff, track, noteval, duration]
    #remove the notes from the embedded list
    for i in range(len(listedSong)):
        for j in range(len(listedSong[i])):
            temp += [listedSong[i][j]]
    #sort the notes by track
    temp.sort(key=lambda x: int(x[3]))
    tracks = []
    #create empty arrays for the tracks
    for i in range(temp[-1][3] + 1):
        tracks.append([])
    #insert the notes into the right tracks
    for i in range(len(temp)):
        tracks[temp[i][3]].append(temp[i])
    #insert the tracks into the song
    for i in range(len(tracks)):
        song.append(tracks[i])
    #insert the header
    song.insert(
        0, [["ticks", 0, 256], ["time", 0, model[0][1][2], model[0][1][3]]])
    import midi
    midi.write(filename, song)
    return