def midi_from_melodies(melodies):
    """Stack parallel melodies (lists of pitch ints) into a chordal Midi.

    Each element x encodes a pitch class (x % Octave) and an octave offset
    (x // Octave, added to base octave 4); every note is a quarter note.
    The i-th note of every melody is combined into one chord.
    """
    notes = [[Note(x % Octave, 4 + x // Octave, 1 / 4) for x in melody]
             for melody in melodies]
    # BUG FIX: the original used range(len(melody)), relying on the list
    # comprehension variable leaking out of scope (Python 2 only behavior;
    # NameError on Python 3). Index the first converted melody instead, and
    # tolerate an empty input.
    chords = [NoteSeq([melody_notes[i] for melody_notes in notes])
              for i in range(len(notes[0]))] if notes else []
    midi = Midi(tempo=120)
    midi.seq_chords(chords)
    return midi
def pic2music(imgfile):
    """Derive a melody from an image and write it to <imgfile>.mid.

    Image lightness selects the pitch set and base octave; the MD5 of the
    raw image attributes seeds the RNG so output is deterministic per image.
    """
    sizes = 50
    lightness, imageAttr = pic.getImageInfo(imgfile)
    random.seed(hashlib.new("md5", imageAttr).hexdigest())
    # candidate five-note pitch sets, indexed by quantized lightness
    sett1 = [[11, 4, 6, 7, 2], [0, 2, 4, 7, 9], [1, 3, 6, 8, 10],
             [2, 4, 6, 7, 9], [2, 4, 7, 9, 11]]
    # assumes 0 <= lightness < 1 so diao stays a valid index -- TODO confirm
    diao = int((lightness * 5))
    octave = int((lightness * 10))
    notes1 = NoteSeq("")
    j = 0
    for i in imageAttr:
        # BUG FIX: the original also tested (j+1) % sizes < sizes, which is
        # always true (a value mod `sizes` is always smaller than `sizes`),
        # so the dead guard was dropped.
        if abs(imageAttr[j % sizes] - imageAttr[(j + 1) % sizes]) > 3:
            if (j % 12) % 2 == 0:
                notes1 = notes1 + Note(
                    value=sett1[diao][random.randint(0, 4)],
                    octave=random.randint(octave, octave + 3),
                    # NOTE(review): dur may be 0 when randint returns 0
                    dur=0.08 * random.randint(0, 4),
                    volume=127)
        j += 1
    midi = Midi(1, tempo=90, instrument=0)
    midi.seq_notes(notes1, track=0)
    midi.write(imgfile + ".mid")
def strToMidi(msg, fileName):
    """Encode *msg* as a melody, one scale note per hex nibble.

    Repeated nibbles are separated by a marker note (E7) so the byte
    stream stays recoverable from the MIDI file. Python 2 only
    (uses str.encode('hex')).
    """
    from pyknon.genmidi import Midi
    from pyknon.music import NoteSeq
    from pyknon.music import Note
    # Major-scale steps climbing from octave 5: digit k maps to step k%7
    # in octave 5 + k//7, reproducing the original Do..Si ladder.
    scale_steps = (0, 2, 4, 5, 7, 9, 11)
    nibble_notes = {digit: Note(value=scale_steps[k % 7], octave=5 + k // 7)
                    for k, digit in enumerate('0123456789abcdef')}
    sequence = NoteSeq('C1')
    previous = ''
    for digit in msg.encode('hex'):
        if previous == digit:
            sequence.append(Note(value=4, octave=7))  # repeat marker
        sequence.append(nibble_notes[digit])
        previous = digit
    midi = Midi(1, tempo=290)
    midi.seq_notes(sequence, track=0)
    midi.write(fileName)
def write(self, filename, sequences):
    """Render *sequences*, one track each, into *filename* at self.bpm."""
    midi = Midi(len(sequences), tempo=self.bpm)
    note_seqs = [self.get_track(seq, idx + 1)
                 for idx, seq in enumerate(sequences)]
    for track_no, note_seq in enumerate(note_seqs):
        midi.seq_notes(note_seq, track=track_no)
    midi.write(filename)
def clean_freq(samples):
    """Render each sample as a chord through timidity and return the
    per-chord channel frequencies scaled by the noise floor."""
    sample_size = len(samples)
    chords = []
    for sample in samples:
        chords.append(NoteSeq([Note(classes[i]) for i in sample]))
    midi = Midi(1, tempo=tempo)
    for idx in range(sample_size):
        midi.seq_chords([chords[idx]], time=5 * idx)
    midi.write("temp.mid")
    subprocess.call("timidity temp.mid -Ow -o temp.wav".split(),
                    stdout=subprocess.PIPE)
    rate, data = wavfile.read('temp.wav')
    freqs = channel_freqs(data.T[0])[:sample_size * 10:10]
    return freqs.astype(int) / suppress_noise
def writeMidi(path, seq):
    """Write *seq* to *path*.

    A sequence whose first element is a Note becomes a single track;
    otherwise each element of *seq* becomes its own track.
    """
    # isinstance instead of a type() comparison so Note subclasses work too
    if isinstance(seq[0], Note):
        print("One Track")
        midi = Midi(1, tempo=100)
        midi.seq_notes(seq)
    else:
        midi = Midi(len(seq), tempo=100)
        # enumerate replaces the original manual counter variable
        for i, track in enumerate(seq):
            midi.seq_notes(track, track=i)
            print(track)
    midi.write(path)
def gen_midi(rm_seq, cprogr, cprog3, cprog9, filename):
    """Write the melody and three chord-progression voices to *filename*."""
    midi = Midi(number_tracks=4, tempo=120)
    for track_no, voice in enumerate((rm_seq, cprogr, cprog3, cprog9)):
        midi.seq_notes(voice, track=track_no)
    midi.write(filename)
def crab_canon():
    """Play the crab-canon theme against its own retrograde, an octave down."""
    theme = NoteSeq("file://canon-crab")
    mirrored = theme.transposition(-12).retrograde()
    midi = Midi(2, tempo=120)
    midi.seq_notes(theme)
    midi.seq_notes(mirrored, track=1)
    midi.write("midi/canon-crab.mid")
def write_midi(arg_notes_list, file_name='temp.mid', tempo=60):
    """Write a list of Note/Rest/NoteSeq entries to a MIDI file.

    Every bare Note or Rest is wrapped in its own NoteSeq so the whole
    list can be rendered with seq_chords.

    Raises:
        TypeError: if an entry is not a Note, Rest or NoteSeq.
    """
    # Rebuild the list. This changes all Note objects to NoteSeq objects
    write_notes = []
    for note in arg_notes_list:
        if isinstance(note, (Note, Rest)):
            next_entry = NoteSeq([note])
        elif isinstance(note, NoteSeq):
            next_entry = note
        else:
            # BUG FIX: the original printed a message and then used a bare
            # `raise` with no active exception (itself a runtime error).
            raise TypeError(
                'write_midi: Attempting to write non-Note or NoteSeq object to midi')
        write_notes.append(next_entry)
    midi = Midi(1, tempo)
    midi.seq_chords(write_notes, track=0)
    midi.write(file_name)
def harmonize_scale(forte):
    """Play the scale for a Forte set class, then its 3-, 4- and 5-step
    harmonizations one after another."""
    scale = numbers_to_noteseq(pcset.PC_SETS[forte])
    midi = Midi()
    # seq_notes/seq_chords return the end time of what they wrote; each
    # section starts one beat after the previous one finishes.
    cursor = midi.seq_notes(scale)
    for interval in (3, 4, 5):
        cursor = midi.seq_chords(scale.harmonize(interval=interval),
                                 time=cursor + 1)
    midi.write("midi/scales.midi")
def convert_chords_to_midi(chord_list, filename):
    '''Requires pyknon to create the midi...'''
    from pyknon.music import NoteSeq
    from pyknon.genmidi import Midi
    midi = Midi(1, tempo=90)
    chord_prog = []
    for chord in chord_list:
        # German names: B means B-flat (written BB) and H means B natural.
        english = chord.upper().replace('B', 'BB').replace('H', 'B')
        chord_prog.append(NoteSeq(english))
    midi.seq_chords(chord_prog, 0, 0)
    midi.write(filename)
def josquin():
    """Three voices on the Josquin theme: compressed, augmented/transposed,
    and a plain excerpt."""
    source = NoteSeq("file://josquin")
    voices = (
        source.stretch_dur(0.66666),
        source[0:24].stretch_dur(2).transp(Note("C")),
        source[0:50],
    )
    midi = Midi(3, tempo=80)
    for track_no, voice in enumerate(voices):
        midi.seq_notes(voice, track=track_no)
    midi.write("midi/josquin.mid")
def crab_canon(filename):
    """Write <filename>.mid: the theme plus its retrograde an octave lower."""
    theme = NoteSeq("file://%s.notes" % filename)
    backwards = theme.transposition(-12).retrograde()
    midi = Midi(2, tempo=120)
    midi.seq_notes(theme)
    midi.seq_notes(backwards, track=1)
    midi.write("%s.mid" % filename)
def canon():
    """Two-voice canon: the follower is the inversion of its material,
    entering ten beats after the leader."""
    theme1 = NoteSeq("file://canon-quaerendo-invenietis")
    leader = theme1 + theme1[2:] + theme1[2:11]
    follower_material = theme1 + theme1[2:] + theme1[2:4]
    follower = follower_material.inversion_startswith(Note(2, 4))
    midi = Midi(2, tempo=150)
    midi.seq_notes(leader, time=3, track=0)
    midi.seq_notes(follower, time=13, track=1)
    midi.write("midi/canon.mid")
def saveSong(self, filename):
    """Write the song to *filename*.

    Each track is first written to its own numbered MIDI file; the part
    files are then concatenated pairwise with the external midisox.py tool
    and the merged result is renamed to *filename*.
    """
    def nameFile(filename, iterator):
        # e.g. ("song.mid", 2) -> "song2.mid"
        return "".join(filename.split(".")[:-1]) + str(
            iterator) + "." + filename.split(".")[-1]
    # flush any pending notes into a final track before writing
    if self.__notes != []:
        self.__addTrack()
    # for track in self.__tracks:
    #     track.addPause()
    fileNameIterator = 0
    for track in self.__tracks:
        midi = Midi(number_tracks=1,
                    instrument=track.getInstrument().value)
        notes = NoteSeq(track.getNotes())
        midi.seq_notes(notes, track=0)
        midi.write(nameFile(filename, fileNameIterator))
        fileNameIterator += 1
    # index of the last part file written
    fileNameIterator -= 1
    if fileNameIterator > 0:
        # chain-concatenate part i into part i+1, deleting part i as we go;
        # the final part file ends up holding the whole song
        for i in range(fileNameIterator):
            os.system("python midisox.py --combine concatenate " +
                      nameFile(filename, i) + " " +
                      nameFile(filename, i + 1) + " " +
                      nameFile(filename, i + 1))
            os.remove(nameFile(filename, i))
    if os.path.exists(filename):
        os.remove(filename)
    os.rename(nameFile(filename, fileNameIterator), filename)
def writesong():
    """Build a four-voice (SATB) song of SONG_DURATION beats.

    Each beat picks the alto/tenor and bass pitch lists from a four-beat
    harmonic cycle (I, IV, V, VI); the soprano always draws from
    RANDOM_PITCHLIST. Returns the assembled Midi object.
    """
    # (alto/tenor pitch list, bass pitch list) per position in the cycle
    harmony_cycle = [
        (I_PITCHLIST_AT, I_PITCHLIST_B),
        (IV_PITCHLIST_AT, IV_PITCHLIST_B),
        (V_PITCHLIST_AT, V_PITCHLIST_B),
        (VI_PITCHLIST_AT, VI_PITCHLIST_B),
    ]
    soprano = NoteSeq()
    alto = NoteSeq()
    tenor = NoteSeq()
    bass = NoteSeq()
    # NOTE: the original also had an `x == SONG_DURATION` cadence branch,
    # but range(0, SONG_DURATION) never yields that value, so the branch
    # was dead code and is omitted here.
    for x in range(SONG_DURATION):
        at_list, b_list = harmony_cycle[x % 4]
        # keep the soprano/alto/tenor/bass call order so any shared RNG
        # state advances exactly as before
        soprano += random_notes(RANDOM_PITCHLIST, SOP_RANGE, 0.0625, 120)
        alto += random_notes(at_list, ALT_RANGE, 0.125, 90)
        tenor += random_notes(at_list, TEN_RANGE, 0.25, 90)
        bass += random_notes(b_list, BAS_RANGE, 0.5, 90)
    midi = Midi(4, tempo=150)
    for track_no, voice in enumerate((soprano, alto, tenor, bass)):
        midi.seq_notes(voice, track=track_no)
    return midi
def generate(key, outfile):
    """Build inter.mid from the ciphertext/duration files, then post-process.

    Reads paired lines from c_text (pitch values) and c_dur (durations)
    until either file runs out, deletes the temporary inputs, and hands
    the intermediate MIDI to meta.meta for the final keyed output.
    """
    notes1 = []
    # context managers guarantee the files close even if parsing fails
    # (the original closed them manually, leaking on error)
    with open("c_text", "r") as cipher, open("c_dur", "r") as dur:
        while True:
            ci = cipher.readline()
            du = dur.readline()
            if not ci or not du:
                break
            notes1.append(Note(value=int(ci), dur=float(du)))
    for note in notes1:
        print(note)
    midi = Midi(1, tempo=80)
    midi.seq_notes(notes1, track=0)
    midi.write("inter.mid")
    os.remove("c_text")
    os.remove("c_dur")
    meta.meta("inter.mid", key, outfile)
def makemidi(self):
    """Render self.song ([(pitch_name, denominator), ...]) to self.path.

    'r' entries become rests; note names carry their octave as a trailing
    number ('c#4', 'a10'), which is shifted up by one for pyknon.
    """
    note_names = 'c c# d d# e f f# g g# a a# b'.split()
    result = NoteSeq()
    for s in self.song:
        duration = 1. / s[1]
        if s[0] == 'r':
            result.append(Rest(dur=duration))
        else:
            # Octave-10 names end in two digits; everything else in one.
            # (Replaces the original {s[0]}.issubset(octav10) set trick and
            # the two copy-pasted parse branches.)
            digits = 2 if s[0].endswith('10') else 1
            md = s[0][:-digits]
            octave = int(s[0][-digits:]) + 1
            note_number = note_names.index(md)
            result.append(Note(note_number, octave=octave, dur=duration))
    midi = Midi(number_tracks=1, tempo=self.bpm)
    midi.seq_notes(result, track=0)
    midi.write(self.path)
def generatingFile(length, name):
    """Render a three-track piece (low notes, high notes, chords) and
    convert it to <name>.mp3 via fluidsynth + lame, removing intermediates.

    length: bar length, e.g. 1 -> 4/4, 0.75 -> 3/4, 0.5 -> 2/4
    """
    midi = Midi(3, tempo=60)
    low_bars = generatingBarsNotes(length, dataBaseNotesLow)
    high_bars = generatingBarsNotes(length, dataBaseNotesHigh)
    chord_bars = generatingBarChords(length, dataBaseChords)
    midi.seq_notes(low_bars, track=0)
    midi.seq_notes(high_bars, track=1)
    midi.seq_chords(chord_bars, track=2)
    midi.write(name + ".mid")
    os.system('fluidsynth -F %s.wav %s %s.mid' % (name, soundfont, name))
    os.system('lame --preset insane %s.wav' % name)
    os.system('rm -Rf %s.wav' % name)
    os.system('rm -Rf %s.mid' % name)
def demo():
    """Two-track demo: the same motif written as a string and as objects."""
    string_track = NoteSeq("D4 F#8 A Bb4")
    object_track = NoteSeq([Note(2, dur=1 / 4), Note(6, dur=1 / 8),
                            Note(9, dur=1 / 8), Note(10, dur=1 / 4)])
    midi = Midi(number_tracks=2, tempo=90)
    midi.seq_notes(string_track, track=0)
    midi.seq_notes(object_track, track=1)
    midi.write("midi/demo.mid")
def canon(operation, operationName):
    """Build a two-voice canon whose follower is *operation* applied to the
    theme material; *operationName* is embedded in the output file name."""
    theme = NoteSeq("file://exercise12-bach-canon-quaerendo-invenietis.notes")
    leader = theme + theme[2:] + theme[2:11]
    follower = operation(theme + theme[2:] + theme[2:4])
    midi = Midi(2, tempo=150)
    midi.seq_notes(leader, time=3, track=0)
    midi.seq_notes(follower, time=13, track=1)
    midi.write("exercise12-canon-by-%s.mid" % operationName)
def from_pitch_track(times, pitch, sample_rate, filename="tmp.midi"):
    """Quantize a pitch track into notes/rests and write a MIDI file.

    NaN pitches become rests; durations are rounded to four decimals.
    Returns the written filename.
    """
    step = (times[1] - times[0]) / sample_rate / 2
    grouped = compact([to_midi(p) for p in pitch], step=step)
    track0 = []
    for value, length in grouped:
        length = round(length, 4)
        if np.isnan(value):
            track0.append(Rest(dur=length))
        else:
            track0.append(Note(value, 0, length))
    m = Midi(1, tempo=90)
    m.seq_notes(track0, track=0)
    m.write(filename)
    return filename
def generateMid(tune, tuneNum, lastTune = False):
    """Write *tune* as a MIDI file under midi/<experiment>/gen<N>/.

    The last tune of a generation gets asterisks around its file name.
    Creates the generation directory on first use.
    """
    global generationCount
    global ins
    midi = Midi(instrument = ins)
    midi.seq_notes(tune.notes)
    path = "midi/"+ experimentName +"/gen"+ str(generationCount) +"/"
    if not os.path.exists(path):
        os.makedirs(path)
    # FIX: reuse `path` for both cases -- the original rebuilt the identical
    # directory string inline for the lastTune branch.
    if lastTune:
        midi.write(path + "*tune" + str(tuneNum) + "*.mid")
    else:
        midi.write(path + "tune" + str(tuneNum) + ".mid")
def main():
    """Write a demo melody (the same six-note motif ten times) and play it."""
    motif = "D4 E#10 F#8 Gg4 A Bb4 "
    # string repetition replaces the original xrange concatenation loop
    notes1 = NoteSeq(motif * 10)
    midi = Midi(1, tempo=90, instrument=0)
    midi.seq_notes(notes1, track=0)
    midi.write("demo.mid")
    play_music("demo.mid")
def create_music(note_seq, given_tempo, given_track, song_name):
    """Write *note_seq* as a MIDI file under assets/music/ at the given tempo.

    NOTE(review): the MIDI is only written when the target file ALREADY
    exists on disk; otherwise a "Does not exist" message is printed. If the
    intent was to create new files this condition looks inverted -- confirm
    with callers before changing.
    """
    notes = NoteSeq(note_seq)
    midi = Midi(1, tempo=given_tempo)
    midi.seq_notes(notes, track=given_track)
    # mixed path separators kept as-is; works on Windows but is fragile
    file = ("assets\music\/" + song_name + ".mid")
    # Check if file exists
    if os.path.isfile(file):
        midi.write(file)
    else:
        print(song_name + ".mid Does not exist")
def create_midi_with_time(music, beat_of_music):
    """Write the pitch list *music* with per-note durations from
    *beat_of_music*; pitch value 101 is the rest sentinel."""
    global LEN_OF_MUSIC
    events = []
    for i in range(LEN_OF_MUSIC):
        pitch, beat = music[i], beat_of_music[i]
        if pitch == 101:  # rest marker
            events.append(Rest(dur=beat))
        else:
            events.append(Note(pitch, dur=beat))
    seq = NoteSeq(events)
    midi = Midi(number_tracks=1, tempo=90)
    midi.seq_notes(seq, track=0)
    midi.write("midi/markov_Gavotte_test1.mid")
def genMusic(sentiment):
    """Generate a sentiment-driven melody, save it, and play it to the end."""
    # keep the original call order (key, length, sequence, tempo) so any
    # shared random state advances identically
    key = genKey(sentiment)
    length = genLength(sentiment)
    notes = randomSeq(length, key, durations)
    midi = Midi(1, tempo=genTempo(sentiment))
    midi.seq_notes(notes, track=0)
    midi.write("midi/audio.mid")
    pygame.init()
    pygame.mixer.music.load("midi/audio.mid")
    pygame.mixer.music.play()
    # block until playback finishes, polling once a second
    while pygame.mixer.music.get_busy():
        pygame.time.wait(1000)
def main(arguments):
    """Turn tweet timestamps for a set of hashtags into a melody.

    Each hashtag (sorted) is assigned one note of the C major blues scale;
    matching tweets are ordered by creation time and written to FILE.mid.
    """
    # Authenticate on Twitter using the four lines of auth.txt
    auth_file = "auth.txt"
    with open(auth_file) as f:
        auth_list = f.readlines()
    # (the original also called f.close() inside the with-block -- redundant)
    consumer_key = auth_list[0].strip('\n')
    consumer_secret = auth_list[1].strip('\n')
    access_token_key = auth_list[2].strip('\n')
    access_token_secret = auth_list[3].strip('\n')
    api = twitter.Api(consumer_key=consumer_key,
                      consumer_secret=consumer_secret,
                      access_token_key=access_token_key,
                      access_token_secret=access_token_secret)
    # Clean up arguments so it's just a list of hashtags
    arguments.pop('--help', None)  # Remove --help option
    filename = arguments.pop('FILE', None) + ".mid"
    # Search for each of the given hashtags individually
    comboResults = []
    for key in sorted(set(arguments)):
        comboResults.append(api.GetSearch(term="%23" + arguments[key]))
    # Each hashtag gets its own note from the major blues scale
    # C, D, D#/Eb, E, G, A
    scale = ["C", "D", "D#", "E", "G", "A"]
    results = []
    for i, batch in enumerate(comboResults):
        for twt in batch:
            results.append((twt.created_at_in_seconds, scale[i]))
    # Sort notes in place by time
    results.sort(key=lambda tup: tup[0])
    # Build the space-separated note string
    notes = ""
    for entry in results:
        notes = notes + entry[1] + " "
    # Create MIDI file
    score = NoteSeq(notes)
    midi = Midi(1, tempo=90)
    midi.seq_notes(score, track=0)
    midi.write(filename)
def clean_freq(samples):
    """Render *samples* as chords via timidity and return the scaled
    per-sample channel frequencies."""
    sample_size = len(samples)
    midi = Midi(1, tempo=tempo)
    for offset, sample in enumerate(samples):
        chord = NoteSeq([Note(classes[i]) for i in sample])
        midi.seq_chords([chord], time=5 * offset)
    midi.write("temp.mid")
    subprocess.call("timidity temp.mid -Ow -o temp.wav".split(),
                    stdout=subprocess.PIPE)
    rate, data = wavfile.read('temp.wav')
    freqs = channel_freqs(data.T[0])[:sample_size * 10:10]
    return freqs.astype(int) / suppress_noise
def proc(centroid_list, num):
    """Turn centroid coordinates into notes, write temp.mid, and play it.

    num == 2: feed four consecutive centroids per step, keep both notes;
    num == 1: feed two centroids (zero-padded), keep the first note only.
    """
    num_notes = len(centroid_list)/4
    notes = []
    for i in range(num_notes):
        if num == 2:
            a, b = getSingleObj(centroid_list[i], centroid_list[i+1],
                                centroid_list[i+2], centroid_list[i+3])
            notes.append(a)
            notes.append(b)
        elif num == 1:
            a, b = getSingleObj(centroid_list[i], centroid_list[i+1], 0, 0)
            notes.append(a)
    # BUG FIX: removed the leftover pdb.set_trace() breakpoint, and replaced
    # Midi(i, ...) -- `i` was the stale loop index (0 when num_notes == 1,
    # an invalid track count) -- with a single track, the only one used.
    sequence = NoteSeq(notes)
    midi = Midi(1, tempo=90, instrument=0)
    midi.seq_notes(sequence, track=0)
    midi.write("temp.mid")
    play_music("temp.mid")
def make_midi(midi_path, notes, bpm=120):
    """Write (name, denominator) pairs to *midi_path*; 'r' means a rest.

    The trailing digit of a note name is its octave, shifted up by one
    for pyknon.
    """
    chromatic = 'c c# d d# e f f# g g# a a# b'.split()
    seq = NoteSeq()
    for entry in notes:
        dur = 1. / entry[1]
        if entry[0].lower() == 'r':
            seq.append(Rest(dur=dur))
        else:
            pitch_number = chromatic.index(entry[0][:-1].lower())
            seq.append(Note(pitch_number, octave=int(entry[0][-1]) + 1,
                            dur=dur))
    midi = Midi(1, tempo=bpm)
    midi.seq_notes(seq, track=0)
    midi.write(midi_path)
def generate(self):
    """Fill self.sequence with 1500 random note names whose octave number
    climbs over the run, then render and audition the result."""
    rand_list = []
    for i in range(1500):
        note = random.choice(self.note)
        # the octave register widens in four phases over the 1500 notes
        if i < 500:
            number = random.randrange(1, 5)
        elif i < 800:
            number = random.randrange(5, 10)
        elif i < 1000:
            number = random.randrange(10, 15)
        else:
            number = random.randrange(15, 18)
        rand_list.append(note + str(number))
    print(rand_list)
    self.sequence = rand_list
    notes = NoteSeq(' '.join(self.sequence))
    midi = Midi(1, tempo=500)
    midi.seq_notes(notes, track=0)
    midi.write(file_name)
    subprocess.call(["timidity", file_name])
def play_chord(notes, tempo, duration):
    """Render *notes* as one chord and play it via pygame.

    notes is a list of indices:
    [C1, C#1, D1, D#1, E1, F1, F#1, G1, G#1, A1, A#1, B1,
     C2, C#2, D2, D#2, E2, F2, F#2, G2, G#2, A2, A#2, B2]
    """
    chord = [Note(value=n % 12, octave=(n // 12) + 4, dur=duration)
             for n in notes]
    filepath = "chord.mid"
    midi = Midi(tempo=tempo)
    midi.seq_chords([NoteSeq(chord)])
    midi.write(filepath)
    pg.init()
    pg.mixer.music.load(filepath)
    pg.mixer.music.play()
    os.remove(filepath)
def test_output(x, g):
    """Render the activation matrix *x* (time x n_classes) as MIDI and play it.

    *g* is a per-timestep gain track. For each class, a note "starts" when
    the squared activation exceeds a gain-scaled volume floor and keeps
    accumulating duration/volume while it stays above it; when the
    activation drops, the accumulated note is emitted at the current time.
    """
    midi = Midi(1, tempo=out_tempo)
    for i in range(n_classes):
        dur = 0
        vol = 0
        for t, v in enumerate(x.T[i]):
            # volume floor for this timestep, scaled by relative gain
            min_volume = minimal_volume * g[t] / g.mean()
            if v * v > min_volume:
                # extend the running note, folding the new sample into the
                # volume accumulator (NOTE(review): the (dur + 1) rescaling
                # looks like an incremental average -- confirm intent)
                if dur:
                    vol = (vol / dur + v * v / min_volume) * (dur + 1)
                else:
                    vol = v * v / min_volume
                dur += 1
            elif dur:
                # activation fell below the floor: flush the note
                # (quarter beats per timestep, volume capped at 100)
                midi.seq_notes([
                    Note(classes[i], dur=dur / 4., volume=min(100, int(vol)))
                ], time=t)
                dur = 0
                vol = 0
    midi.write("output.mid")
    os.system("timidity output.mid")
def get(self, request, piece_id):
    """Return a JSON payload pointing at the rendered MIDI for a piece.

    Redirects anonymous or unauthorized users to the login page; otherwise
    renders the piece into MEDIA_ROOT once and serves the cached file on
    later requests.
    """
    if not request.user.is_authenticated:
        return JsonResponse({"url": reverse("login"), "type": "redirect"})
    piece = MusicPiece.objects.get(id=piece_id)
    # check if user has permission to download this file (in case JS was
    # modified). This doesn't prevent user from downloading file
    # via entering /media/<filename> url if the midi file has been already
    # generated (but such desperate must know not only the ID, but also
    # the title)
    if not (piece.is_public or piece.author == request.user):
        return JsonResponse({"url": reverse("login"), "type": "redirect"})
    piece = make_piece(piece)
    filename = '{}_{}.mid'.format(piece_id, piece.title)
    # if file exists, don't generate it again - as it's
    # not possible to modify piece in current version
    if not path.isfile(path.join(MEDIA_ROOT, filename)):
        # file doesn't exist - generate it!
        m = Midi(4, tempo=90)
        # NOTE(review): iterating piece.parts yields `voice` values that
        # are used to index piece.parts again -- this assumes parts is a
        # mapping (keys), not a list; confirm against make_piece.
        for idx, voice in enumerate(piece.parts):
            m.seq_notes(piece.parts[voice], idx)
        m.write(path.join(MEDIA_ROOT, filename))
    url = path.join(MEDIA_URL, filename)
    return JsonResponse({"url": url, "type": "file"})
def abstraction():
    """Assemble the piece "abstraction" from transformations of one motif.

    Lowercase names are cell-level material derived from motif `a`
    (transposition, inversion, duration stretching); uppercase names are
    the larger sections built from them.
    """
    a = NoteSeq("C4. Eb8")
    a1 = a.transp(5).stretch_dur(0.5)
    a2 = a.inv("Db''")
    a3 = a1.inv(8)
    A = a + a1 + a2 + a3
    A2 = A.transp(2)
    B = a1.transp(8) + a1.transp("Eb''")
    # c: the combined a + a1 material flattened to even sixteenth notes
    c = NoteSeq([Note(x.value, dur=0.125) for x in a + a1])
    # (stretch_inverval is pyknon's own spelling of the method name)
    C = (c.inv("Ab''") + c.inv(10) + c.stretch_inverval(2).transp(2) +
         c.inv("G''") + c.inv("E''").stretch_inverval(1) +
         c.inv("A").stretch_inverval(1))
    a4 = a.stretch_dur(2).inv(6)
    Part1 = A + NoteSeq("C2") + A2 + B
    Part2 = C + a4
    midi = Midi(1, tempo=90)
    midi.seq_notes(Part1 + Part2, track=0)
    midi.write("midi/abstraction.mid")
def midicreate(notelist, name, page):
    """Render *notelist* as a chord sequence to MIDI/<page>/<name>.mid.

    Each entry carries .scale (pitch or 'Rest'), .tempo (duration) and
    .harmony: harmony n in {2,3,4} stacks the next n entries into one
    chord (the follow-up members carry harmony 0 and are skipped);
    anything else becomes a single note.
    """
    NoteList = []
    for i, head in enumerate(notelist):
        if head.scale == 'Rest':
            chord = Rest(head.tempo)
        elif head.harmony == 0:
            # member of a chord already emitted by its head entry
            continue
        elif head.harmony in (2, 3, 4):
            # one comprehension replaces the three copy-pasted branches
            chord = NoteSeq([Note(notelist[i + k].scale, dur=head.tempo)
                             for k in range(head.harmony)])
        else:
            chord = NoteSeq([Note(head.scale, dur=head.tempo)])
        NoteList.append(chord)
    midi = Midi(1, tempo=117)
    # (removed the unused `checktract` variable from the original)
    midi.seq_chords(NoteList, track=0)
    midi.write(f"/home/ec2-user/Ourchord/MIDI/{page}/{name}.mid")  # path fix
# Microtonal tuning demo: two tracks play the same C major scale (the
# second delayed by two whole-note rests), and track 0 is retuned with a
# custom frequency table.
from pyknon.genmidi import Midi
from pyknon.music import NoteSeq

notes1 = NoteSeq("C4 D E F G A B C''")
notes2 = NoteSeq("r1 r1 C4 D E F G A B C''")
# (midi_note_number, frequency_hz) pairs applied to track 0; the trailing
# comment on each row is the standard equal-tempered frequency the note is
# detuned from (rows whose two numbers match are left untouched).
table = [
    (60, 261.63),  # 261.63
    (62, 280),     # 293.66
    (64, 333),     # 329.63
    (65, 349),     # 349.23
    (67, 391.99),  # 391.99
    (69, 444),     # 440.00
    (71, 510),     # 493.88
    (72, 523.25)   # 523.25
]
m = Midi(2, tempo=120)
m.seq_notes(notes1, track=0)
m.seq_notes(notes2, track=1)
m.change_tuning(0, table)
m.write("micro.mid")
def test_seq_chords(self):
    """seq_chords accepts a plain list of NoteSeq chords."""
    midi = Midi()
    midi.seq_chords([NoteSeq("C E G"), NoteSeq("G B D")])
def test_seq_chords_with_rest(self):
    """seq_chords tolerates a Rest mixed in among NoteSeq chords."""
    midi = Midi()
    midi.seq_chords([Rest(), NoteSeq("G B D")])
# Turn an image into a melody seeded by the image's own content.
import pic
import random
import sys
import os
import hashlib
# BUG FIX: Midi, Note and NoteSeq were used below without being imported.
from pyknon.genmidi import Midi
from pyknon.music import Note, NoteSeq

imgfile = sys.argv[1]
sizes = 50
here = pic.getImgAttr(imgfile)
# seed from the image content so every run on the same image is identical
random.seed(hashlib.new("md5", here).hexdigest())
# candidate five-note pitch sets, indexed by quantized image lightness
sett1 = [[11, 4, 6, 7, 2], [0, 2, 4, 7, 9], [1, 3, 6, 8, 10],
         [2, 4, 6, 7, 9], [2, 4, 7, 9, 11]]
diao = int((pic.getImgLight(imgfile) * 5))
notes1 = NoteSeq("")
j = 0
for i in here:
    # BUG FIX: the original also tested (j+1) % sizes < sizes, which is
    # always true (a value mod `sizes` is always smaller than `sizes`).
    if abs(here[j % sizes] - here[(j + 1) % sizes]) > 3:
        if (j % 12) % 2 == 0:
            notes1 = notes1 + Note(value=sett1[diao][random.randint(0, 4)],
                                   octave=random.randint(diao + 2, diao + 3),
                                   dur=0.08 * random.randint(0, 4),
                                   volume=127)
    j += 1
midi = Midi(1, tempo=90, instrument=0)
midi.seq_notes(notes1, track=0)
midi.write(imgfile + ".mid")
def test_write_midifile(self):
    """A sequence containing a rest can be written to a file object."""
    midi = Midi(1, tempo=133)
    midi.seq_notes(NoteSeq("D4 F#8 R A"), track=0)
    midi.write(tempfile.TemporaryFile())
#!/usr/bin/env python from pyknon.genmidi import Midi from pyknon.music import Note, NoteSeq filename = "key_of_A.mid" notes1 = NoteSeq("C#4' D8 E A4 C#") midi = Midi(1, tempo=90) midi.seq_notes(notes1, track=0) midi.write(filename) print "wrote ", filename
def gen_midi(filename, note_list):
    """Write *note_list* as a single 120-BPM track to *filename*."""
    song = Midi(tempo=120)
    song.seq_notes(note_list)
    song.write(filename)
#!/usr/bin/env python
# Minimal pyknon demo: write a four-note motif to demo.mid.
from pyknon.genmidi import Midi
from pyknon.music import NoteSeq

motif = NoteSeq("D4 F#8 A Bb4")
midi = Midi(1, tempo=90)
midi.seq_notes(motif, track=0)
midi.write("demo.mid")
def test_seq_notes(self):
    """Each track of a two-track Midi accepts its own sequence."""
    midi = Midi(2)
    for track_no, text in enumerate(("C D", "D E")):
        midi.seq_notes(NoteSeq(text), track=track_no)
#!/usr/bin/env python from pyknon.genmidi import Midi from pyknon.music import Note, NoteSeq notes1 = NoteSeq("D4' F#8 A D4") midi = Midi(1, tempo=90) midi.seq_notes(notes1, track=0) midi.write("octave_test2.mid") #################################### # a melody from Star Wars sw = NoteSeq( "D2, A G16 F#16 E8 D2' A4, G16 F#16 E8 D2' A4, G16 F#16 G8 E1" ) sw_midi = Midi(1, tempo=120) sw_midi.seq_notes(sw, track=0) sw_midi.write("sw.mid") #################################### # Beethoven's "Ode to Joy" theme, one track filename = "beethoven_one_track.mid" bn_part_a = NoteSeq( "B4 B C'' D D C B' A G G A B B A A2 B4 B C'' D D C B, A G G A B A G G" ) bn_part_b = NoteSeq("A4 A B G A B8 C'' B4 G A B8 C'' B4 A G A D") bn_midi = Midi(1, tempo=120) bn_midi.seq_notes(bn_part_a + bn_part_b, track=0)
def foo():
    """Walkthrough of the pyknon API: Note/NoteSeq construction, sequence
    concatenation, and writing notes and chords to MIDI files."""
    notes = NoteSeq("C4 D4")
    pprint(notes.verbose)
    # Value (1-12), Octave (default=5) and Duration
    note1 = Note("C4")
    pprint('Note1 verbose = ' + note1.verbose)
    # <NoteSeq: [<Note: 0, 5, 0.25>, <Note: 2, 5, 0.25>]>
    pprint('Note1 name = ' + note1.name)
    note1.midi_number
    note1.midi_dur
    note_blank = Note()
    pprint('Note_blank = ' + note_blank.verbose)
    # <Note: 0, 5, 0.25> (pitch, octave, duration)
    note = Note(2, 4, 1, 100)
    # Value, Octave, Duration, Volume
    pprint('Programmatic note = ' + note.verbose)
    # <Note: 1, 4, 1>
    pprint('Programmatic note name = ' + note.name)
    # notes = NoteSeq("D4 F#8 A Bb4")
    # note.harmonize(notes)
    scale = NoteSeq('G4')
    scale2 = NoteSeq('A4')
    scale += scale2
    pprint('Scale: ' + scale.verbose)
    # D major scale degrees built programmatically into a NoteSeq
    Dmajor = [2,4,6,7,9,11,13,14]
    foolist = []
    for degree in Dmajor:
        foolist.append(Note(degree, 5, .25, 100))
    pprint(foolist)
    fooSeq = NoteSeq(foolist)
    pprint('List of notes in NoteSeq: ' + fooSeq.verbose)
    # [<Note: 3, 5, 1>, <Note: 4, 5, 1>, etc.]
    midi = Midi(1, tempo=60)
    midi.seq_notes(fooSeq, track=0)
    midi.write("foo.mid")
    seq2 = NoteSeq("C4 D8 E8 C4 D8 E8 C4 D8 E8")
    midi2 = Midi(1, tempo=60)
    midi2.seq_notes(seq2, track=0)
    midi2.write("foo1.mid")
    # chord progression written with seq_chords
    chord1 = NoteSeq("C E G")
    chord2 = NoteSeq("D F A")
    chord3 = NoteSeq("E G B")
    chord4 = NoteSeq("F A C")
    seqlist = [chord1, chord2, chord3, chord4]
    pprint(seqlist)
    midi3 = Midi(1, tempo=60)
    midi3.seq_chords(seqlist, track=0)
    midi3.write('foochord.mid')
# mode == '-i' | '-m'
modes = ['-i', '-m']
mode = sys.argv[1]
if mode not in modes:
    raise Exception('wrong mode')
filepath = sys.argv[2]
if mode == '-i':
    image = Image.open(filepath)
    width, height = image.size
    print("size: {0} * {1}".format(width, height))
    pixels = image.load()
    # Main loop: generate notes from pixel RGB values
    print("Generating note sequence...")
    notes = pix2noteseq(pixels, width, height)
    # Generate and write the midi file to disk
    print("Generating the Midi file...")
    midi = Midi(number_tracks=1, tempo=90)
    midi.seq_notes(notes, track=0)
    print("Writing Midi file...")
    # reuse `filepath` instead of re-reading sys.argv[2]
    midi.write(filepath[:-3] + 'mid')
    print("Done.")
elif mode == '-m':
    # TODO
    pass
dur = part_durs[0][loc+j] notes.append(Note(midi_number % 12, midi_number / 12, dur = dur)) elif (part_positions[0][loc] == .5 and part_offsets[0][loc] % 6 == 0 and random.random() > weak_beat_threshold): beat_class_iter.next() beat_class_iter.next() loc += 2 offset = [0] for el in random.choice(weighted_weak_beats): offset.append(el) for j in range(ngram_length): midi_number = part_midis[0][loc+j] + offset[j] dur = part_durs[0][loc+j] notes.append(Note(midi_number % 12, midi_number / 12, dur = dur)) else: midi_number = part_midis[0][loc] dur = part_durs[0][loc] notes.append(Note(midi_number % 12, midi_number / 12, dur = dur)) # pyknon is a horrible horrible library but it gets the job done! midi = Midi(1, tempo = 120) midi.seq_notes(notes, track = 0) midi.write(os.path.join(basedir, str(song_count)+".mid"))
# Map averaged pixel-group color values onto notes within a fixed octave
# span, then render and play the result. (Python 2; `colors`, `notes`,
# `desired_bpm` and `music` come from earlier in the file.)
min_octave = 3
max_octave = 7
octave_range = (max_octave - min_octave) + 1
for pixel_group in colors:
    # fold the average channel value into 0..(12 * octave_range)-1
    value = (sum(pixel_group) / len(pixel_group)) % (12 * octave_range)
    octave = min_octave
    # Duration of 0.25 means 1 note per beat
    duration = 0.25
    notes.append(Note(value=value, octave=octave, dur=duration))
# Alternative grouping strategy, kept for reference:
# for pixels in range(0, number_of_pixels, pixels_per_beat):
#     group = colors[pixels:pixels+pixels_per_beat]
#     #Get the number that occurs most in the group, condense it into the range, and set it's duration to the percentage of times it occurs
#     max_item, occurs = findmaxoccurence(group)
#     value = max_item % (12 * octave_range)
#     duration = float(occurs)/float(len(group))
#     #duration = legalise_note(duration)
#     notes.append(Note(value=value, dur=duration, octave=min_octave))
midi = Midi(tempo=desired_bpm)
midi.seq_notes(NoteSeq(notes))
midi.write("monaLisa(%sbpm).midi" % (desired_bpm))
print "Playing music - Song Name"
music.load("monaLisa(%sbpm).midi" % (desired_bpm))
music.play()
while music.get_busy():
    # Make sure the program stays open to allow the song to actually play
    pass
def tutorial():
    """Write the pyknon tutorial motif to demo.mid."""
    motif = NoteSeq("D4 F#8 A Bb4")
    midi = Midi(1, tempo=60)
    midi.seq_notes(motif, track=0)
    midi.write("demo.mid")
# Rasterize each command-line sentence as text pixels, then turn dark
# pixels into harmonized notes (one Midi object per sentence).
import sys
import random
from pyknon.genmidi import Midi
from pyknon.music import Note, NoteSeq, Rest
from PIL import ImageFont, ImageDraw, Image

font = ImageFont.truetype("/usr/share/fonts/dejavu/DejaVuSans-Oblique.ttf", 12)
sentences = sys.argv[1:]
# one instrument per sentence/track
instruments = (70,1,25,50,80)
random.seed(sentences[0])
for track, sentence in enumerate(sentences):
    # if seed_per_sentence:
    #     random.seed(sentence)
    midi = Midi(tempo=90, instrument=instruments[track])
    # draw the sentence onto an 800x12 greyscale strip
    image = Image.new("L", (800,12), color="white")
    draw = ImageDraw.Draw(image)
    draw.text((0, -1), sentence, font=font)
    notes_list = []
    for x in range(0,800):
        notes = []
        for y in range(0,12):
            # darker pixel -> louder note
            vol = int((255 - image.getpixel((x, y)))/2)
            # NOTE(review): vol is at most 127 ((255-0)/2), so this guard
            # can never fire -- dead code, possibly a leftover.
            if vol == 255:
                continue
            if vol:
                notes.append(Note(y, 5, 1/16, vol))
        if len(notes):
            notes_list.append(NoteSeq(random.choice(notes).harmonize(NoteSeq(notes))))
    # NOTE(review): nothing is written in this excerpt; midi/notes_list are
    # presumably consumed further down in the original file.
# Interleave the three text-line bit streams, each shifted into its own
# pitch register, and write the result as sixteenth notes.
seqLine3 = text_to_bits(LINE3)
seq1 = list(seqLine1)
seq2 = list(seqLine2)
seq3 = list(seqLine3)
finalSequence = []
# one pass replaces the original three loops: the per-index conversions,
# shifts and interleaving are independent of each other
for i in range(len(seq1)):
    seq1[i] = int(seq1[i]) + SHIFT1
    seq2[i] = int(seq2[i]) + SHIFT2
    seq3[i] = int(seq3[i]) + SHIFT3
    finalSequence.append(seq1[i])
    finalSequence.append(seq2[i])
    finalSequence.append(seq3[i])
midi = Midi(tempo=SPEED)
midi.seq_notes(
    NoteSeq([Note(value=x, octave=4, dur=1 / 16, volume=127)
             for x in finalSequence]))
midi.write("file.mid")
# Load a rock-corpus melody file (one "duration midi-number" row per note)
# and write it out as a MIDI file. Python 2 script (print statement).
import music21
import numpy as np
from pyknon.genmidi import Midi
from pyknon.music import *
import os
#codecs.open("new.txt", encoding="utf-8").read()

basedir = "C:\\Users\\Dakota\\rock\\rock_corpus_v2-1"
with open(os.path.join(basedir,"a.txt")) as f:
    s = np.loadtxt(f)
notes = []
for note in s:
    dur = note[0]
    # split the MIDI note number into pitch class and octave
    # (int/int floors under Python 2, giving the octave index)
    pc = int(note[1]) % 12
    octave = int(note[1]) / 12
    print note
    notes.append(Note(value = pc, octave = octave, dur = dur))
midi = Midi(1, tempo = 90)
midi.seq_notes(notes, track = 0)
midi.write("hi.mid")
def test_seq_notes_with_more_tracks_than_exists(self):
    """seq_notes must raise MidiError when the track index exceeds the
    track count the Midi was created with."""
    midi = Midi(1)
    with self.assertRaises(MidiError):
        midi.seq_notes(NoteSeq("C D"), track=0)
        # track 1 does not exist in a single-track Midi
        midi.seq_notes(NoteSeq("D E"), track=1)
# What happens when you change the second-to-last line to # midi.seq_notes(seq2, time=3) # or # midi.seq_notes(seq2, time=4) # Import pyknon from the git submodule in a subdirectory import sys sys.path.append('./pyknon') from pyknon.genmidi import Midi from pyknon.music import NoteSeq seq1 = NoteSeq("C D E") seq2 = NoteSeq("F G A") # Q. In the following code, what is the order of the notes in the MIDI file? midi = Midi() midi.seq_notes(seq1) midi.seq_notes(seq2) # A. both seq1 and seq2 are written to the same track (1). # The notes of seq2 sequence override notes at the same position in seq1. midi.write("exercise7-F-G-A.mid") # Q. What happens when you change the second-to-last line to # midi.seq_notes(seq2, time=3) midi = Midi() midi.seq_notes(seq1) midi.seq_notes(seq2, time=3) # A. Now the sequence seq2 is offset by 3 beats, # the two sequences seq1 and seq2 appear concatenated. midi.write("exercise7-C-D-E-F-G-A.mid")