def strToMidi(msg, fileName):
    """Encode the text *msg* as a melody and write it to *fileName*.

    Each hex digit of the message's byte representation maps to a fixed
    note; when a digit repeats, a marker note (E in octave 7) is
    inserted first so consecutive duplicates stay distinguishable.
    """
    from pyknon.genmidi import Midi
    from pyknon.music import NoteSeq
    from pyknon.music import Note
    # Hex digit -> note: C major scale climbing through octaves 5-7.
    notes = {
        '0': Note(value=0, octave=5),    # Do
        '1': Note(value=2, octave=5),    # Re
        '2': Note(value=4, octave=5),    # Mi
        '3': Note(value=5, octave=5),    # Fa
        '4': Note(value=7, octave=5),    # Sol
        '5': Note(value=9, octave=5),    # La
        '6': Note(value=11, octave=5),   # Si
        '7': Note(value=0, octave=6),
        '8': Note(value=2, octave=6),
        '9': Note(value=4, octave=6),
        'a': Note(value=5, octave=6),
        'b': Note(value=7, octave=6),
        'c': Note(value=9, octave=6),
        'd': Note(value=11, octave=6),
        'e': Note(value=0, octave=7),
        'f': Note(value=2, octave=7),
    }
    # str.encode('hex') was Python-2-only and raises LookupError on
    # Python 3; bytes.hex() is the supported replacement.
    msgHex = msg.encode('utf-8').hex()
    sequence = NoteSeq('C1')  # leading whole-note sentinel
    before = ''
    for digit in msgHex:
        if before == digit:
            # Repeat marker so decoding can tell "77" apart from "7".
            sequence.append(Note(value=4, octave=7))
        sequence.append(notes[digit])
        before = digit
    midi = Midi(1, tempo=290)
    midi.seq_notes(sequence, track=0)
    midi.write(fileName)
def generate(key, outfile):
    """Build "inter.mid" from the intermediate files "c_text" (note
    values) and "c_dur" (durations), then hand it to meta.meta() for
    post-processing into *outfile*; the temp files are removed.

    key: scale identifier forwarded to meta.meta().
    """
    # scale = open("./scales/"+key,"r")
    notes1 = []
    # Context managers guarantee the files are closed even on error
    # (the original leaked both handles if an exception was raised).
    with open("c_text", "r") as cipher, open("c_dur", "r") as dur:
        # Read the two files in lockstep: one value + one duration per note.
        while True:
            ci = cipher.readline()
            du = dur.readline()
            if not ci or not du:
                break
            notes1.append(Note(value=int(ci), dur=float(du)))
    for note in notes1:
        print(note)
    midi = Midi(1, tempo=80)
    midi.seq_notes(notes1, track=0)
    midi.write("inter.mid")
    os.remove("c_text")
    os.remove("c_dur")
    meta.meta("inter.mid", key, outfile)
def makemidi(self):
    """Render self.song to a MIDI file at self.path using self.bpm.

    self.song is a sequence of (pitch_name, denominator) items where
    pitch_name is like 'c#4' ('c#10' for the two-digit octave 10) and
    'r' is a rest; (name, 4) is a quarter note.
    """
    note_names = 'c c# d d# e f f# g g# a a# b'.split()
    result = NoteSeq()
    for s in self.song:
        duration = 1. / s[1]
        if s[0] == 'r':
            result.append(Rest(dur=duration))
        else:
            # Octave 10 is the only two-digit octave, so split the
            # pitch letters from the octave digits accordingly.  (The
            # original kept a literal set of all 'x10' names and tested
            # membership via {s[0]}.issubset(...); plain suffix logic is
            # equivalent and removes the duplicated branch.)
            digits = 2 if s[0].endswith('10') else 1
            md = s[0][:-digits]
            # pyknon octaves are offset by one from the textual names.
            octave = int(s[0][-digits:]) + 1
            note_number = note_names.index(md)
            result.append(Note(note_number, octave=octave, dur=duration))
    midi = Midi(number_tracks=1, tempo=self.bpm)
    midi.seq_notes(result, track=0)
    midi.write(self.path)
def gen_midi(rm_seq, cprogr, cprog3, cprog9, filename):
    """Write the four note sequences to *filename*, one per track, at
    120 bpm."""
    midi = Midi(number_tracks=4, tempo=120)
    for track_no, seq in enumerate((rm_seq, cprogr, cprog3, cprog9)):
        midi.seq_notes(seq, track=track_no)
    midi.write(filename)
def saveSong(self, filename):
    """Write the song to *filename*.

    Each internal track is first written to its own numbered MIDI file;
    the files are then folded together pairwise with the external
    midisox.py tool, intermediates are deleted, and the final combined
    file is renamed to *filename*.
    """
    def nameFile(filename, iterator):
        # "song.mid" + 2 -> "song2.mid": iterator inserted before the extension.
        return "".join(filename.split(".")[:-1]) + str(
            iterator) + "." + filename.split(".")[-1]
    # Flush any pending notes into a track before writing.
    if self.__notes != []:
        self.__addTrack()
    # for track in self.__tracks:
    #     track.addPause()
    fileNameIterator = 0
    # One single-track MIDI file per internal track.
    for track in self.__tracks:
        midi = Midi(number_tracks=1, instrument=track.getInstrument().value)
        notes = NoteSeq(track.getNotes())
        midi.seq_notes(notes, track=0)
        midi.write(nameFile(filename, fileNameIterator))
        fileNameIterator += 1
    fileNameIterator -= 1  # now the index of the last file written
    if fileNameIterator > 0:
        # Repeated pairwise concatenation: each step merges file i into
        # file i+1 and removes file i.
        # NOTE(review): filename is interpolated into a shell command
        # unescaped — spaces/metacharacters in it would break or inject.
        for i in range(fileNameIterator):
            os.system("python midisox.py --combine concatenate " +
                      nameFile(filename, i) + " " +
                      nameFile(filename, i + 1) + " " +
                      nameFile(filename, i + 1))
            os.remove(nameFile(filename, i))
    if os.path.exists(filename):
        os.remove(filename)
    os.rename(nameFile(filename, fileNameIterator), filename)
def pic2music(imgfile):
    """Generate a MIDI melody from an image and write "<imgfile>.mid".

    Overall lightness picks the pitch set and base octave; a note is
    emitted when neighbouring image values differ enough.
    """
    sizes = 50  # window length used when comparing neighbouring values
    lightness, imageAttr = pic.getImageInfo(imgfile)
    # Seed the RNG from the image digest so the same picture always
    # produces the same melody.
    random.seed(hashlib.new("md5", imageAttr).hexdigest())
    # Five candidate five-note pitch sets; lightness (presumably 0..1 —
    # confirm against pic.getImageInfo) selects one.
    sett1 = [[11, 4, 6, 7, 2], [0, 2, 4, 7, 9], [1, 3, 6, 8, 10],
             [2, 4, 6, 7, 9], [2, 4, 7, 9, 11]]
    j = 0
    notes1 = NoteSeq("")
    diao = int((lightness * 5))     # pitch-set index
    octave = int((lightness * 10))  # base octave
    for i in imageAttr:
        # NOTE(review): (j+1) % sizes < sizes is always true, so this
        # guard filters nothing — confirm the intended window test.
        if (j + 1) % sizes < sizes:
            # Only sound a note where adjacent values contrast enough.
            if abs(imageAttr[j % sizes] - imageAttr[(j + 1) % sizes]) > 3:
                if ((j % 12) % 2 == 0):
                    notes1 = notes1 + Note(value=sett1[diao][random.randint(0, 4)],
                                           octave=random.randint(octave, octave + 3),
                                           dur=0.08 * random.randint(0, 4),
                                           volume=127)
                # else :
                #     notes1=notes1+Note(value=1, octave=1,dur=0.01, volume=0)
        j += 1
    midi = Midi(1, tempo=90, instrument=0)
    midi.seq_notes(notes1, track=0)
    midi.write(imgfile + ".mid")
def write(self, filename, sequences):
    """Render *sequences* into *filename*, one track per sequence, at
    self.bpm."""
    midi = Midi(len(sequences), tempo=self.bpm)
    # Build every track first (get_track is 1-indexed), then lay them out.
    tracks = [self.get_track(seq, idx + 1) for idx, seq in enumerate(sequences)]
    for idx, track_notes in enumerate(tracks):
        midi.seq_notes(track_notes, track=idx)
    midi.write(filename)
def demo():
    """Write a short two-track demo to midi/demo.mid at 90 bpm."""
    melody = NoteSeq("D4 F#8 A Bb4")
    # The same kind of phrase spelled programmatically: (value, duration).
    spelled = NoteSeq([Note(2, dur=1/4), Note(6, dur=1/8),
                       Note(9, dur=1/8), Note(10, dur=1/4)])
    midi = Midi(number_tracks=2, tempo=90)
    midi.seq_notes(melody, track=0)
    midi.seq_notes(spelled, track=1)
    midi.write("midi/demo.mid")
def crab_canon(filename):
    """Write a crab canon built from "<filename>.notes": the theme on
    the default track against its own retrograde an octave lower on
    track 1; output goes to "<filename>.mid"."""
    theme = NoteSeq("file://%s.notes" % filename)
    answer = theme.transposition(-12).retrograde()
    midi = Midi(2, tempo=120)
    midi.seq_notes(theme)
    midi.seq_notes(answer, track=1)
    midi.write("%s.mid" % filename)
def crab_canon():
    """Write the "canon-crab" theme against its retrograde an octave
    lower to midi/canon-crab.mid."""
    theme2 = NoteSeq("file://canon-crab")
    answer = theme2.transposition(-12).retrograde()
    midi = Midi(2, tempo=120)
    midi.seq_notes(theme2)
    midi.seq_notes(answer, track=1)
    midi.write("midi/canon-crab.mid")
def from_pitch_track(times, pitch, sample_rate, filename="tmp.midi"):
    """Convert a pitch track into a single-track MIDI file.

    times/pitch come from a pitch tracker; NaN pitch values become
    rests.  Returns the written *filename*.
    """
    midi_notes = [to_midi(x) for x in pitch]
    # Step is half the frame hop in seconds — presumably to match
    # compact()'s merging granularity; confirm against compact().
    notes = compact(midi_notes, step=(times[1] - times[0]) / sample_rate / 2)
    # Durations rounded to 4 decimals; NaN pitches become rests.
    track0 = [Note(x, 0, round(t, 4)) if not np.isnan(x) else Rest(dur=round(t, 4))
              for x, t in notes]
    m = Midi(1, tempo=90)
    m.seq_notes(track0, track=0)
    m.write(filename)
    return filename
def main():
    """Write ten repetitions of a fixed phrase to demo.mid and play it."""
    note = "D4 E#10 F#8 Gg4 A Bb4 "
    # xrange() no longer exists in Python 3, and repeated string += is
    # quadratic; plain string repetition is both portable and O(n).
    notes = note * 10
    notes1 = NoteSeq(notes)
    midi = Midi(1, tempo=90, instrument=0)
    midi.seq_notes(notes1, track=0)
    midi.write("demo.mid")
    play_music("demo.mid")
def josquin():
    """Layer three variants of the Josquin theme on separate tracks and
    write midi/josquin.mid at 80 bpm."""
    main_theme = NoteSeq("file://josquin")
    voices = [
        main_theme.stretch_dur(0.66666),                    # faster statement
        main_theme[0:24].stretch_dur(2).transp(Note("C")),  # augmented + transposed
        main_theme[0:50],                                   # plain excerpt
    ]
    midi = Midi(3, tempo=80)
    for track_no, voice in enumerate(voices):
        midi.seq_notes(voice, track=track_no)
    midi.write("midi/josquin.mid")
def create_music(note_seq, given_tempo, given_track, song_name):
    """Render *note_seq* into assets/music/<song_name>.mid at the given
    tempo on the given track.

    The target file must already exist (the original behaviour only
    overwrites existing placeholder files); otherwise a message is
    printed and nothing is written.
    """
    notes = NoteSeq(note_seq)
    midi = Midi(1, tempo=given_tempo)
    midi.seq_notes(notes, track=given_track)
    # The original built "assets\music\/..." — "\m" is an invalid escape
    # (a literal backslash on current Pythons) and the separators were
    # mixed.  os.path.join is correct on every platform.
    file = os.path.join("assets", "music", song_name + ".mid")
    # Check if file exists
    if os.path.isfile(file):
        midi.write(file)
    else:
        print(song_name + ".mid Does not exist")
def canon():
    """Assemble the "Quaerendo invenietis" canon — a leader voice plus
    an inverted follower, offset in time — and write midi/canon.mid."""
    theme1 = NoteSeq("file://canon-quaerendo-invenietis")
    leader = theme1 + theme1[2:] + theme1[2:11]
    follower_material = theme1 + theme1[2:] + theme1[2:4]
    follower = follower_material.inversion_startswith(Note(2, 4))
    midi = Midi(2, tempo=150)
    # The two voices enter at different times (beats 3 and 13).
    midi.seq_notes(leader, time=3, track=0)
    midi.seq_notes(follower, time=13, track=1)
    midi.write("midi/canon.mid")
def canon(operation, operationName):
    """Write the Bach canon with *operation* applied to the follower
    voice; the output file is named after *operationName*."""
    theme = NoteSeq("file://exercise12-bach-canon-quaerendo-invenietis.notes")
    leader = theme + theme[2:] + theme[2:11]
    follower = operation(theme + theme[2:] + theme[2:4])
    midi = Midi(2, tempo=150)
    # Staggered entries: leader at beat 3, follower at beat 13.
    midi.seq_notes(leader, time=3, track=0)
    midi.seq_notes(follower, time=13, track=1)
    midi.write("exercise12-canon-by-%s.mid" % operationName)
def writeMidi(path, seq):
    """Write *seq* to *path*.

    *seq* is either a flat sequence of Note objects (single track) or a
    sequence of tracks, each itself a sequence of notes.
    """
    # isinstance is the correct type test — type(x) == Note would
    # reject Note subclasses.
    if isinstance(seq[0], Note):
        print("One Track")
        midi = Midi(1, tempo=100)
        midi.seq_notes(seq)
    else:
        midi = Midi(len(seq), tempo=100)
        # enumerate replaces the hand-rolled counter.
        for i, track in enumerate(seq):
            midi.seq_notes(track, track=i)
            print(track)
    midi.write(path)
def create_midi_with_time(music, beat_of_music):
    """Write LEN_OF_MUSIC events to midi/markov_Gavotte_test1.mid;
    pitch 101 denotes a rest, durations come from beat_of_music."""
    global LEN_OF_MUSIC
    events = [
        Rest(dur=beat_of_music[i]) if music[i] == 101
        else Note(music[i], dur=beat_of_music[i])
        for i in range(LEN_OF_MUSIC)
    ]
    seq = NoteSeq(events)
    midi = Midi(number_tracks=1, tempo=90)
    midi.seq_notes(seq, track=0)
    midi.write("midi/markov_Gavotte_test1.mid")
def foo():
    """Walk through pyknon basics: Note/NoteSeq construction, verbose
    representations, sequence concatenation, and writing note, melody
    and chord files (foo.mid, foo1.mid, foochord.mid)."""
    notes = NoteSeq("C4 D4")
    pprint(notes.verbose)
    #Value (1-12), Octave (default=5) and Duration
    note1 = Note("C4")
    pprint('Note1 verbose = ' + note1.verbose)
    #<NoteSeq: [<Note: 0, 5, 0.25>, <Note: 2, 5, 0.25>]>
    pprint('Note1 name = ' + note1.name)
    # Attribute access only — results are discarded (demo of the API).
    note1.midi_number
    note1.midi_dur
    note_blank = Note()
    pprint('Note_blank = ' + note_blank.verbose)
    #<Note: 0, 5, 0.25> (pitch, octave, duration)
    note = Note(2, 4, 1, 100)  #Value, Octave, Duration, Volume
    pprint('Programmatic note = ' + note.verbose)
    #<Note: 1, 4, 1>
    pprint('Programmatic note name = ' + note.name)
    #notes = NoteSeq("D4 F#8 A Bb4")
    #note.harmonize(notes)
    scale = NoteSeq('G4')
    scale2 = NoteSeq('A4')
    scale += scale2  # NoteSeq supports concatenation via +=
    pprint('Scale: ' + scale.verbose)
    # Scale degrees of D major expressed as pitch values.
    Dmajor = [2, 4, 6, 7, 9, 11, 13, 14]
    foolist = []
    for degree in Dmajor:
        foolist.append(Note(degree, 5, .25, 100))
    pprint(foolist)
    fooSeq = NoteSeq(foolist)
    pprint('List of notes in NoteSeq: ' + fooSeq.verbose)
    #[<Note: 3, 5, 1>, <Note: 4, 5, 1>, etc.]
    midi = Midi(1, tempo=60)
    midi.seq_notes(fooSeq, track=0)
    midi.write("foo.mid")
    seq2 = NoteSeq("C4 D8 E8 C4 D8 E8 C4 D8 E8")
    midi2 = Midi(1, tempo=60)
    midi2.seq_notes(seq2, track=0)
    midi2.write("foo1.mid")
    # Chords: a list of NoteSeqs, one chord each, written with seq_chords.
    chord1 = NoteSeq("C E G")
    chord2 = NoteSeq("D F A")
    chord3 = NoteSeq("E G B")
    chord4 = NoteSeq("F A C")
    seqlist = [chord1, chord2, chord3, chord4]
    pprint(seqlist)
    midi3 = Midi(1, tempo=60)
    midi3.seq_chords(seqlist, track=0)
    midi3.write('foochord.mid')
def genMusic(sentiment):
    """Generate a random melody whose key, length and tempo derive from
    *sentiment*, write it to midi/audio.mid and play it to completion."""
    key = genKey(sentiment)
    length = genLength(sentiment)
    notes = randomSeq(length, key, durations)
    tempo1 = genTempo(sentiment)
    midi = Midi(1, tempo=tempo1)
    midi.seq_notes(notes, track=0)
    midi.write("midi/audio.mid")
    pygame.init()
    pygame.mixer.music.load("midi/audio.mid")
    pygame.mixer.music.play()
    # Block until playback finishes, polling once per second.
    while pygame.mixer.music.get_busy():
        pygame.time.wait(1000)
def main(arguments):
    """Turn Twitter search results for the given hashtags into a MIDI
    melody: each hashtag gets one note of the C major blues scale and
    the notes are ordered by tweet timestamp.

    arguments: docopt-style dict; 'FILE' names the output (".mid" is
    appended), remaining keys are hashtags to search for.
    """
    # Authenticate on Twitter; credentials live one per line in auth.txt.
    auth_file = "auth.txt"
    with open(auth_file) as f:
        auth_list = f.readlines()
    # (the original also called f.close() inside the with-block — the
    # context manager already closes the file, so that was redundant)
    consumer_key = auth_list[0].strip('\n')
    consumer_secret = auth_list[1].strip('\n')
    access_token_key = auth_list[2].strip('\n')
    access_token_secret = auth_list[3].strip('\n')
    api = twitter.Api(consumer_key=consumer_key,
                      consumer_secret=consumer_secret,
                      access_token_key=access_token_key,
                      access_token_secret=access_token_secret)
    # Clean up arguments so it's just a list of hashtags
    arguments.pop('--help', None)  # Remove --help option
    filename = arguments.pop('FILE', None) + ".mid"
    # Search for each of the given hashtags individually ("%23" == '#').
    comboResults = []
    for key in sorted(set(arguments)):
        comboResults.append(api.GetSearch(term="%23" + arguments[key]))
    # Create list of (time, note) pairs: each hashtag has its own note.
    # Major blues scale C, D, D#/Eb, E, G, A
    scale = ["C", "D", "D#", "E", "G", "A"]
    results = []
    for i in range(len(comboResults)):
        for twt in comboResults[i]:
            results.append((twt.created_at_in_seconds, scale[i]))
    # Sort notes in place by time
    results.sort(key=lambda tup: tup[0])
    # str.join avoids the quadratic string += loop; keeps the original
    # trailing-space format.
    notes = "".join(note + " " for _, note in results)
    # Create MIDI file
    score = NoteSeq(notes)
    midi = Midi(1, tempo=90)
    midi.seq_notes(score, track=0)
    midi.write(filename)
def generateMid(tune, tuneNum, lastTune=False):
    """Write *tune* into the current generation's directory under
    midi/<experimentName>/gen<generationCount>/.

    The final tune of a generation gets a "*tuneN*.mid" filename so it
    stands out in directory listings.
    """
    global generationCount
    global ins
    midi = Midi(instrument=ins)
    midi.seq_notes(tune.notes)
    path = "midi/" + experimentName + "/gen" + str(generationCount) + "/"
    if not os.path.exists(path):
        os.makedirs(path)
    # Reuse the already-built directory path (the original rebuilt the
    # same concatenation a second time in the lastTune branch).
    if lastTune:
        midi.write(path + "*tune" + str(tuneNum) + "*.mid")
    else:
        midi.write(path + "tune" + str(tuneNum) + ".mid")
def generateMid(tune, tuneNum, lastTune=False):
    """Write *tune* into the current generation's directory under
    midi/<experimentName>/gen<generationCount>/.

    The final tune of a generation gets a "*tuneN*.mid" filename so it
    stands out in directory listings.
    """
    global generationCount
    global ins
    midi = Midi(instrument=ins)
    midi.seq_notes(tune.notes)
    path = "midi/" + experimentName + "/gen" + str(generationCount) + "/"
    if not os.path.exists(path):
        os.makedirs(path)
    # Reuse the already-built directory path (the original rebuilt the
    # same concatenation a second time in the lastTune branch).
    if lastTune:
        midi.write(path + "*tune" + str(tuneNum) + "*.mid")
    else:
        midi.write(path + "tune" + str(tuneNum) + ".mid")
def generatingFile(length, name):
    """
    Generate a 3-track piece and render it to "<name>.mp3":
    one track of chords, one of high single notes, one of low single notes.

    length: how long one bar can be, e.g. 1 -> 4/4, 0.75 -> 3/4, 0.5 -> 2/4.
    name:   basename for the intermediate .mid/.wav and the final .mp3.
    """
    midi = Midi(3, tempo=60)
    barSeqnotesLow = generatingBarsNotes(length, dataBaseNotesLow)
    barSeqnotesHigh = generatingBarsNotes(length, dataBaseNotesHigh)
    barSeqchords = generatingBarChords(length, dataBaseChords)
    midi.seq_notes(barSeqnotesLow, track=0)
    midi.seq_notes(barSeqnotesHigh, track=1)
    midi.seq_chords(barSeqchords, track=2)
    midi.write(name + ".mid")
    # Render MIDI -> WAV (fluidsynth) -> MP3 (lame), then delete the
    # intermediates.  NOTE(review): `name` is interpolated into shell
    # commands unescaped — shell metacharacters in it would inject.
    os.system('fluidsynth -F %s.wav %s %s.mid' % (name, soundfont, name))
    os.system('lame --preset insane %s.wav' % name)
    os.system('rm -Rf %s.wav' % name)
    #os.system('timidity -Or -o - %s.mid | lame -r - %s.mp3' %(name, name))
    os.system('rm -Rf %s.mid' % name)
def harmonize_scale(forte):
    """Write midi/scales.midi: a pitch-class set played as a scale,
    followed by its harmonizations in 3rds, 4ths and 5ths laid out one
    after another in time.

    forte: a Forte-number key into pcset.PC_SETS.
    """
    pitch_set = pcset.PC_SETS[forte]
    scale = numbers_to_noteseq(pitch_set)
    midi = Midi()
    # seq_notes/seq_chords presumably return the end time of what they
    # wrote, so each section starts one beat after the previous —
    # confirm against the pyknon version in use.
    t0 = midi.seq_notes(scale)
    t1 = midi.seq_chords(scale.harmonize(interval=3), time=t0 + 1)
    t2 = midi.seq_chords(scale.harmonize(interval=4), time=t1 + 1)
    midi.seq_chords(scale.harmonize(interval=5), time=t2 + 1)
    midi.write("midi/scales.midi")
def proc(centroid_list, num):
    """Convert centroid values into notes — pairs when num == 2,
    singles when num == 1 — then write temp.mid and play it.
    """
    # // keeps this an int: with plain / range() raises TypeError on
    # Python 3.
    num_notes = len(centroid_list) // 4
    notes = []
    for i in range(num_notes):
        if num == 2:
            a, b = getSingleObj(centroid_list[i], centroid_list[i + 1],
                                centroid_list[i + 2], centroid_list[i + 3])
            notes.append(a)
            notes.append(b)
        elif num == 1:
            a, b = getSingleObj(centroid_list[i], centroid_list[i + 1], 0, 0)
            notes.append(a)
    # (removed the stray pdb.set_trace() left over from debugging)
    sequence = NoteSeq(notes)
    # One track is written, so create a single-track file — the original
    # accidentally reused the loop index i as the track count.
    midi = Midi(1, tempo=90, instrument=0)
    midi.seq_notes(sequence, track=0)
    midi.write("temp.mid")
    play_music("temp.mid")
def make_midi(midi_path, notes, bpm=120):
    """Write *notes* to *midi_path* at *bpm*.

    *notes* holds (name, denominator) pairs: name is like 'c#4'
    (case-insensitive, last char is the octave digit) or 'r' for a
    rest; (name, 4) is a quarter note.
    """
    chromatic = 'c c# d d# e f f# g g# a a# b'.split()
    seq = NoteSeq()
    for n in notes:
        dur = 1. / n[1]
        if n[0].lower() == 'r':
            seq.append(Rest(dur=dur))
        else:
            # pyknon octaves are offset by one from the textual names.
            pitch_number = chromatic.index(n[0][:-1].lower())
            seq.append(Note(pitch_number, octave=int(n[0][-1]) + 1, dur=dur))
    midi = Midi(1, tempo=bpm)
    midi.seq_notes(seq, track=0)
    midi.write(midi_path)
def generate(self):
    """Fill self.sequence with 1500 random note names whose octave band
    widens with position, then write the file and play it via timidity."""
    rand_list = []
    for i in range(1500):
        note = random.choice(self.note)
        # Octave band depends on position in the piece: early notes low,
        # later notes progressively higher.
        if i < 500:
            number = random.randrange(1, 5)
        elif i < 800:
            number = random.randrange(5, 10)
        elif i < 1000:
            number = random.randrange(10, 15)
        else:
            number = random.randrange(15, 18)
        rand_list.append(note + str(number))
    print(rand_list)
    self.sequence = rand_list
    notes = NoteSeq(' '.join(self.sequence))
    midi = Midi(1, tempo=500)
    midi.seq_notes(notes, track=0)
    midi.write(file_name)
    subprocess.call(["timidity", file_name])
def test_output(x, g):
    """Render the activation matrix *x* (frames x classes, transposed
    access) as MIDI and play it with timidity.

    A class's note sounds while its squared activation stays above a
    gain-scaled floor; when it falls below, the accumulated note is
    emitted at that frame with duration and volume built up over the
    frames it stayed on.
    """
    midi = Midi(1, tempo=out_tempo)
    for i in range(n_classes):
        dur = 0
        vol = 0
        for t, v in enumerate(x.T[i]):
            # Volume floor scaled by this frame's gain relative to mean gain.
            min_volume = minimal_volume * g[t] / g.mean()
            if v * v > min_volume:
                # Note continues: fold this frame's level into the
                # running volume estimate and extend the duration.
                if dur:
                    vol = (vol / dur + v * v / min_volume) * (dur + 1)
                else:
                    vol = v * v / min_volume
                dur += 1
            elif dur:
                # Note just ended: emit it (dur is in quarter-beats,
                # volume clamped to 100), then reset the accumulators.
                midi.seq_notes([
                    Note(classes[i], dur=dur / 4., volume=min(100, int(vol)))
                ], time=t)
                dur = 0
                vol = 0
    midi.write("output.mid")
    os.system("timidity output.mid")
def get(self, request, piece_id):
    """Return JSON pointing at the rendered MIDI for *piece_id*,
    generating the file under MEDIA_ROOT on first request.

    Unauthenticated users and users without access to a private piece
    get a redirect to the login page instead.
    """
    if not request.user.is_authenticated:
        return JsonResponse({"url": reverse("login"), "type": "redirect"})
    piece = MusicPiece.objects.get(id=piece_id)
    # check if user has permission to download this file (in case JS was
    # modified). This doesn't prevent user from downloading file
    # via entering /media/<filename> url if the midi file has been already
    # generated (but such desperate must know not only the ID, but also
    # the title)
    if not (piece.is_public or piece.author == request.user):
        return JsonResponse({"url": reverse("login"), "type": "redirect"})
    piece = make_piece(piece)
    filename = '{}_{}.mid'.format(piece_id, piece.title)
    # if file exists, don't generate it again - as it's
    # not possible to modify piece in current version
    if not path.isfile(path.join(MEDIA_ROOT, filename)):
        # file doesn't exist - generate it!
        m = Midi(4, tempo=90)
        # piece.parts is iterated for its keys (voice names); each voice
        # sequence goes on its own track index.
        for idx, voice in enumerate(piece.parts):
            m.seq_notes(piece.parts[voice], idx)
        m.write(path.join(MEDIA_ROOT, filename))
    url = path.join(MEDIA_URL, filename)
    return JsonResponse({"url": url, "type": "file"})
def abstraction():
    """Assemble the piece "abstraction" from transformations (transpose,
    inversion, duration stretch) of a two-note germinal cell and write
    it to midi/abstraction.mid.
    """
    a = NoteSeq("C4. Eb8")  # the germinal cell
    a1 = a.transp(5).stretch_dur(0.5)
    a2 = a.inv("Db''")
    a3 = a1.inv(8)
    A = a + a1 + a2 + a3
    A2 = A.transp(2)
    B = a1.transp(8) + a1.transp("Eb''")
    # Same pitches as a + a1 but flattened to uniform 32nd durations.
    c = NoteSeq([Note(x.value, dur=0.125) for x in a + a1])
    # NOTE: stretch_inverval is pyknon's own (misspelled) method name —
    # do not "fix" the spelling.
    C = (c.inv("Ab''") + c.inv(10) + c.stretch_inverval(2).transp(2) +
         c.inv("G''") + c.inv("E''").stretch_inverval(1) +
         c.inv("A").stretch_inverval(1))
    a4 = a.stretch_dur(2).inv(6)
    Part1 = A + NoteSeq("C2") + A2 + B
    Part2 = C + a4
    midi = Midi(1, tempo=90)
    midi.seq_notes(Part1 + Part2, track=0)
    midi.write("midi/abstraction.mid")
def writesong():
    """Compose a four-voice (SATB) song over a repeating I-IV-V-vi
    progression and return the assembled 150 bpm, four-track Midi.

    Each bar x chooses pitch lists by x % 4; the final bar gets a long
    cadence chord instead.
    """
    soprano = NoteSeq()
    alto = NoteSeq()
    tenor = NoteSeq()
    bass = NoteSeq()
    for x in range(0, SONG_DURATION, 1):
        # Final-bar cadence.  The original compared x == SONG_DURATION,
        # a value range() never yields, so this branch was dead code.
        if x == SONG_DURATION - 1:
            soprano += random_notes(I_PITCHLIST_AT, SOP_RANGE, 1, 120)
            alto += random_notes(I_PITCHLIST_AT, ALT_RANGE, 1, 90)
            tenor += random_notes(I_PITCHLIST_AT, TEN_RANGE, 1, 90)
            bass += random_notes(I_PITCHLIST_B, BAS_RANGE, 1, 120)
        elif x % 4 == 0:
            soprano += random_notes(RANDOM_PITCHLIST, SOP_RANGE, 0.0625, 120)
            alto += random_notes(I_PITCHLIST_AT, ALT_RANGE, 0.125, 90)
            tenor += random_notes(I_PITCHLIST_AT, TEN_RANGE, 0.25, 90)
            bass += random_notes(I_PITCHLIST_B, BAS_RANGE, 0.5, 90)
        elif x % 4 == 1:
            soprano += random_notes(RANDOM_PITCHLIST, SOP_RANGE, 0.0625, 120)
            alto += random_notes(IV_PITCHLIST_AT, ALT_RANGE, 0.125, 90)
            tenor += random_notes(IV_PITCHLIST_AT, TEN_RANGE, 0.25, 90)
            bass += random_notes(IV_PITCHLIST_B, BAS_RANGE, 0.5, 90)
        elif x % 4 == 2:
            soprano += random_notes(RANDOM_PITCHLIST, SOP_RANGE, 0.0625, 120)
            alto += random_notes(V_PITCHLIST_AT, ALT_RANGE, 0.125, 90)
            tenor += random_notes(V_PITCHLIST_AT, TEN_RANGE, 0.25, 90)
            bass += random_notes(V_PITCHLIST_B, BAS_RANGE, 0.5, 90)
        elif x % 4 == 3:
            soprano += random_notes(RANDOM_PITCHLIST, SOP_RANGE, 0.0625, 120)
            alto += random_notes(VI_PITCHLIST_AT, ALT_RANGE, 0.125, 90)
            tenor += random_notes(VI_PITCHLIST_AT, TEN_RANGE, 0.25, 90)
            bass += random_notes(VI_PITCHLIST_B, BAS_RANGE, 0.5, 90)
    midi = Midi(4, tempo=150)
    midi.seq_notes(soprano, track=0)
    midi.seq_notes(alto, track=1)
    midi.seq_notes(tenor, track=2)
    midi.seq_notes(bass, track=3)
    return midi
# mode == '-i' | '-m' modes = ['-i', '-m'] mode = sys.argv[1] if mode not in modes: raise Exception('wrong mode') filepath = sys.argv[2] if mode == '-i': image = Image.open(filepath) width, height = image.size print("size: {0} * {1}".format(width, height)) pixels = image.load() # Main Loop, generates notes from pixel RGB values print("Generating note sequence...") notes = pix2noteseq(pixels, width, height) # Generate and write the midi file to disk print("Generating the Midi file...") midi = Midi(number_tracks=1, tempo=90) midi.seq_notes(notes, track=0) print("Writing Midi file...") midi.write(sys.argv[2][:-3] + 'mid') print("Done.") elif mode == '-m': # TODO pass
from pyknon.genmidi import Midi
from pyknon.music import Note, NoteSeq, Rest

duration = 1


def generateSound(inputS):
    """Map the byte-sized value *inputS* (0-255) to a single note and
    write it to "output.mid".

    The value is scaled to 0-63 and offset by 27 semitones so that 0-3
    maps to Eb in the 2nd octave and 252-255 to F# in the 7th octave.

    Fixes over the original: the def line was missing its colon (a
    syntax error), and NoteSeq/Midi were used without being imported.
    """
    # --------Construction of Note----------------
    note_sequence = []
    # Takes inputS and maps it to a semitone.
    scaled_value = int(inputS / 4)           # scales value from 0-63
    note_value = (scaled_value + 27) % 12    # semitone within the octave
    octave = int((scaled_value + 27 - note_value) / 12)
    note = Note(note_value, octave, duration)
    note_sequence.append(note)  # adds it to the note sequence
    sound = NoteSeq(note_sequence)
    # -------Creating MIDI file -------------
    # See https://www.midi.org/specifications/item/gm-level-1-sound-set
    # for instrument codes.
    m2 = Midi(2, tempo=60, channel=[0, 9], instrument=[3, 3])
    m2.seq_notes(sound, track=0, channel=0)
    m2.write("output.mid")
from pyknon.genmidi import Midi
from pyknon.music import NoteSeq, Note

# Minimal example: one note ("A", default octave/duration) at 80 bpm.
C_major = NoteSeq("A")
midi = Midi(1, tempo=80)
midi.seq_notes(C_major)
midi.write("A4.mid")
def test_seq_notes_with_more_tracks_than_exists(self):
    """seq_notes() must raise MidiError when the target track index is
    beyond the track count the Midi was created with (track 1 on a
    1-track file)."""
    midi = Midi(1)
    with self.assertRaises(MidiError):
        # Track 0 exists and succeeds; the second call to track 1 is
        # the one expected to raise.
        midi.seq_notes(NoteSeq("C D"), track=0)
        midi.seq_notes(NoteSeq("D E"), track=1)
from pyknon.genmidi import Midi
from pyknon.music import NoteSeq

# Notes on two tracks using the defaults
notes1 = NoteSeq("C4.'' B8' A4 D")
notes2 = NoteSeq("E4 F G4. A8")
m = Midi(2, tempo=100, instrument=[12, 14])
m.seq_notes(notes1, track=0)
m.seq_notes(notes2, track=1)
m.write("tracks.mid")

# Chords on two tracks using the defaults
chords1 = [NoteSeq("C2 E G"), NoteSeq("G2 B D")]
chords2 = [NoteSeq("C,4 E"), NoteSeq("E, G"),
           NoteSeq("G, B"), NoteSeq("B, D'")]
midi = Midi(2, tempo=60, instrument=[40, 20])
midi.seq_chords(chords1, track=0)
midi.seq_chords(chords2, track=1)
midi.write("chords.mid")

# Notes on two tracks using percussion
# In the MIDI library, the tracks and channels are numbered from 0,
# While the MIDI Standard is numbered from 1,
# So to use percussion you must use channel 9 in the library
# NOTE(review): n1/n2 are defined here but written beyond this chunk.
n1 = NoteSeq("C4 D E F")
n2 = NoteSeq("C8 C G, G C' C G, G")
# NOTE(review): Python 2 script — print statements, and map() returning
# a list is relied upon (piano_notes is iterated more than once).
def key_number(n):
    # Map a Note to a 0-based piano key index, relative to the
    # module-level first_note (defined outside this chunk).
    return n.octave * 12 + n.value - first_note.value


def note_from_key_number(k):
    # Inverse mapping; 51 is presumably the offset aligning key 0 with
    # the lowest piano A — confirm against first_note.
    return Note(k - 51)


def intervals(notes):
    # Pairwise differences between consecutive elements.
    interval = []
    for n in range(len(notes)-1):
        interval.append(notes[n+1] - notes[n])
    return interval


# All 88 piano keys as Note objects.
piano_notes = map(note_from_key_number, range(88))

midi = Midi(1, tempo=80)
midi.seq_notes(piano_notes, track=0)
midi.write("piano_keys.mid")

####### Next we'll examine a major and minor scale, and look at the intervals between
####### each of their notes
middle_c = Note("C,")  # key_number=39
note_nums = map(key_number, piano_notes)
print "All piano notes:", note_nums
print "Middle C key number:", key_number(middle_c)

# A, means drop the octave, C' means raise the octave.
# Also, in a NoteSeq, Pyknon stays in the same octave unless explicitly
# changed by using either , or '.
C_major = NoteSeq("C D E F G A B C'' ")
A_minor = NoteSeq("A, B C' D E F G A")
def create_midi(self):
    """Write self.notes at self.tempo to songs/<song_name>.mid."""
    sequence = NoteSeq(str(self.notes))
    writer = Midi(1, tempo=self.tempo)
    writer.seq_notes(sequence, track=0)
    writer.write('songs/' + str(self.song_name) + '.mid')
def gen_midi(filename, note_list):
    """Write *note_list* to *filename* on the default track at 120 bpm."""
    out = Midi(tempo=120)
    out.seq_notes(note_list)
    out.write(filename)
seqLine3 = text_to_bits(LINE3)

# Work on each bit string as a list of single characters.
seq1 = list(seqLine1)
seq2 = list(seqLine2)
seq3 = list(seqLine3)

# Convert the '0'/'1' characters to integers.
# NOTE(review): all loops index by len(seq1) — assumes the three lines
# encode to equal lengths; confirm upstream.
for i in range(len(seq1)):
    seq1[i] = int(seq1[i])
    seq2[i] = int(seq2[i])
    seq3[i] = int(seq3[i])

# Shift each voice into its own pitch region.
for i in range(len(seq1)):
    seq1[i] += SHIFT1
    seq2[i] += SHIFT2
    seq3[i] += SHIFT3

# Interleave the three voices: one value from each per step.
finalSequence = []
for i in range(len(seq1)):
    finalSequence.append(seq1[i])
    finalSequence.append(seq2[i])
    finalSequence.append(seq3[i])

midi = Midi(tempo=SPEED)
# Every value becomes a fixed 16th note in octave 4 at full volume.
midi.seq_notes(
    NoteSeq([
        Note(value=x, octave=4, dur=1 / 16, volume=127)
        for x in finalSequence
    ]))
midi.write("file.mid")
# midi.seq_notes(seq2, time=3) # or # midi.seq_notes(seq2, time=4) # Import pyknon from the git submodule in a subdirectory import sys sys.path.append('./pyknon') from pyknon.genmidi import Midi from pyknon.music import NoteSeq seq1 = NoteSeq("C D E") seq2 = NoteSeq("F G A") # Q. In the following code, what is the order of the notes in the MIDI file? midi = Midi() midi.seq_notes(seq1) midi.seq_notes(seq2) # A. both seq1 and seq2 are written to the same track (1). # The notes of seq2 sequence override notes at the same position in seq1. midi.write("exercise7-F-G-A.mid") # Q. What happens when you change the second-to-last line to # midi.seq_notes(seq2, time=3) midi = Midi() midi.seq_notes(seq1) midi.seq_notes(seq2, time=3) # A. Now the sequence seq2 is offset by 3 beats, # the two sequences seq1 and seq2 appear concatenated. midi.write("exercise7-C-D-E-F-G-A.mid") # Q. What happens when you change the second-to-last line to
    # Tail of a function whose definition precedes this chunk.
    return NoteSeq(melody)

#generate the twelve tone matrix
matGen = GenerateMatrix()
matrix = matGen.genMatrix()

#generate the music randomly
music = RandomMusic(matrix)
# Each voice takes a different row of the matrix and octave register
# ('' high, , low, ,, bass).
highScale = matrix[3]
seqHigh = music.genNotes("''", highScale)
seqMidHigh = music.genRandRange(["'", "''"])
midScale = matrix[7]
seqMid = music.genNotes(",", midScale)
lowScale = matrix[4]
seqBass = music.genNotes(",,", lowScale)

#generate midi file
midi = Midi(4, 90)
midi.seq_notes(seqHigh, 0)
midi.seq_notes(seqMidHigh, 1)
midi.seq_notes(seqMid, 2)
midi.seq_notes(seqBass, 3)
#you'll probably have to change this!
midi.write('/Users/tony/Desktop/song.mid')
from pyknon.genmidi import Midi
from pyknon.music import NoteSeq

# Four-note phrase on instrument 53 at 90 bpm, written to output/demo.mid.
notes1 = NoteSeq("D4 F#8 A Bb4")
midi = Midi(1, tempo=90, instrument=53)
midi.seq_notes(notes1, track=0)
midi.write("output/demo.mid")
from pyknon.genmidi import Midi
from pyknon.music import NoteSeq

# Notes on two tracks using the defaults
notes1 = NoteSeq("C4.'' B8' A4 D")
notes2 = NoteSeq("E4 F G4. A8")
m = Midi(2, tempo=100, instrument=[12, 14])
m.seq_notes(notes1, track=0)
m.seq_notes(notes2, track=1)
m.write("tracks.mid")

# Chords on two tracks using the defaults
chords1 = [NoteSeq("C2 E G"), NoteSeq("G2 B D")]
chords2 = [
    NoteSeq("C,4 E"),
    NoteSeq("E, G"),
    NoteSeq("G, B"),
    NoteSeq("B, D'")
]
midi = Midi(2, tempo=60, instrument=[40, 20])
midi.seq_chords(chords1, track=0)
midi.seq_chords(chords2, track=1)
midi.write("chords.mid")

# Notes on two tracks using percussion
# In the MIDI library, the tracks and channels are numbered from 0,
# NOTE(review): the percussion example continues beyond this chunk.
def test_write_midifile(self):
    """A sequence containing a rest ("R") writes without error; Midi.write
    accepts an open file object, not just a path string."""
    notes1 = NoteSeq("D4 F#8 R A")
    midi = Midi(1, tempo=133)
    midi.seq_notes(notes1, track=0)
    # NOTE(review): the TemporaryFile handle is never closed explicitly;
    # it is reclaimed when garbage-collected.
    midi.write(tempfile.TemporaryFile())
def test_seq_notes(self):
    """Both tracks of a two-track Midi accept a sequence without error."""
    midi = Midi(2)
    midi.seq_notes(NoteSeq("C D"), track=0)
    midi.seq_notes(NoteSeq("D E"), track=1)
octave=int(note2_str[-1]), dur=1 / 16, volume=volume_note2) ]) note3_str = note_above(int_to_note(note) + str(octave), 7) note3 = NoteSeq([ Note(value=note_to_int(note3_str), octave=int(note3_str[-1]), dur=1 / 16, volume=volume_note3) ]) midi = Midi(number_tracks=3, tempo=60, instrument=0) midi.seq_notes(note1, track=0) midi.seq_notes(note2, track=1) midi.seq_notes(note3, track=2) midi.write(filepath + "major_root/" + int_to_note(note).lower() + "_major_" + int_to_note(note).lower() + "_" + str(octave) + "_" + str(volume_note1) + str(volume_note2) + str(volume_note3) + ".mid") if done == 1: break # major first inversion done = 0 for octave in range(2, 9):
#!/usr/bin/env python from pyknon.genmidi import Midi from pyknon.music import Note, NoteSeq notes1 = NoteSeq("D4' F#8 A D4") midi = Midi(1, tempo=90) midi.seq_notes(notes1, track=0) midi.write("octave_test2.mid") #################################### # a melody from Star Wars sw = NoteSeq( "D2, A G16 F#16 E8 D2' A4, G16 F#16 E8 D2' A4, G16 F#16 G8 E1" ) sw_midi = Midi(1, tempo=120) sw_midi.seq_notes(sw, track=0) sw_midi.write("sw.mid") #################################### # Beethoven's "Ode to Joy" theme, one track filename = "beethoven_one_track.mid" bn_part_a = NoteSeq( "B4 B C'' D D C B' A G G A B B A A2 B4 B C'' D D C B, A G G A B A G G" ) bn_part_b = NoteSeq("A4 A B G A B8 C'' B4 G A B8 C'' B4 A G A D") bn_midi = Midi(1, tempo=120) bn_midi.seq_notes(bn_part_a + bn_part_b, track=0)
def tutorial():
    """Write the four-note pyknon tutorial phrase to demo.mid at 60 bpm."""
    phrase = NoteSeq("D4 F#8 A Bb4")
    writer = Midi(1, tempo=60)
    writer.seq_notes(phrase, track=0)
    writer.write("demo.mid")
#!/usr/bin/env python from pyknon.genmidi import Midi from pyknon.music import Note, Rest, NoteSeq filename = "rest.mid" midi = Midi(1, tempo=120) midi.seq_notes(NoteSeq("r1")) midi.write(filename) print "wrote ", filename
h = []
s = []
v = []
# Convert each RGB triple in `data` to HSV: hue picks the pitch (0-11),
# saturation the octave, value (brightness) the duration.
# (`data`, `seq` and `all` are defined earlier in the file.)
for i in data:
    print(i)
    c = colorsys.rgb_to_hsv(i[0]/255, i[1]/255, i[2]/255)
    print(c)
    all.append(c)  # NOTE(review): `all` shadows the builtin
    #h.append(int(c[0]*12))
    #s.append(int(c[1]*8)+1)
    #v.append(c[2])
    # NOTE(review): this rebinds `s` (the list above) to a scalar
    # duration, so the h/s/v lists are effectively unused.
    if c[2] > 0.5:
        s = 1/8
    else:
        s = 1/16
    seq.append((int(c[0]*12), int(c[1]*8)+1, s))
#print all

# Bass line: a root-third-fifth cycle, one note per 4 melody notes.
baseSeq = []
for i in range(int(len(seq)/4)):
    baseSeq.append([0, 4, 7][i % 3])

#seq = chain.from_iterable(data)
midi = Midi(number_tracks=2, tempo=120)
#Note(note from 0-11, pitch(octave), length(in beat))
#midi.seq_notes(NoteSeq([Note(x%12, 4, 1/8) for x in seq]))
midi.seq_notes(NoteSeq([Note(x, 4, 1/4) for x in baseSeq]), track=0)
midi.seq_notes(NoteSeq([Note(x[0], x[1], x[2]) for x in seq]), track=1)
midi.write("output.mid")
#fluidsynth -T wav -F output.wav Piano.sf2
def test_output(notes):
    """Write each note at its own .time offset into output.mid, then
    play the result with timidity."""
    midi = Midi(1, tempo=360)
    for note in notes:
        midi.seq_notes([note], time=note.time)
    midi.write("output.mid")
    os.system("timidity output.mid")
min_octave = 3
max_octave = 7
octave_range = (max_octave - min_octave) + 1

# One note per pixel group: the average colour value folded into the
# extended pitch range.
# NOTE(review): `octave` is always min_octave — octave_range only widens
# the modulus applied to `value`; confirm this is intended.
for pixel_group in colors:
    value = (sum(pixel_group) / len(pixel_group)) % (12 * octave_range)
    octave = min_octave
    # Duration of 0.25 means 1 note per beat
    duration = 0.25
    notes.append(Note(value=value, octave=octave, dur=duration))
# for pixels in range(0, number_of_pixels, pixels_per_beat):
#     group = colors[pixels:pixels+pixels_per_beat]
#     #Get the number that occurs most in the group, condense it into the range, and set it's duration to the percentage of times it occurs
#     max_item, occurs = findmaxoccurence(group)
#     value = max_item % (12 * octave_range)
#     duration = float(occurs)/float(len(group))
#     #duration = legalise_note(duration)
#     notes.append(Note(value=value, dur=duration, octave=min_octave))

midi = Midi(tempo=desired_bpm)
midi.seq_notes(NoteSeq(notes))
midi.write("monaLisa(%sbpm).midi" % (desired_bpm))

# NOTE(review): Python 2 print statement — this script targets python2.
print "Playing music - Song Name"
music.load("monaLisa(%sbpm).midi" % (desired_bpm))
music.play()
while music.get_busy():
    # Make sure the program stays open to allow the song to actually play
    pass
from pyknon.genmidi import Midi
from pyknon.music import NoteSeq

# Track 0: a C major scale; track 1: the same scale delayed by two
# whole rests, for comparison against the retuned track.
notes1 = NoteSeq("C4 D E F G A B C''")
notes2 = NoteSeq("r1 r1 C4 D E F G A B C''")

# (midi_note_number, frequency_hz) tuning pairs; the trailing comments
# give each note's standard equal-tempered frequency.
table = [
    (60, 261.63),   # 261.63
    (62, 280),      # 293.66
    (64, 333),      # 329.63
    (65, 349),      # 349.23
    (67, 391.99),   # 391.99
    (69, 444),      # 440.00
    (71, 510),      # 493.88
    (72, 523.25)    # 523.25
]

m = Midi(2, tempo=120)
m.seq_notes(notes1, track=0)
m.seq_notes(notes2, track=1)
# Apply the microtonal tuning table to track 0 only.
m.change_tuning(0, table)
m.write("micro.mid")
from pyknon.genmidi import Midi
from pyknon.music import NoteSeq

# Three-voice Marenzio excerpt at 105 bpm, all voices on instrument 48
# (strings) with reduced volume.
filename = "marenzio.mid"

top_part = NoteSeq("""
A2 A A4 Bb2 Bb4 A2 A r4 D4'' D4. C8'' D4 Eb C2 Bb' D4'' C2 Bb4'
A4. Bb8 C2'' C r4 D D2. C4'' Bb4' A G2 G A4 A8 Bb C4'' Bb' A1 G1.
""")

middle_part = NoteSeq("""
F#2 F# F#4 G2 G4 F#2 F# r4 Bb4 Bb4. C8'' Bb4' Bb A2 Bb Bb4 A2 G4
F4. G8 A2 A r4 Bb4 Bb2. A4 G F E2 E F4 E8 D E4 G2 F#8 E F#2 G1.
""")

bottom_part = NoteSeq("""
D2, D D4 G2,, G4 D2, D2 Bb,, Bb4., A8 G4 Eb4 F2 Bb,, Bb4,, F2, G4
D4. Eb8 F2 F G2 G2. F4 Eb D C2 C D4 C8 Bb, A4 G D1' G1.,
""")

# turn the volume down on the string instruments
for part in [top_part, middle_part, bottom_part]:
    for note in part:
        note.volume = 45

midi = Midi(number_tracks=3, tempo=105, instrument=48)
midi.seq_notes(top_part, track=0)
midi.seq_notes(middle_part, track=1)
midi.seq_notes(bottom_part, track=2)
midi.write(filename)