def writeMidi(path, seq):
    """Write a note sequence, or a list of per-track sequences, to a MIDI file.

    Args:
        path: output .mid file path.
        seq: a single sequence of Note objects (written as one track) or a
            list of sequences (one MIDI track each).
    """
    # isinstance replaces the fragile `type(x) == Note` comparison.
    if isinstance(seq[0], Note):
        print("One Track")
        midi = Midi(1, tempo=100)
        midi.seq_notes(seq)
    else:
        midi = Midi(len(seq), tempo=100)
        # enumerate replaces the original hand-maintained counter.
        for track_no, track in enumerate(seq):
            midi.seq_notes(track, track=track_no)
            print(track)
    midi.write(path)
def makemidi(self):
    """Render self.song into a single-track MIDI file at self.path.

    Each entry of self.song is indexable as (pitch_name, denominator):
    'r' means a rest; otherwise a name like 'c#4' (octave 10 uses a
    two-digit suffix, e.g. 'c10'). Durations are 1/denominator.
    """
    note_names = 'c c# d d# e f f# g g# a a# b'.split()
    octave10 = {'c10', 'c#10', 'd10', 'd#10', 'e10', 'f10',
                'f#10', 'g10', 'g#10', 'a10', 'a#10', 'b10'}
    result = NoteSeq()
    for s in self.song:
        duration = 1. / s[1]
        name = s[0]
        if name == 'r':
            result.append(Rest(dur=duration))
        else:
            # Plain membership test replaces the original
            # `{s[0]}.issubset(octav10)`; the digit count picks how many
            # trailing characters hold the octave, merging the two
            # previously duplicated branches.
            digits = 2 if name in octave10 else 1
            pitch = name[:-digits]
            octave = int(name[-digits:]) + 1
            result.append(Note(note_names.index(pitch),
                               octave=octave, dur=duration))
    midi = Midi(number_tracks=1, tempo=self.bpm)
    midi.seq_notes(result, track=0)
    midi.write(self.path)
def generate(key, outfile):
    """Read ciphered note values/durations, write an intermediate MIDI,
    and hand it to meta.meta for final processing.

    Reads paired lines from "c_text" (int note values) and "c_dur"
    (float durations); both temp files are removed afterwards.
    """
    # scale = open("./scales/"+key,"r")
    notes1 = []
    # 'with' guarantees the input files are closed even if parsing raises
    # (the original left them open on error).
    with open("c_text", "r") as cipher, open("c_dur", "r") as dur:
        while True:
            ci = cipher.readline()
            du = dur.readline()
            # Stop at the end of whichever file runs out first.
            if not ci or not du:
                break
            notes1.append(Note(value=int(ci), dur=float(du)))
    for note in notes1:
        print(note)
    midi = Midi(1, tempo=80)
    midi.seq_notes(notes1, track=0)
    midi.write("inter.mid")
    os.remove("c_text")
    os.remove("c_dur")
    meta.meta("inter.mid", key, outfile)
def saveSong(self, filename):
    """Write each queued track to its own MIDI file, then merge them into *filename*.

    Merging is done pairwise via the external midisox.py script; the
    final merged file is renamed to *filename*.
    """
    def nameFile(filename, iterator):
        # Insert *iterator* before the extension: song.mid -> song0.mid
        return "".join(filename.split(".")[:-1]) + str(
            iterator) + "." + filename.split(".")[-1]
    # Flush any pending notes into a track before writing.
    if self.__notes != []:
        self.__addTrack()
    # for track in self.__tracks:
    #     track.addPause()
    fileNameIterator = 0
    # One single-track MIDI file per track, numbered 0..n-1.
    for track in self.__tracks:
        midi = Midi(number_tracks=1, instrument=track.getInstrument().value)
        notes = NoteSeq(track.getNotes())
        midi.seq_notes(notes, track=0)
        midi.write(nameFile(filename, fileNameIterator))
        fileNameIterator += 1
    # After the loop this becomes the index of the last file written.
    fileNameIterator -= 1
    if fileNameIterator > 0:
        # Fold file i into file i+1 (output overwrites file i+1), deleting
        # file i as we go; the last file accumulates the concatenation.
        for i in range(fileNameIterator):
            os.system("python midisox.py --combine concatenate " +
                      nameFile(filename, i) + " " +
                      nameFile(filename, i + 1) + " " +
                      nameFile(filename, i + 1))
            os.remove(nameFile(filename, i))
    # Replace any pre-existing output file with the merged result.
    if os.path.exists(filename):
        os.remove(filename)
    os.rename(nameFile(filename, fileNameIterator), filename)
def gen_midi(rm_seq, cprogr, cprog3, cprog9, filename):
    """Write the four note sequences to *filename* as a 4-track MIDI at 120 BPM."""
    midi = Midi(number_tracks=4, tempo=120)
    # One sequence per track, in the order the arguments were given.
    for track_no, sequence in enumerate((rm_seq, cprogr, cprog3, cprog9)):
        midi.seq_notes(sequence, track=track_no)
    midi.write(filename)
def midi_from_melodies(melodies):
    """Build a Midi object whose chords stack the melodies vertically.

    Each melody is a list of integer pitches; pitch x maps to pitch class
    x % Octave in octave 4 + x // Octave with quarter-note duration.
    Melodies are assumed to be equal length.

    Returns the constructed Midi object (caller writes it).
    """
    notes = [[Note(x % Octave, 4 + x // Octave, 1 / 4) for x in melody]
             for melody in melodies]
    # BUG FIX: the original indexed `range(len(melody))`, but comprehension
    # variables do not leak in Python 3, so `melody` was undefined here.
    # zip(*notes) yields the same vertical columns directly.
    chords = [NoteSeq(list(column)) for column in zip(*notes)]
    midi = Midi(tempo=120)
    midi.seq_chords(chords)
    return midi
def demo():
    """Write a two-track demo MIDI: the same phrase as a string and as Note objects."""
    string_seq = NoteSeq("D4 F#8 A Bb4")
    object_seq = NoteSeq([
        Note(2, dur=1 / 4),
        Note(6, dur=1 / 8),
        Note(9, dur=1 / 8),
        Note(10, dur=1 / 4),
    ])
    midi = Midi(number_tracks=2, tempo=90)
    midi.seq_notes(string_seq, track=0)
    midi.seq_notes(object_seq, track=1)
    midi.write("midi/demo.mid")
def crab_canon():
    """Render a crab canon: the theme against its own retrograde an octave down."""
    theme = NoteSeq("file://canon-crab")
    # The answering voice is the theme played backwards, transposed down 12.
    answer = theme.transposition(-12).retrograde()
    midi = Midi(2, tempo=120)
    midi.seq_notes(theme)
    midi.seq_notes(answer, track=1)
    midi.write("midi/canon-crab.mid")
def from_pitch_track(times, pitch, sample_rate, filename="tmp.midi"):
    """Convert a pitch track into a single-track MIDI file and return its path.

    NaN pitch frames become rests; note/rest durations come from the
    compacted frame step.
    """
    step = (times[1] - times[0]) / sample_rate / 2
    compacted = compact([to_midi(x) for x in pitch], step=step)
    track0 = []
    for value, length in compacted:
        length = round(length, 4)
        if np.isnan(value):
            track0.append(Rest(dur=length))
        else:
            track0.append(Note(value, 0, length))
    m = Midi(1, tempo=90)
    m.seq_notes(track0, track=0)
    m.write(filename)
    return filename
def harmonize_scale(forte):
    """Write the scale for Forte set *forte*, then its harmonizations in 3rds, 4ths and 5ths."""
    scale = numbers_to_noteseq(pcset.PC_SETS[forte])
    midi = Midi()
    # Each pass starts one time unit after the previous one ends.
    t = midi.seq_notes(scale)
    for interval in (3, 4, 5):
        t = midi.seq_chords(scale.harmonize(interval=interval), time=t + 1)
    midi.write("midi/scales.midi")
def main():
    """Build a repeated phrase, write it to demo.mid, and play it."""
    phrase = "D4 E#10 F#8 Gg4 A Bb4 "
    # String repetition replaces the original xrange loop: xrange is
    # Python-2-only and the += loop was quadratic.
    # NOTE(review): "Gg4" looks like a typo for "G4" but is kept verbatim,
    # since changing it would alter the generated music — confirm upstream.
    notes1 = NoteSeq(phrase * 10)
    midi = Midi(1, tempo=90, instrument=0)
    midi.seq_notes(notes1, track=0)
    midi.write("demo.mid")
    play_music("demo.mid")
def create_music(note_seq, given_tempo, given_track, song_name):
    """Render *note_seq* into the assets music folder as <song_name>.mid.

    NOTE(review): the file is only written when it ALREADY exists on disk;
    a missing file just prints a message. This looks inverted — confirm
    whether the intent was to avoid overwriting or to create new files.
    """
    notes = NoteSeq(note_seq)
    midi = Midi(1, tempo=given_tempo)
    midi.seq_notes(notes, track=given_track)
    # NOTE(review): mixed separators here rely on '\m' and '\/' not being
    # recognized escape sequences; consider os.path.join if this path
    # misbehaves on some platform.
    file = ("assets\music\/" + song_name + ".mid")
    # Check if file exists
    if os.path.isfile(file):
        midi.write(file)
    else:
        print(song_name + ".mid Does not exist")
def josquin():
    """Write a three-track arrangement of the Josquin theme with varied durations."""
    source = NoteSeq("file://josquin")
    # Three derived voices: compressed, slowed+transposed excerpt, plain excerpt.
    voices = (
        source.stretch_dur(0.66666),
        source[0:24].stretch_dur(2).transp(Note("C")),
        source[0:50],
    )
    midi = Midi(3, tempo=80)
    for track_no, voice in enumerate(voices):
        midi.seq_notes(voice, track=track_no)
    midi.write("midi/josquin.mid")
def canon():
    """Write a two-voice canon: the theme against its inversion, offset in time."""
    theme1 = NoteSeq("file://canon-quaerendo-invenietis")
    voice1 = theme1 + theme1[2:] + theme1[2:11]
    # The second voice is a slightly shorter variant, inverted to start on D4.
    voice2 = (theme1 + theme1[2:] + theme1[2:4]).inversion_startswith(Note(2, 4))
    midi = Midi(2, tempo=150)
    midi.seq_notes(voice1, time=3, track=0)
    midi.seq_notes(voice2, time=13, track=1)
    midi.write("midi/canon.mid")
def create_midi_with_time(music, beat_of_music):
    """Write paired pitches/durations to midi/markov_Gavotte_test1.mid.

    music: sequence of pitch values; the sentinel 101 marks a rest.
    beat_of_music: matching sequence of durations.
    Only the first LEN_OF_MUSIC entries are used.
    """
    # Reading a module-level name needs no `global` declaration, and the
    # original append loop is now a comprehension.
    events = [Rest(dur=beat_of_music[i]) if music[i] == 101
              else Note(music[i], dur=beat_of_music[i])
              for i in range(LEN_OF_MUSIC)]
    seq = NoteSeq(events)
    midi = Midi(number_tracks=1, tempo=90)
    midi.seq_notes(seq, track=0)
    midi.write("midi/markov_Gavotte_test1.mid")
def genMusic(sentiment):
    """Generate a random melody from *sentiment*, save it, and play it to completion."""
    # Keep the original call order: these helpers may consume shared
    # random state, so reordering could change the output.
    key = genKey(sentiment)
    length = genLength(sentiment)
    notes = randomSeq(length, key, durations)
    bpm = genTempo(sentiment)
    midi = Midi(1, tempo=bpm)
    midi.seq_notes(notes, track=0)
    midi.write("midi/audio.mid")
    pygame.init()
    pygame.mixer.music.load("midi/audio.mid")
    pygame.mixer.music.play()
    # Block until playback finishes, polling once per second.
    while pygame.mixer.music.get_busy():
        pygame.time.wait(1000)
def main(arguments):
    """Turn recent tweets for the given hashtags into a MIDI melody.

    arguments: docopt-style dict; 'FILE' holds the output name (without
    extension) and the remaining values are hashtags to search for.
    Each hashtag is assigned one note of the C major blues scale and
    tweets are sequenced in chronological order.
    """
    # Authenticate on Twitter
    auth_file = "auth.txt"
    # The with-block closes the file; the original also called f.close()
    # redundantly inside it.
    with open(auth_file) as f:
        auth_list = f.readlines()
    consumer_key = auth_list[0].strip('\n')
    consumer_secret = auth_list[1].strip('\n')
    access_token_key = auth_list[2].strip('\n')
    access_token_secret = auth_list[3].strip('\n')
    api = twitter.Api(consumer_key=consumer_key,
                      consumer_secret=consumer_secret,
                      access_token_key=access_token_key,
                      access_token_secret=access_token_secret)
    # Clean up arguments so it's just a list of hashtags
    arguments.pop('--help', None)  # Remove --help option
    filename = arguments.pop('FILE', None) + ".mid"
    # Search for each of the given hashtags individually
    comboResults = []
    for key in sorted(set(arguments)):
        comboResults.append(api.GetSearch(term="%23" + arguments[key]))
    # Major blues scale C, D, D#/Eb, E, G, A — one note per hashtag.
    scale = ["C", "D", "D#", "E", "G", "A"]
    results = []
    for i, batch in enumerate(comboResults):
        for twt in batch:
            results.append((twt.created_at_in_seconds, scale[i]))
    # Sort notes in place by tweet time
    results.sort(key=lambda tup: tup[0])
    # join replaces the original quadratic string concatenation.
    notes = "".join(note + " " for _, note in results)
    # Create MIDI file
    score = NoteSeq(notes)
    midi = Midi(1, tempo=90)
    midi.seq_notes(score, track=0)
    midi.write(filename)
def convert_chords_to_midi(chord_list, filename):
    '''Requires pyknon to create the midi...'''
    from pyknon.music import NoteSeq
    from pyknon.genmidi import Midi
    midi = Midi(1, tempo=90)
    chord_prog = []
    for chord in chord_list:
        # German notation: written B means Bb, written H means B natural;
        # the two replacements must run in this order.
        normalized = chord.upper().replace('B', 'BB').replace('H', 'B')
        chord_prog.append(NoteSeq(normalized))
    midi.seq_chords(chord_prog, 0, 0)
    midi.write(filename)
def clean_freq(samples):
    "create freq samples"
    sample_size = len(samples)
    # One chord per sample, pitches looked up in the module-level `classes`.
    chords = [NoteSeq([Note(classes[i]) for i in sample]) for sample in samples]
    midi = Midi(1, tempo=tempo)
    # Space the chords 5 time units apart on a single track.
    for offset, chord in enumerate(chords):
        midi.seq_chords([chord], time=5 * offset)
    midi.write("temp.mid")
    # Render the MIDI to WAV with timidity, then analyze the first channel.
    subprocess.call("timidity temp.mid -Ow -o temp.wav".split(),
                    stdout=subprocess.PIPE)
    rate, data = wavfile.read('temp.wav')
    freqs = channel_freqs(data.T[0])[:sample_size * 10:10]
    return freqs.astype(int) / suppress_noise
def generateMid(tune, tuneNum, lastTune=False):
    """Write *tune* to a per-generation MIDI file; star-mark the last tune.

    Uses module globals generationCount, ins (instrument) and
    experimentName to build the output directory.
    """
    global generationCount
    global ins
    midi = Midi(instrument=ins)
    midi.seq_notes(tune.notes)
    # Build the directory path once; the original rebuilt the same string
    # inline in the lastTune branch instead of reusing `path`.
    path = "midi/" + experimentName + "/gen" + str(generationCount) + "/"
    if not os.path.exists(path):
        os.makedirs(path)
    # The final tune of a generation gets a star-marked filename.
    if lastTune:
        name = "*tune" + str(tuneNum) + "*.mid"
    else:
        name = "tune" + str(tuneNum) + ".mid"
    midi.write(path + name)
def proc(centroid_list, num):
    """Convert centroid coordinates into notes, write temp.mid, and play it.

    num selects how many centroids feed each getSingleObj call (1 or 2).
    """
    # // keeps this an int under Python 3; the original plain / produced a
    # float and made range() raise TypeError.
    num_notes = len(centroid_list) // 4
    notes = []
    for i in range(num_notes):
        if num == 2:
            a, b = getSingleObj(centroid_list[i], centroid_list[i + 1],
                                centroid_list[i + 2], centroid_list[i + 3])
            notes.append(a)
            notes.append(b)
        elif num == 1:
            a, b = getSingleObj(centroid_list[i], centroid_list[i + 1], 0, 0)
            notes.append(a)
    # Removed a leftover pdb.set_trace() that halted execution here on
    # every call.
    sequence = NoteSeq(notes)
    # NOTE(review): the original passed the final loop index as the track
    # count (Midi(i, ...)); only track 0 is ever used, so one track is the
    # apparent intent — confirm.
    midi = Midi(1, tempo=90, instrument=0)
    midi.seq_notes(sequence, track=0)
    midi.write("temp.mid")
    play_music("temp.mid")
def make_midi(midi_path, notes, bpm=120):
    """Write (name, denominator) note tuples to *midi_path* at *bpm*.

    A name of 'r' (any case) produces a rest; other names look like 'C4'
    and are shifted up one octave for pyknon. Durations are 1/denominator.
    """
    chromatic = 'c c# d d# e f f# g g# a a# b'.split()
    seq = NoteSeq()
    for n in notes:
        dur = 1. / n[1]
        name = n[0]
        if name.lower() == 'r':
            seq.append(Rest(dur=dur))
        else:
            # Last character is the octave digit, the rest is the pitch name.
            seq.append(Note(chromatic.index(name[:-1].lower()),
                            octave=int(name[-1]) + 1,
                            dur=dur))
    midi = Midi(1, tempo=bpm)
    midi.seq_notes(seq, track=0)
    midi.write(midi_path)
def generate(self):
    """Fill self.sequence with 1500 random notes whose octave range rises
    over the course of the piece, then render and play the result."""
    rand_list = []
    for i in range(1500):
        note = random.choice(self.note)
        # Octave band climbs in four stages as the piece progresses.
        if i < 500:
            octave = random.randrange(1, 5)
        elif i < 800:
            octave = random.randrange(5, 10)
        elif i < 1000:
            octave = random.randrange(10, 15)
        else:
            octave = random.randrange(15, 18)
        rand_list.append(note + str(octave))
    print(rand_list)
    self.sequence = rand_list
    notes = NoteSeq(' '.join(self.sequence))
    midi = Midi(1, tempo=500)
    midi.seq_notes(notes, track=0)
    midi.write(file_name)
    subprocess.call(["timidity", file_name])
def play_chord(notes, tempo, duration):
    """
    notes is a list of indices:
    [C1, C#1, D1, D#1, E1, F1, F#1, G1, G#1, A1, A#1, B1,
     C2, C#2, D2, D#2, E2, F2, F#2, G2, G#2, A2, A#2, B2]
    """
    # Index n maps to pitch class n % 12 in octave n // 12 + 4.
    chord = [Note(value=(n % 12), octave=((n // 12) + 4), dur=duration)
             for n in notes]
    filepath = "chord.mid"
    midi = Midi(tempo=tempo)
    midi.seq_chords([NoteSeq(chord)])
    midi.write(filepath)
    pg.init()
    pg.mixer.music.load(filepath)
    pg.mixer.music.play()
    os.remove(filepath)
def writesong():
    """Assemble a four-voice (SATB) song and return it as a 4-track Midi.

    Bars cycle through a I-IV-V-VI progression; the final bar resolves
    on a long tonic chord in all voices.
    """
    soprano = NoteSeq()
    alto = NoteSeq()
    tenor = NoteSeq()
    bass = NoteSeq()
    for x in range(0, SONG_DURATION, 1):
        # BUG FIX: range() never yields SONG_DURATION itself, so the
        # original `x == SONG_DURATION` closing-chord branch was
        # unreachable; trigger it on the final iteration instead.
        if x == SONG_DURATION - 1:
            soprano += random_notes(I_PITCHLIST_AT, SOP_RANGE, 1, 120)
            alto += random_notes(I_PITCHLIST_AT, ALT_RANGE, 1, 90)
            tenor += random_notes(I_PITCHLIST_AT, TEN_RANGE, 1, 90)
            bass += random_notes(I_PITCHLIST_B, BAS_RANGE, 1, 120)
        elif x % 4 == 0:
            soprano += random_notes(RANDOM_PITCHLIST, SOP_RANGE, 0.0625, 120)
            alto += random_notes(I_PITCHLIST_AT, ALT_RANGE, 0.125, 90)
            tenor += random_notes(I_PITCHLIST_AT, TEN_RANGE, 0.25, 90)
            bass += random_notes(I_PITCHLIST_B, BAS_RANGE, 0.5, 90)
        elif x % 4 == 1:
            soprano += random_notes(RANDOM_PITCHLIST, SOP_RANGE, 0.0625, 120)
            alto += random_notes(IV_PITCHLIST_AT, ALT_RANGE, 0.125, 90)
            tenor += random_notes(IV_PITCHLIST_AT, TEN_RANGE, 0.25, 90)
            bass += random_notes(IV_PITCHLIST_B, BAS_RANGE, 0.5, 90)
        elif x % 4 == 2:
            soprano += random_notes(RANDOM_PITCHLIST, SOP_RANGE, 0.0625, 120)
            alto += random_notes(V_PITCHLIST_AT, ALT_RANGE, 0.125, 90)
            tenor += random_notes(V_PITCHLIST_AT, TEN_RANGE, 0.25, 90)
            bass += random_notes(V_PITCHLIST_B, BAS_RANGE, 0.5, 90)
        elif x % 4 == 3:
            soprano += random_notes(RANDOM_PITCHLIST, SOP_RANGE, 0.0625, 120)
            alto += random_notes(VI_PITCHLIST_AT, ALT_RANGE, 0.125, 90)
            tenor += random_notes(VI_PITCHLIST_AT, TEN_RANGE, 0.25, 90)
            bass += random_notes(VI_PITCHLIST_B, BAS_RANGE, 0.5, 90)
    midi = Midi(4, tempo=150)
    midi.seq_notes(soprano, track=0)
    midi.seq_notes(alto, track=1)
    midi.seq_notes(tenor, track=2)
    midi.seq_notes(bass, track=3)
    return midi
def test_output(x, g):
    """Render the activation matrix *x* to output.mid and play it with timidity.

    x: 2-D array-like; x.T[i] is assumed to iterate class i's activations
       over time — TODO confirm shape upstream.
    g: per-timestep gain array; scales the volume threshold relative to
       its mean.
    """
    midi = Midi(1, tempo=out_tempo)
    for i in range(n_classes):
        dur = 0
        vol = 0
        for t, v in enumerate(x.T[i]):
            # Threshold rises/falls with the local gain vs. the mean gain.
            min_volume = minimal_volume * g[t] / g.mean()
            if v * v > min_volume:
                # NOTE(review): this running-volume update looks like an
                # unusual weighted accumulation over the note's duration;
                # kept verbatim — confirm the intended formula.
                if dur:
                    vol = (vol / dur + v * v / min_volume) * (dur + 1)
                else:
                    vol = v * v / min_volume
                dur += 1
            elif dur:
                # Activation ended: emit the accumulated note at time t,
                # with quarter-beat resolution and volume capped at 100.
                midi.seq_notes([
                    Note(classes[i], dur=dur / 4., volume=min(100, int(vol)))
                ], time=t)
                dur = 0
                vol = 0
    midi.write("output.mid")
    os.system("timidity output.mid")
def get(self, request, piece_id):
    """Return a JSON payload pointing at the rendered MIDI for *piece_id*.

    Redirects to login when the user is unauthenticated or lacks access;
    otherwise renders the piece to a 4-track MIDI (once) under MEDIA_ROOT
    and returns its media URL.
    """
    if not request.user.is_authenticated:
        return JsonResponse({"url": reverse("login"), "type": "redirect"})
    piece = MusicPiece.objects.get(id=piece_id)
    # check if user has permission to download this file (in case JS was
    # modified). This doesn't prevent user from downloading file
    # via entering /media/<filename> url if the midi file has been already
    # generated (but such desperate must know not only the ID, but also
    # the title)
    if not (piece.is_public or piece.author == request.user):
        return JsonResponse({"url": reverse("login"), "type": "redirect"})
    piece = make_piece(piece)
    filename = '{}_{}.mid'.format(piece_id, piece.title)
    # if file exists, don't generate it again - as it's
    # not possible to modify piece in current version
    if not path.isfile(path.join(MEDIA_ROOT, filename)):
        # file doesn't exist - generate it!
        m = Midi(4, tempo=90)
        # piece.parts appears to map voice name -> note sequence; each
        # voice goes on its own track (idx passed positionally as track)
        # — confirm against Midi.seq_notes signature.
        for idx, voice in enumerate(piece.parts):
            m.seq_notes(piece.parts[voice], idx)
        m.write(path.join(MEDIA_ROOT, filename))
    url = path.join(MEDIA_URL, filename)
    return JsonResponse({"url": url, "type": "file"})
def abstraction():
    """Compose a two-part piece from transformations of a two-note cell.

    Derives all material from the motif "C4. Eb8" via transposition
    (transp), inversion (inv) and duration/interval stretching, then
    writes both parts consecutively on one track.
    """
    a = NoteSeq("C4. Eb8")
    a1 = a.transp(5).stretch_dur(0.5)
    a2 = a.inv("Db''")
    a3 = a1.inv(8)
    A = a + a1 + a2 + a3
    A2 = A.transp(2)
    B = a1.transp(8) + a1.transp("Eb''")
    # Eighth-note recolouring of the opening material.
    c = NoteSeq([Note(x.value, dur=0.125) for x in a + a1])
    # NOTE: "stretch_inverval" (sic) is the method's actual spelling in
    # the library this code targets.
    C = (c.inv("Ab''") + c.inv(10) + c.stretch_inverval(2).transp(2) +
         c.inv("G''") + c.inv("E''").stretch_inverval(1) +
         c.inv("A").stretch_inverval(1))
    a4 = a.stretch_dur(2).inv(6)
    Part1 = A + NoteSeq("C2") + A2 + B
    Part2 = C + a4
    midi = Midi(1, tempo=90)
    midi.seq_notes(Part1 + Part2, track=0)
    midi.write("midi/abstraction.mid")
def midicreate(notelist, name, page):
    """Build chords from *notelist* and write them to MIDI/<page>/<name>.mid.

    Each element's .harmony says how many consecutive notes form a chord
    (2-4); 0 marks a note already consumed by a preceding chord; a .scale
    of 'Rest' yields a rest of the element's .tempo duration; any other
    harmony value produces a single note.
    """
    chords = []
    for i, item in enumerate(notelist):
        if item.scale == 'Rest':
            chord = Rest(item.tempo)
        elif item.harmony == 0:
            # Already swallowed into an earlier chord.
            continue
        elif item.harmony in (2, 3, 4):
            # A slice replaces the three duplicated branches of the
            # original, one per chord width.
            chord = NoteSeq([Note(n.scale, dur=item.tempo)
                             for n in notelist[i:i + item.harmony]])
        else:
            chord = NoteSeq([Note(item.scale, dur=item.tempo)])
        chords.append(chord)
    midi = Midi(1, tempo=117)
    # (removed the unused local `checktract` from the original)
    midi.seq_chords(chords, track=0)
    midi.write(f"/home/ec2-user/Ourchord/MIDI/{page}/{name}.mid")
# Encode the third text line as bits (LINE3, SHIFT1-3, SPEED and
# seqLine1/seqLine2 are defined earlier in the file).
seqLine3 = text_to_bits(LINE3)
seq1 = list(seqLine1)
seq2 = list(seqLine2)
seq3 = list(seqLine3)
# Bit characters -> ints. Assumes all three bit strings share one length
# — TODO confirm upstream.
for i in range(len(seq1)):
    seq1[i] = int(seq1[i])
    seq2[i] = int(seq2[i])
    seq3[i] = int(seq3[i])
# Transpose each line's bits by its configured pitch shift.
for i in range(len(seq1)):
    seq1[i] += SHIFT1
    seq2[i] += SHIFT2
    seq3[i] += SHIFT3
# Interleave the three lines into one flat note stream.
finalSequence = []
for i in range(len(seq1)):
    finalSequence.append(seq1[i])
    finalSequence.append(seq2[i])
    finalSequence.append(seq3[i])
midi = Midi(tempo=SPEED)
midi.seq_notes(
    NoteSeq([
        Note(value=x, octave=4, dur=1 / 16, volume=127)
        for x in finalSequence
    ]))
midi.write("file.mid")