#coding=utf-8 import mido from mido import MidiTrack, MidiFile, Message CurrentTime = 16080 #绝对时间轴 Track, Music = MidiTrack(), MidiFile() SongName = "Pianoboy\\105 days" #待操作音乐名称 path1 = "data\Sequence\Main" #主旋律序列存放地址 path2 = "output.mid" #输出音乐存放地址 Instrument = 1 #默认乐器为钢琴 def addNote(StartTime, Notes, FinishTime): global CurrentTime, Track if (StartTime - FinishTime > 0 or StartTime < CurrentTime): print(1) Track.append( Message("note_on", note=int(Notes[0]), velocity=80, channel=0, time=StartTime - CurrentTime)) #加入note_on 指令 if (len(Notes) > 1): for x in range(1, len(Notes)): Track.append( Message("note_on", note=int(Notes[x]), velocity=80, channel=0, time=0)) Track.append(
def listtomidi(songl, tempo, filename, insturment):
    """Convert a nested note-list *songl* into a single-track mido MidiFile.

    Args (as used below; exact shapes come from GenerateModel — TODO confirm):
        songl: iterable of parts, each an iterable of note groups.
        tempo: MIDI tempo in microseconds per beat (passed to set_tempo).
        filename: label written into the 'marker' meta message; any
            "Generated/" prefix is stripped first.
        insturment: instrument name looked up via GenPlot.findinsturment
            (spelling kept as-is — it is part of the public signature).

    Returns:
        The populated mido.MidiFile.
    """
    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)
    filename = filename.replace("Generated/", "")
    track.append(MetaMessage("marker", text=filename))
    track.append(MetaMessage("track_name", name="insturment"))
    i = 0
    import GenPlot
    # Search for the program number whose name matches `insturment`.
    # NOTE(review): both break branches fire on the very first iteration
    # (i becomes 1, and 1 < 128), so i always ends up 1 unless the match
    # happens at i == 0, and "Drums" is forced to 114 — confirm this is
    # the intended behaviour against GenPlot.findinsturment.
    while GenPlot.findinsturment(i) != insturment:
        i += 1
        if insturment == "Drums":
            i = 114
            break
        if i < 128:
            i = 1
            break
    track.append(MetaMessage('set_tempo', tempo=tempo, time=0))
    track.append(
        MetaMessage("time_signature",
                    numerator=4,
                    denominator=4,
                    clocks_per_click=24,
                    notated_32nd_notes_per_beat=8,
                    time=0))
    track.append(MetaMessage('key_signature', key="C", time=0))
    track.append(
        MetaMessage('smpte_offset',
                    frame_rate=24,
                    hours=32,
                    minutes=0,
                    seconds=0,
                    frames=0,
                    sub_frames=0,
                    time=0))
    track.append(Message('program_change', program=i, time=0))
    # Beats-per-minute derived from the microseconds-per-beat tempo.
    bpm = (tempo) / (250000 / 240)
    lastnote = -1
    for part in songl:
        for notes in part:
            # notelisttransform returns (pitches, duration) — presumably;
            # verify against GenerateModel.
            notelist = GenerateModel.notelisttransform(notes)[0]
            duration = int(round(100000 * notelist[1] / bpm)) * 3
            if duration == 0:
                # Zero-length notes get a default half-beat duration.
                duration = int(round(100000 * 0.5 / bpm)) * 3
            pause = (100000 / bpm) - duration
            if pause < 0:
                pause = 0
            for note in notelist[0]:
                # Only the first pitch of a chord carries the delta time.
                if notelist[0].index(note) == 0:
                    time = int(pause)
                else:
                    time = 0
                track.append(
                    Message('note_on',
                            note=librosa.note_to_midi(note.replace(".", "")),
                            velocity=127,
                            time=time))
                # Close the previously sounding note, if any.
                if lastnote != -1:
                    track.append(
                        Message('note_off',
                                note=librosa.note_to_midi(lastnote),
                                velocity=0,
                                time=int(duration)))
                lastnote = note.replace(".", "")
                # time+=random.choice(durations)
    return mid
# %%
# ------------------------- MIDI write file -------------------------------
# Build a short descending three-note phrase (E4, D4, C4) and save it.
composed = MidiFile()
composed.ticks_per_beat = 96
track = MidiTrack()
# (message type, note number, delta time in ticks)
phrase = [
    ('note_on', 64, 0),
    ('note_on', 62, 200),
    ('note_on', 60, 200),
    ('note_off', 64, 200),
    ('note_off', 62, 200),
    ('note_off', 60, 200),
]
for msg_type, pitch, delta in phrase:
    track.append(Message(msg_type, note=pitch, velocity=64, time=delta))
composed.tracks.append(track)
composed.save('composed.mid')

# %%
# Read the file back to verify it round-trips.
compo = MidiFile('composed.mid')
def audio2midi(wavfile, modelpath):
    """Transcribe a beatbox recording into a drum MIDI file.

    Detects onsets in *wavfile*, classifies each onset segment as
    kick/snare/hihat using the model stored at *modelpath*, and writes
    the result to 'resources/beatbox.mid'.

    Args:
        wavfile: path to the input audio file.
        modelpath: path to the saved classifier (loaded via load_model_ext).

    Returns:
        The detected tempo in beats per minute, rounded to an int.
    """
    # ---------- Removes silence under threshold (-50 dB) ----------
    silences = remove_silence(wavfile, thresh=-50)

    # ---------- Load audio at a fixed 44.1 kHz sample rate ----------
    y, sr = librosa.load(wavfile, sr=44100)

    # ---------- Onset and tempo detection ----------
    onset = librosa.onset.onset_detect(y=y, sr=sr, backtrack=True)
    onset_env = librosa.onset.onset_strength(y=y, sr=sr)
    onset_sec = librosa.frames_to_time(onset, sr=sr)
    onset_frames = librosa.frames_to_samples(onset)
    tempo = int(librosa.beat.tempo(onset_envelope=onset_env, sr=sr))
    print(f'Detected Tempo: {tempo}')
    tempo = mido.bpm2tempo(tempo)  # now microseconds per beat

    # ---------- Pair every onset with its offset sample index ----------
    beats = onset_offset(sr=sr,
                         onset=onset,
                         onsetframes=onset_frames,
                         silences=silences)

    # ---------- Classify each onset segment as kick, snare or hihat ----------
    predictions = []
    model, class_names = load_model_ext(modelpath)
    for beat in beats:
        current_signal = y[beat[0]:beat[1]]
        pred = predict_one(model, class_names, current_signal, sr)[0]
        predictions.append(pred)
        print(pred)

    # ---------- Set up initial MIDI parameters ----------
    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)
    tick_resolution = 480
    onset_ticks = [
        int(round(mido.second2tick(x, ticks_per_beat=tick_resolution,
                                   tempo=tempo))) for x in onset_sec
    ]
    track.append(mido.MetaMessage('set_tempo', tempo=tempo))

    # General MIDI percussion note numbers (hoisted: loop-invariant).
    drum_notes = {'kick': 36, 'snare': 38, 'hihat': 42}

    # ---------- Create MIDI file ----------
    # One zero-length note per onset; each note_on carries the delta from
    # the previous onset (the first one is absolute from tick 0).
    for i, tick in enumerate(onset_ticks):
        current_note = drum_notes[f'{predictions[i]}']
        current_tick = tick if i == 0 else tick - onset_ticks[i - 1]
        track.append(
            Message('note_on', note=current_note, velocity=64,
                    time=current_tick))
        track.append(Message('note_off', note=current_note, time=0))

    mid.save('resources/beatbox.mid')
    K.clear_session()
    return round(mido.tempo2bpm(tempo))
import mido from mido import Message, MidiFile, MidiTrack #训练 #训练 #测试 mid = MidiFile() track = MidiTrack() track2 = MidiTrack() track3 = MidiTrack() track4 = MidiTrack() mid.tracks.append(track) mid.tracks.append(track2) mid.tracks.append(track3) mid.tracks.append(track4) tra = [track,track2,track3,track4] def yin(yin,pai,qian=0,unit=track,tong=0,liang=64,qi=2): #yin是指哪个音,pai是指时间(节拍),qian是noteon里的time,liang是指音量,tong是通道, if type(yin)== str: yin = num(yin) unit.append(Message('program_change',channel=0,program=qi,time=0)) #pi是乐器 默认钢琴(2) unit.append(Message('note_on', note=yin, velocity=liang, time=int(qian),channel=tong)) #音开始 unit.append(Message('note_off', note=yin, velocity=liang, time=int(pai),channel=tong)) def beat(time): #与mido的拍子互换 time /= 60 * 1000 time = 1/time return time def myin(fu,pai,time=120,du=None,chord=None,bef=None,note="low",tr=track,yue=2): #和声版 pig = int(beat(time)) for i in range(len(pai)):
def __init__(self):
    """Create an empty MIDI file with one track, kept on the instance.

    Fix: the file and track were previously built as locals and then
    discarded, so the constructor had no lasting effect; storing them on
    ``self`` lets other methods append messages and save the file.
    """
    self.mid = MidiFile()
    self.track1 = MidiTrack()
    self.mid.tracks.append(self.track1)
def write_events_to_midi(start_time, note_events, pedal_events, midi_path):
    """Write out note events to MIDI file.

    Args:
      start_time: float, absolute time (seconds) that maps to tick 0
      note_events: list of dict, e.g. [
        {'midi_note': 51, 'onset_time': 696.63544, 'offset_time': 696.9948, 'velocity': 44},
        {'midi_note': 58, 'onset_time': 696.99585, 'offset_time': 697.18646, 'velocity': 50}
        ...]
      pedal_events: list of dict with 'onset_time' / 'offset_time', or falsy
      midi_path: str
    """
    from mido import Message, MidiFile, MidiTrack, MetaMessage

    # Timing configuration matching the MIDIs in the MAESTRO dataset.
    ticks_per_beat = 384
    beats_per_second = 2
    ticks_per_second = ticks_per_beat * beats_per_second
    microseconds_per_beat = int(1e6 // beats_per_second)

    midi_file = MidiFile()
    midi_file.ticks_per_beat = ticks_per_beat

    # Track 0 carries only tempo / time-signature metadata.
    track0 = MidiTrack()
    track0.append(MetaMessage('set_tempo', tempo=microseconds_per_beat, time=0))
    track0.append(MetaMessage('time_signature', numerator=4, denominator=4, time=0))
    track0.append(MetaMessage('end_of_track', time=1))
    midi_file.tracks.append(track0)

    # Flatten notes (and optional pedal events) into one message roll:
    # a note contributes an onset entry plus a velocity-0 offset entry.
    message_roll = []
    for event in note_events:
        message_roll.append({
            'time': event['onset_time'],
            'midi_note': event['midi_note'],
            'velocity': event['velocity'],
        })
        message_roll.append({
            'time': event['offset_time'],
            'midi_note': event['midi_note'],
            'velocity': 0,
        })
    if pedal_events:
        for event in pedal_events:
            # Sustain pedal = controller 64; 127 pressed, 0 released.
            message_roll.append({'time': event['onset_time'],
                                 'control_change': 64, 'value': 127})
            message_roll.append({'time': event['offset_time'],
                                 'control_change': 64, 'value': 0})

    # Emit messages chronologically, converting absolute times to deltas.
    message_roll.sort(key=lambda entry: entry['time'])

    track1 = MidiTrack()
    previous_ticks = 0
    for message in message_roll:
        this_ticks = int((message['time'] - start_time) * ticks_per_second)
        if this_ticks < 0:
            # Anything before start_time is dropped silently.
            continue
        diff_ticks = this_ticks - previous_ticks
        previous_ticks = this_ticks
        if 'midi_note' in message:
            track1.append(
                Message('note_on',
                        note=message['midi_note'],
                        velocity=message['velocity'],
                        time=diff_ticks))
        elif 'control_change' in message:
            track1.append(
                Message('control_change',
                        channel=0,
                        control=message['control_change'],
                        value=message['value'],
                        time=diff_ticks))
    track1.append(MetaMessage('end_of_track', time=1))
    midi_file.tracks.append(track1)

    midi_file.save(midi_path)
import numpy as np
from mido import Message, MidiFile, MidiTrack

# ... data for test ...
# IMPORTANT: time is measured in milliseconds
# TODO: detect correctness of data
numbers = np.array([60, 60, 64, 67])          # MIDI note numbers
times = np.array([3000, 3000, 6000, 9000])    # note-on delta times
lengths = np.array([8000, 4000, 2000, 1000])  # note durations
delta = 10

if __name__ == "__main__":
    mid = MidiFile()
    tracks = []
    # One track per note triple: program change, note on, note off.
    for pitch, start, length in zip(numbers, times, lengths):
        track = MidiTrack()
        tracks.append(track)
        mid.tracks.append(track)
        track.append(Message('program_change', program=33, time=delta))
        track.append(Message('note_on', note=pitch, velocity=124, time=start))
        track.append(Message('note_off', note=pitch, velocity=0, time=length))
    mid.save('tmp.mid')
def main():
    """Convert a superpermutation text file into a MIDI melody.

    Reads the digits from the input file, maps each to a scale degree,
    derives note lengths from the chosen strategy, writes a .mid file,
    and optionally plays it back.

    Fix: the scale lookup previously used ``.get(scale, "major")`` whose
    fallback is the *string* "major", not a callable — an unrecognised
    scale name would crash with a TypeError when scaleFunction was
    called. The fallback is now the major-scale function itself.
    """
    # Get options from input
    parser = argparse.ArgumentParser(
        description='Superpermutation to midi converter')
    parser.add_argument(
        'inputfile',
        help='The file containing the superpermutation to convert.')
    parser.add_argument('outputfile',
                        nargs='?',
                        default='inputfile',
                        help='The file to store the midi output in.')
    parser.add_argument(
        '-s',
        '--scale',
        nargs='?',
        default="default",
        help='Scale to translate the numbers into. Possible scales:\
 major, natural-minor, harmonic-minor, whole-note')
    parser.add_argument(
        '-p',
        '--play',
        action='store_true',
        help=
        'Play back the midifile when running the script(requires python-rtmidi)'
    )
    parser.add_argument(
        '-I',
        '--instrument',
        default=46,
        help='General MIDI instrument number from 0 to 127. Default: 46 (harp)'
    )
    parser.add_argument('-l',
                        '--note_length',
                        default='edge-weight',
                        help='The method to decide note lengths.\
 Possible values are: edge-weight, free-space, even')
    args = parser.parse_args()

    input_string = open(args.inputfile, 'r').read().strip()
    superpermutation = np.array(list(input_string), dtype=int)
    # Make sure it is zero indexed
    superpermutation -= superpermutation.min()
    N = superpermutation.max() + 1
    note_lengths = np.zeros_like(superpermutation)

    # Pick a default scale appropriate for the alphabet size.
    scale = args.scale
    if args.scale == "default":
        if N == 7:
            scale = "major"
        elif N == 6:
            scale = "whole-note"
        elif N == 5:
            scale = "major-pentatonic"
    scaleFunction = {
        "major": partial(numberToScale, scale=Scales.major),
        "natural-minor": partial(numberToScale, scale=Scales.natural_minor),
        "harmonic-minor": partial(numberToScale, scale=Scales.harmonic_minor),
        "whole-note": partial(numberToScale, scale=Scales.whole_note),
        "major-pentatonic": partial(numberToScale,
                                    scale=Scales.major_pentatonic),
        "miyako-bushi": partial(numberToScale, scale=Scales.miyako_bushi)
    }.get(scale, partial(numberToScale, scale=Scales.major))

    if args.note_length == 'free-space':
        for i, number in enumerate(superpermutation):
            num_perms = 0
            # Length based on how far it is to the same value on both sides
            for j in range(1, N):
                if i - j < 0 or superpermutation[i - j] == number:
                    break
                num_perms += 1
            for j in range(1, N):
                if i + j >= superpermutation.size or superpermutation[
                        i + j] == number:
                    break
                num_perms += 1
            note_lengths[i] = num_perms - N + 1
    elif args.note_length == 'edge-weight':
        for i, number in enumerate(superpermutation):
            weight = 0
            # Weight = distance until the next legal permutation window.
            for j in range(i + 1, i + N + 1):
                if j >= N and j < superpermutation.size:
                    if isLegalPermutation(superpermutation[j - N:j]):
                        break
                weight += 1
            note_lengths[i] = N - weight - 1
    else:
        note_lengths[:] = N - 1
    # Fix the end values
    note_lengths[0:N - 1] = N - 1

    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)
    track.append(Message('program_change', program=args.instrument, time=0))
    for i in range(superpermutation.size):
        note = scaleFunction(superpermutation[i])
        track.append(Message('note_on', note=note, time=0))
        # Note length is exponential in the computed weight.
        track.append(
            Message('note_off', note=note, time=2**(note_lengths[i] + 10 - N)))

    if args.outputfile == "inputfile":
        mid.save(args.inputfile.split('.')[0] + ".mid")
    else:
        mid.save(args.outputfile)

    if args.play:
        port = mido.open_output()
        for msg in mid.play():
            port.send(msg)
def __init__(self, filename):
    """Prepare an empty single-track MIDI file destined for *filename*."""
    self.filename = filename
    track = MidiTrack()
    self.mid = MidiFile()
    self.mid.tracks.append(track)
    self.track = track
def write_midi_notes(midi_notes):
    """Create a MIDI file containing one empty track.

    NOTE(review): *midi_notes* is currently unused and nothing is
    returned or saved — this looks like unfinished scaffolding for a
    note writer; confirm before relying on it.
    """
    track = MidiTrack()
    mid = MidiFile()
    mid.tracks.append(track)
def _write_midi_track(self, tab: Tab, name: str, midifile: MidiFile,
                      alt_track: bool = False):
    """Render one tab (main or alternate voice) into a MIDI track.

    Walks every staff of *tab* character by character, turning ledger
    symbols into note_on/note_off messages with delta times managed by a
    MetaData helper, and appends the finished track to *midifile*.

    Args:
        tab: parsed tablature to render.
        name: track name written into the track header.
        midifile: destination mido MidiFile.
        alt_track: if True render the staff's alternate voice; also used
            as the MIDI channel number (int(alt_track) -> 0 or 1).
    """
    meta, track, time = MetaData(), MidiTrack(), "4/4"
    self._write_track_header(track, channel=int(alt_track), track_name=name)
    self._write_time_signature(track=track)
    self._write_key_signature(track=track)
    self._set_track_tempo(track=track)
    # self._instrument_change(track=track, channel=9, instrument=118)
    for staff in tab.staves():
        print("Start writing of staff id[%d]" % staff.id())
        print("Staff meta information = %s" % staff.meta_keys())
        print("Times staff is played = %d" % staff.times_played())
        # Per-staff overrides for tempo / time signature / key.
        if staff.meta_value("tempo") != "None":
            self._set_track_tempo(track=track,
                                  bpm=int(staff.meta_value("tempo")))
            print("Found meta value:", staff.meta_value("tempo"))
        if staff.meta_value("time") != "None":
            time = staff.meta_value("time")
            self._write_time_signature(track=track, signature=time)
            print("Found meta value:", staff.meta_value("time"))
        if staff.meta_value("key") != "None":
            self._write_key_signature(track=track,
                                      key=staff.meta_value("key"))
            print("Found meta value:", staff.meta_value("key"))
        ledgers = []
        if alt_track:
            ledgers = staff.alt_track()
        else:
            ledgers = staff.main_track()
        # All ledgers of a staff share the same text length.
        start, end = 0, len(ledgers[0].text()) - 1
        measures_count = 0
        # Repeat the staff as many times as it is played.
        for _ in range(1, staff.times_played() + 1):
            for i in range(start, end + 1):
                if check_end_of_measure(ledgers, i) and i != end:
                    # Measure boundary: recompute the tick length of one
                    # text column from the measure width and time signature.
                    time_sig = utils.get_time_signature(time)
                    time_numerator, time_denominator = time_sig[0], str(
                        time_sig[1])
                    total_time_per_measure = TICKS_PER_BEAT[
                        time_denominator] * time_numerator
                    total_ticks_per_measure = ledgers[0].text()[i + 1:].find(
                        "|")
                    tick_time = int(total_time_per_measure /
                                    total_ticks_per_measure)
                    meta.set_tick_time(tick_time)
                    measures_count += 1
                    print("measure[%d]: total_time_per_measure = %d, total_ticks_per_measure = %d, tick_time = %d" % \
                        (measures_count, total_time_per_measure, total_ticks_per_measure, tick_time))
                    continue
                for ledger in ledgers:
                    char = ledger.char(i)
                    octave = ledger.octave()
                    if meta.ledger_active(ledger.name()):
                        # A note is currently sounding on this ledger.
                        if char in BREAK_CHARS:
                            # Explicit break: end the sounding note.
                            old_char = meta.clear_ledger(ledger.name())
                            delta_time = meta.time_since_last_message()
                            meta.clear_timer()
                            self._write_note_off(track,
                                                 channel=int(alt_track),
                                                 note=utils.get_note_value(
                                                     old_char, octave),
                                                 time=delta_time)
                        elif char in VALID_NOTES:
                            # New note replaces the sounding one:
                            # note_off for the old, note_on for the new.
                            old_char = meta.clear_ledger(ledger.name())
                            meta.activate_note(ledger.name(), char)
                            delta_time = meta.time_since_last_message()
                            meta.clear_timer()
                            self._write_note_off(track,
                                                 channel=int(alt_track),
                                                 note=utils.get_note_value(
                                                     old_char, octave),
                                                 time=delta_time)
                            self._write_note_on(track,
                                                channel=int(alt_track),
                                                note=utils.get_note_value(
                                                    char, octave),
                                                time=0)
                    else:
                        # Nothing sounding: only a valid note starts one.
                        if char in VALID_NOTES:
                            meta.activate_note(ledger.name(), char)
                            delta_time = meta.time_since_last_message()
                            meta.clear_timer()
                            self._write_note_on(track,
                                                channel=int(alt_track),
                                                note=utils.get_note_value(
                                                    char, octave),
                                                time=delta_time)
                # Advance the running clock one column, except on
                # measure-boundary columns.
                if not check_end_of_measure(ledgers, i):
                    meta.tick()
    self._write_end_of_track(track)
    midifile.tracks.append(track)
def main():
    """Read a MIDI partiture, capture its note_on events per channel, and
    write them back out as 'fixed_<name>'.

    Fixes:
      * ``for meta in enumerate(meta_msgs)`` printed (index, msg) tuples —
        the enumerate wrapper was accidental and is removed.
      * numpy integer scalars were passed to ``Message(...)``; mido
        validates attribute types and rejects non-``int`` values, so the
        array cells are now cast with ``int()``.
    """
    # Read the file
    mid = MidiFile('midi_partitures/el_aguacate.mid')
    n_channels = 16
    seconds = mid.length
    ticks_per_beat = mid.ticks_per_beat
    ticks_per_second = ticks_per_beat * 2  # (120 beats / 60 seconds).
    # Default of 120 beats per minute.
    track_lengths = [len(track) for track in mid.tracks]
    max_notes = max(track_lengths)

    # define input sequences.
    # track = [60] * (max_notes + 10000)
    # tracks = [track] * n_channels
    tracks = np.full((n_channels, max_notes + 10000), 60)
    velocity = np.full((n_channels, max_notes + 10000), 64)
    time = np.full((n_channels, max_notes + 10000), 0)

    # Play the song ...
    current_time = 0.0
    i = 0
    contador = 0
    model = None
    meta_msgs = []
    notes_msgs = []
    for msg in mid.play(meta_messages=True):
        if msg.is_meta:
            meta_msgs.append(msg)
        if (msg.type == 'note_on'):
            if msg.time != current_time:
                i = i + 1
                current_time = msg.time
            # type, channel, note, velocity, time.
            tracks[msg.channel][i] = msg.note
            velocity[msg.channel][i] = msg.velocity
            time[msg.channel][i] = msg.time
            contador = contador + 1
            # Stop after every 100 captured notes (training hook disabled).
            if contador % 100 == 0:
                # model = unitary_train(tracks[:, contador - 100:contador])
                break

    # Write the song.
    file = MidiFile(type=1)
    for meta in meta_msgs:
        print(meta)
    for ch in range(0, n_channels):
        track_i = MidiTrack()
        for j in range(0, max_notes):
            track_i.append(
                Message('note_on',
                        note=int(tracks[ch][j]),
                        velocity=int(velocity[ch][j]),
                        time=int(time[ch][j])))
        file.tracks.append(track_i)
    file.print_tracks()
    file.save('fixed_' + mid.filename)
    print('wrote')
def run(self):
    """Randomize the notes of self.mid's track 1 and build a new MIDI file.

    For every note_on message, optionally perturbs its pitch, velocity,
    start time and end time by a random amount within the configured
    min/max bounds, honouring the per-field enable/except flags and
    per-note target lists, then hands the rebuilt track to
    ``self.create_new_mid``.

    Fix: the Velocity branches read ``msg.vel`` — mido ``Message`` has no
    ``vel`` attribute (it is ``velocity``), so enabling velocity
    randomization raised AttributeError. Both reads now use
    ``msg.velocity``.

    NOTE(review): the 'Start Time' except-branch perturbs ``msg.note``
    rather than ``msg.time`` — that looks like another copy/paste slip,
    but it is left unchanged here pending confirmation.
    """
    if self.check_no_errors():
        track = MidiTrack()
        for i, msg in enumerate(self.mid.tracks[1]):
            self.ui.progressBar.setValue(int(100 * (i + 1) / self.length))
            if msg.is_meta:
                track.append(msg)
            if msg.type == 'note_on':
                new_note = msg.note
                new_vel = msg.velocity
                new_start = msg.time
                new_end = msg.time + 1
                # Search for msg note off time
                for j in range(i, len(self.mid.tracks[1])):
                    test_msg = self.mid.tracks[1][j]
                    if test_msg.type == 'note_off' and test_msg.note == msg.note:
                        new_end = test_msg.time
                        break
                if self.enable_flags['Note']:
                    if self.except_flags['Note']:
                        if msg.note not in self.note_targets['Note']:
                            new_note = self.clamp(
                                0,
                                msg.note + randint(min_vals['Note'][0],
                                                   max_vals['Note'][0]), 127)
                    else:
                        if msg.note in self.note_targets['Note']:
                            nindex = self.note_targets['Note'].index(msg.note)
                            new_note = self.clamp(
                                0,
                                msg.note + randint(min_vals['Note'][nindex],
                                                   max_vals['Note'][nindex]),
                                127)
                if self.enable_flags['Velocity']:
                    if self.except_flags['Velocity']:
                        if msg.note not in self.note_targets['Velocity']:
                            new_vel = self.clamp(
                                0,
                                msg.velocity +
                                randint(min_vals['Velocity'][0],
                                        max_vals['Velocity'][0]), 127)
                    else:
                        if msg.note in self.note_targets['Velocity']:
                            nindex = self.note_targets['Velocity'].index(
                                msg.note)
                            new_vel = self.clamp(
                                0,
                                msg.velocity +
                                randint(min_vals['Velocity'][nindex],
                                        max_vals['Velocity'][nindex]), 127)
                if self.enable_flags['Start Time']:
                    if self.except_flags['Start Time']:
                        if msg.note not in self.note_targets['Start Time']:
                            new_start = self.clamp(
                                0,
                                msg.note +
                                randint(min_vals['Start Time'][0],
                                        max_vals['Start Time'][0]), 127)
                    else:
                        if msg.note in self.note_targets['Start Time']:
                            nindex = self.note_targets['Start Time'].index(
                                msg.note)
                            new_start = self.clamp(
                                0,
                                msg.time +
                                randint(min_vals['Start Time'][nindex],
                                        max_vals['Start Time'][nindex]), 127)
                if self.enable_flags['End Time']:
                    if self.except_flags['End Time']:
                        if msg.note not in self.note_targets['End Time']:
                            new_end = self.clamp(
                                msg.time,
                                new_end + randint(min_vals['End Time'][0],
                                                  max_vals['End Time'][0]),
                                127)
                    else:
                        if msg.note in self.note_targets['End Time']:
                            nindex = self.note_targets['End Time'].index(
                                msg.note)
                            new_end = self.clamp(
                                msg.time,
                                new_end +
                                randint(min_vals['End Time'][nindex],
                                        max_vals['End Time'][nindex]), 127)
                track.append(
                    Message('note_on',
                            note=new_note,
                            velocity=new_vel,
                            time=new_start))
                track.append(
                    Message('note_off',
                            note=new_note,
                            velocity=new_vel,
                            time=new_end))
        # track.sort(key=lambda message: message.time)
        self.create_new_mid(track)
def split_tracks(tdf, mdf, n_meas=4, n_copies=1, n_transpose=0,
                 merge_tracks=False, song_tpb=480, song_idx=0, folder=None):
    """
    tdf: Track level dataframe, contains information about the track
    mdf: Message level dataframe, contains information about MIDI messages
    n_measures: The number of measures that will form an input to the GAN
    n_duplicates: The number of times the track should be duplicated
    n_transpose: The number of times the track should be transposed
    transpose: True if the MIDI data should be transposed, and number of
        octaves up and down
    song_tpb: The ticks per beat defined in the original song. This is
        needed to ensure the time values formed mean something.

    Writes one .mid (and matching pypianoroll .npz) per track per
    n_meas-bar section into *folder*, named
    "<song_idx>_<track>_<section>.mid".
    """
    tracks = list(tdf['track_num'].unique())

    # Testing Transposing up and down from middle octave
    if (n_transpose > 0):
        print('Transposing!')
        mdf.reset_index(drop=True, inplace=True)
        # 60 corresponds to middle C; this preserves notes but might alter
        # harmonics
        mdf['note'] = mdf['note'] % 12 + 60
        # Bound the number of transposes to the MIDI range
        if (n_transpose > 5):
            n_transpose = 5
        if (n_transpose < 0):
            n_transpose = 0
        # For each note, tranpose n times up and down relative to middle C.
        # Since we need middle C to be first:
        ranges = [0] + [n for n in range(n_transpose*(-1), 0)] + \
            [n for n in range(1, n_transpose+1)]
        # Expand every message row into one row per transposition; only the
        # untransposed copy (n == 0) keeps its original delta time.
        nrs = [{'type': x[1],
                'song_idx': x[2],
                'track_num': x[3],
                'time': x[4] if n == 0 else 0,
                'velocity': x[5],
                'note': x[6] + n*12,
                'ctime': x[7],
                'cbeats': x[8],
                'bar': x[9],
                } for x in mdf.itertuples() for n in ranges]
        mdf = pd.DataFrame.from_records(nrs)
        # Drop transposed notes that fall outside the MIDI note range.
        mdf = mdf.loc[mdf['note'].between(0, 127)]

    if (merge_tracks == True):
        print('Merging!')
        # Okay. What's the functionality to combine tracks together?
        # Flatten all tracks into one: re-sort by cumulative beats and
        # recompute delta times from consecutive-row beat differences.
        mdf.sort_values(by=['cbeats', 'track_num'], inplace=True)
        mdf.reset_index(drop=True, inplace=True)
        mdf['tmp_idx'] = mdf.index
        mdf2 = mdf[['cbeats']].rename(columns={'cbeats': 'prev_row_cbeats'})
        mdf2['tmp_idx'] = mdf2.index + 1
        mdf = mdf.merge(mdf2, on=['tmp_idx'], how='left')
        mdf.fillna({'prev_row_cbeats': 0}, inplace=True)
        mdf['beat_delta'] = mdf['cbeats'] - mdf['prev_row_cbeats']
        mdf['time'] = (mdf['beat_delta']*song_tpb)
        mdf = mdf.round({'time': 0})
        mdf['time'] = mdf['time'].astype(int)
        mdf['track_num'] = 1
        tracks = [1]

    # Keep only note events; a velocity-0 note_on is really a note_off.
    mdf = mdf.loc[mdf['type'].isin(['note_on', 'note_off'])]
    # Change type to note off
    mdf['type'] = np.where(mdf['velocity'] == 0, 'note_off', mdf['type'])
    # Assign each message to an output section of n_meas bars.
    mdf['outfile'] = (mdf['bar']/n_meas).astype(int)
    midi_type = 1 if n_copies >= 1 else 0

    for t in tracks:
        for f in mdf['outfile'].unique():
            # print('Track: %d, Section: %d' % (t,f))
            # Create the track specific MIDI file
            mid = MidiFile(ticks_per_beat=int(song_tpb), type=midi_type)
            midiTrack = MidiTrack()
            # Get Specific messages for the track
            tmdf = mdf.loc[(mdf['track_num'] == t) & (mdf['outfile'] == f)]
            # Get relevant information
            is_empty = len(tmdf) == 0
            no_note_on = len(tmdf.loc[tmdf['type'] == 'note_on']) == 0
            # Skip file if there are no notes played in this track
            if (is_empty or no_note_on):
                continue
            # Tempo MIDI Message
            midiTrack.append(MetaMessage('set_tempo', time=0, tempo=500000))
            # Time Signature MIDI Message (Standardize to 120bpm)
            midiTrack.append(MetaMessage('time_signature', time=0,
                                         numerator=4, denominator=4,
                                         clocks_per_click=24,
                                         notated_32nd_notes_per_beat=8))
            # Key Signature MIDI Message (Shouldn't matter since MIDI note
            # number determines the correct note)
            midiTrack.append(MetaMessage('key_signature', time=0, key='C'))
            # Individual Messages corresponding to notes
            midiTrack += [Message(x[1], time=int(x[4]), note=int(x[6]),
                                  velocity=int(x[5]), channel=0)
                          for x in tmdf.itertuples()]
            # End of Track MIDI Message
            midiTrack.append(MetaMessage('end_of_track', time=0))
            # If we want to duplicate the track
            for i in range(0, n_copies+1):
                mid.tracks.append(midiTrack)
            filename = folder + str(song_idx) + '_' + str(t) + '_' + \
                str(f) + '.mid'
            filename_npz = folder + str(song_idx) + '_' + str(t) + '_' + \
                str(f) + '.npz'
            # Save MIDI and NPZ File
            mid.save(filename)
            # NOTE(review): unreachable — the same condition already hit
            # `continue` above; kept as in the original.
            if (is_empty or no_note_on):
                print('Filename: %s' % filename)
                print('Error! No notes found in track, continuing')
                print(tmdf)
                print(mdf['outfile'].unique())
            try:
                pyp_mid = pypianoroll.read(filename)
                pyp_mid.save(filename_npz)
            except Exception as ex:
                print(ex)
                print('Error! Currfile: %s' % filename)
                continue
def play_exercise(self):
    """Render the selected exercise to a temporary MIDI file and play it.

    Builds the note sequence from the exercise pattern (octave, base
    note, BPM and articulation settings taken from the instance), plays
    it through pygame, and returns timer data: a list of
    [percent_complete, delay_ms] pairs, one per note, for UI progress.
    """
    self.stop()
    # Load selected exercise
    ex = self.exercises[self.exercise]
    name = ex[0]
    seq = ex[1]
    logging.debug(f"Starting exercise '{name}' (pattern: {seq})")
    # Init midi file
    midi_file = tempfile.NamedTemporaryFile(delete=False)
    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)
    track.append(
        Message('program_change', program=self.instrument, time=0)
    )
    # Compute starting note: octave * semitones + relative note
    base_note = 24 + (self.octave - 1) * 12 + self.note
    # Seconds per beat
    seconds = 60 / self.bpm
    # Note duration is one beat minus the cut, in milliseconds
    base_duration = (seconds - self.cut) * 1000
    # Prepend the base note to the midi if the preview is selected
    timer_delay = 0
    if self.preview:
        timer_delay = int(base_duration*self.preview_time)
        track.append(
            Message('note_on', note=base_note, velocity=100, time=0)
        )
        track.append(
            Message('note_off', note=base_note, time=timer_delay)
        )
    timer_data = []
    # Add the rest of the notes
    for idx, p in enumerate(seq):
        # Normalise the note step as a string
        item = str(p)
        # Extract the step
        step = int(sub(r'[^0-9]', '', item))
        # If the number has dashes or dots, add half a beat or a quarter
        # beat for each, respectively
        duration = base_duration * (1 + item.count('-')*0.5 +
                                    item.count('.')*0.25)
        # Calculate percentage of current step
        current_index = idx + 1
        percent = (current_index / len(seq)) * 100
        # If this is the last note and the user wants to, prolong it
        if current_index == len(seq) and self.prolong:
            logging.debug(f"prolonging {step}")
            delay = int(base_duration*self.prolong_time)
        else:
            delay = int(duration)
        # Append the note to the midi
        track.append(
            Message('note_on', note=(base_note+step), velocity=100, time=0)
        )
        track.append(
            Message('note_off', note=(base_note+step), time=delay)
        )
        timer_data.append([percent, timer_delay])
        timer_delay += duration
    # Save midi file and load it with pygame separately,
    # to avoid race conditions
    mid.save(file=midi_file)
    midi_file.flush()
    midi_file.close()
    pygame.mixer.music.load(midi_file.name)
    pygame.mixer.music.play()
    # Cleanup
    if 'WARMUPPY_KEEP_MIDI' in os.environ:
        copyfile(midi_file.name, os.environ['WARMUPPY_KEEP_MIDI'])
    os.remove(midi_file.name)
    return timer_data
def midi_write_from_string(self, s: str, fn: str):
    """Convert the encoded melody string *s* into MIDI and save it as *fn*.

    The string (after ``makeMidi.string_conversion``) is a stream of
    symbols: letters A-G select a key offset, digits 1-7 play a scale
    degree, '-' stretches the pending note-on delay, and ' ' is a rest
    that closes the sounding note.

    Refactor: the original had seven copy-pasted branches for the digits
    differing only in a pitch offset; they are now driven by a lookup
    table. Behaviour is unchanged.
    """
    # WRITE A MIDI FILE
    mid = MidiFile()
    track1 = MidiTrack()
    mid.tracks.append(track1)
    ma = makeMidi.string_conversion(s)
    print(ma)

    # Base pitch offset selected by each key letter.
    key_mods = {'A': 56, 'B': 58, 'C': 59, 'D': 61, 'E': 63, 'F': 64,
                'G': 66}
    # Additional semitone offset for each scale degree digit
    # (diatonic spacing: 0, 1, 1, 2, 3, 3, 4 as in the original chain).
    degree_offsets = {'1': 0, '2': 1, '3': 1, '4': 2, '5': 3, '6': 3,
                      '7': 4}

    velocity = 50
    key_mod = 0
    last_note = []
    note = 0
    yyy = 100  # Note On delay
    zzz = 100  # Note Off delay

    for symb in ma:
        if symb in key_mods:
            key_mod = key_mods[symb]
        elif symb == '-':
            # A dash stretches the delay before the next note-on.
            yyy = yyy + 100
        elif symb == ' ':
            # A rest: close the sounding note (it stays in last_note,
            # as in the original) and stretch the note-off delay.
            if len(last_note) > 0:
                makeMidi.off_midi(last_note[0], velocity, yyy)
            zzz = zzz + 100
        elif symb in degree_offsets:
            # A scale degree: close the previous note, then start this one.
            if len(last_note) > 0:
                makeMidi.off_midi(last_note[0], velocity, zzz)
                del last_note[0]
                zzz = 100
            last_note.append(int(symb) + note + key_mod +
                             degree_offsets[symb])
            makeMidi.on_midi(last_note[0], velocity, yyy)
            yyy = 100
    mid.save(fn)
from mido import Message, MetaMessage, MidiFile, MidiTrack

# MIDI test objects


def _one_note_track(name, on_delta, off_delta):
    """Build a track playing note 60: name meta, on/off pair, end marker."""
    t = MidiTrack()
    t.append(MetaMessage('track_name', name=name, time=0))
    t.append(Message('note_on', note=60, velocity=64, time=on_delta))
    t.append(Message('note_off', note=60, velocity=64, time=off_delta))
    t.append(MetaMessage('end_of_track', time=0))
    return t


# Track that is already quantized.
track0 = _one_note_track('test0', 0, 50)

# Simple track that is not quantized.
track1 = _one_note_track('test1', 2, 50)

# Track with notes that, when quantized to 32nd notes, would run over
# the original end time of the track.
track2 = _one_note_track('test2', 29, 31)

# Track with only a name meta message and no notes.
meta_track = MidiTrack()
meta_track.append(MetaMessage('track_name', name='meta-track', time=0))

# File whose notes live in track 0.
midi_notes_in_track0 = MidiFile()
midi_notes_in_track0.tracks.append(track0)
def __init__(self, channel: int, instrument: Instruments):
    """Create and label the backing MidiTrack, then delegate to the base class."""
    track = MidiTrack()
    self.track = track
    track.name = self.name
    super().__init__(track, channel, instrument)
def writeinit():
    """Reset the module-level `mid`/`track` pair to a fresh single-track file."""
    global mid, track
    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)
def generate_music(self, model, length=3000): ''' Generates the midi file based on the learning model - trained model length - length of the midi sequence ''' # Generating the music # Making predictions tic = time.time() y_pred = [] x = self._seed x = numpy.expand_dims(x, axis=0) print('Making Music...') for _ in range(length): pred = model.predict(x) x = numpy.squeeze(x) x = numpy.concatenate((x, pred)) x = x[1:] x = numpy.expand_dims(x, axis=0) pred = numpy.squeeze(pred) y_pred.append(pred) print('Compiling Music File...') for p in y_pred: # Rescaling the value to 0 - 127 # and ensuring it's a valid midi file p[0] = int(127 * p[0]) if p[0] < 0: p[0] = 0 elif p[0] > 127: p[0] = 127 p[1] = int(127 * p[1]) if p[1] < 0: p[1] = 0 elif p[1] > 127: p[1] = 127 # Rescaling the time back to normal time p[2] *= self._max_time if p[2] < 0: p[2] = 0 # print(y_pred) # rendering midi file print('Rendering Midi File...') pred_mid_song = MidiFile() track = MidiTrack() pred_mid_song.tracks.append(track) for p in y_pred: # appending other info as channel(0) and type(147) p = numpy.insert(p, 0, 147) byte = p.astype(int) msg = Message.from_bytes(byte[0:3]) _time = int(p[3] / 0.001025) msg.time = _time track.append(msg) print('Saving midi file') pred_mid_song.save('out/beth_gen1.midi') toc = time.time() print('Time taken for rendering midi file {}'.format(toc - tic)) print('Done')
def generate_mido(notes):
    """Convert note objects into a single-track mido MidiFile.

    Args:
        notes: iterable of objects exposing ``.pitch``, ``.start`` and
            ``.end`` (times in seconds).

    Returns:
        A type-0 mido.MidiFile at 120 BPM / 480 ticks per beat.

    Fix: the event frames were views of ``df`` (``dfs = df[[...]]``) and
    were then assigned new columns, which raises SettingWithCopyWarning
    and may silently fail on some pandas versions; ``.copy()`` makes the
    assignments well-defined.
    """
    note_dict = [{
        'note': n.pitch,
        'start(s)': n.start,
        'end(s)': n.end,
        'velocity': 64
    } for n in notes]
    df = pd.DataFrame.from_records(note_dict)

    # Build separate note_on / note_off event frames; seconds -> beats at
    # 120 BPM (x2).
    dfs = df[['note', 'start(s)']].copy()
    dfs['type'] = 'note_on'
    dfs['start_beat'] = dfs['start(s)'] * 2
    dfe = df[['note', 'end(s)']].copy()
    dfe['type'] = 'note_off'
    dfe['start_beat'] = dfe['end(s)'] * 2

    df2 = pd.concat([
        dfs[['note', 'type', 'start_beat']],
        dfe[['note', 'type', 'start_beat']]
    ])
    df2.sort_values(by=['start_beat'], inplace=True)
    df2['ctime'] = df2['start_beat'] * 480  # Use 480 ticks per beat
    # MIDI commands are sequential, we need to go from cumulative time to
    # time between events
    df2['time'] = df2['ctime'] - df2['ctime'].shift(1)
    df2 = df2.fillna(0)

    ### USING MIDO TO GENERATE MIDI FILES ####
    # Create the track specific MIDI file
    mid = MidiFile(ticks_per_beat=480, type=0)
    midiTrack = MidiTrack()
    # Tempo MIDI Message (Set to 120 BPM)
    midiTrack.append(MetaMessage('set_tempo', time=0, tempo=500000))
    # Time Signature MIDI Message (Standardize to 120bpm)
    midiTrack.append(
        MetaMessage('time_signature',
                    time=0,
                    numerator=4,
                    denominator=4,
                    clocks_per_click=24,
                    notated_32nd_notes_per_beat=8))
    # Key Signature MIDI Message (Shouldn't matter since MIDI note number
    # determines the correct note)
    midiTrack.append(MetaMessage('key_signature', time=0, key='C'))
    # itertuples order: (index, note, type, start_beat, ctime, time)
    midiTrack += [
        Message(x[2], note=int(x[1]), time=int(x[5]), velocity=64, channel=0)
        for x in df2.itertuples()
    ]
    # End of Track MIDI Message
    midiTrack.append(MetaMessage('end_of_track', time=0))
    # Append Track to MIDI File
    mid.tracks.append(midiTrack)
    # mid.save(savedir + '/' + filename + '_' + str(s) + '.mid')
    # print('Generated MIDO file!')
    # print(savedir + '/' + filename + '_' + str(s) + '.mid')
    return mid
def OutputMidi(outpath, path, durm_list, secNum, type):
    """Append a drum track built from ``durm_list`` onto the MIDI file at
    ``path`` and save the combined result to ``outpath``.

    Parameters
    ----------
    outpath : str
        Destination file for the augmented MIDI.
    path : str
        Source MIDI file providing ticks_per_beat and existing tracks.
    durm_list : sequence
        Three 16-step rows of hit flags: hi-hat, snare, bass drum.
    secNum : int
        Number of times the 16-step pattern is repeated.
    type : int
        0 = sixteenth-note grid (ticks_per_beat / 4);
        1 = triplet grid (ticks_per_beat / 3) skipping ``ignore_pos``.
    """
    hi_note = 42      # closed hi-hat (General MIDI percussion)
    s_drum_note = 38  # acoustic snare
    b_drum_note = 36  # bass drum
    mid = MidiFile(path)
    ignore_pos = [3, 7, 8, 12]
    print(durm_list)
    print(secNum)
    # drum
    new_track = MidiTrack()

    def _emit_steps(resolution, skip):
        # Walk the 16-step pattern secNum times.  `rest` counts silent
        # steps so the next hit carries the accumulated delta time.
        # Steps in `skip` are passed over without advancing `rest`,
        # matching the original type==1 behavior.
        rest = 0
        for _ in range(secNum):  # repeat pattern secNum times
            for step in range(16):  # one section
                if step in skip:
                    continue
                if (durm_list[0][step] or durm_list[1][step]
                        or durm_list[2][step]):
                    new_track.append(
                        Message('note_on', note=hi_note,
                                velocity=durm_list[0][step] * 48,
                                time=rest * resolution, channel=9))
                    new_track.append(
                        Message('note_on', note=s_drum_note,
                                velocity=durm_list[1][step] * 35,
                                time=0, channel=9))
                    new_track.append(
                        Message('note_on', note=b_drum_note,
                                velocity=durm_list[2][step] * 100,
                                time=0, channel=9))
                    # velocity-0 note_on acts as note_off one step later
                    new_track.append(
                        Message('note_on', note=hi_note, velocity=0,
                                time=resolution, channel=9))
                    new_track.append(
                        Message('note_on', note=s_drum_note, velocity=0,
                                time=0, channel=9))
                    new_track.append(
                        Message('note_on', note=b_drum_note, velocity=0,
                                time=0, channel=9))
                    rest = 0
                else:
                    rest += 1

    if type == 0:
        _emit_steps(int(mid.ticks_per_beat / 4), ())
    elif type == 1:
        _emit_steps(int(mid.ticks_per_beat / 3), ignore_pos)
    mid.tracks.append(new_track)
    mid.save(outpath)
def quantize_track(track, ticks_per_quarter, quantization):
    '''Return the differential time stamps of the note_on, note_off, and
    end_of_track events, in order of appearance, with the note_on events
    quantized to the grid given by the quantization.

    Arguments:
    track -- MIDI track containing note event and other messages
    ticks_per_quarter -- The number of ticks per quarter note
    quantization -- The note duration, represented as
                    1/2**quantization.'''
    pp = pprint.PrettyPrinter()

    # Message timestamps are represented as differences between
    # consecutive events. Annotate messages with cumulative timestamps.

    # Assume the following structure:
    # [header meta messages] [note messages] [end_of_track message]
    first_note_msg_idx = None
    for i, msg in enumerate(track):
        if msg.type == 'note_on':
            first_note_msg_idx = i
            break
    # Pair every message from the first note onward with its cumulative
    # (absolute) tick time.
    cum_msgs = list(
        zip(np.cumsum([msg.time for msg in track[first_note_msg_idx:]]),
            [msg for msg in track[first_note_msg_idx:]]))
    end_of_track_cum_time = cum_msgs[-1][0]
    quantized_track = MidiTrack()
    # Header meta messages are copied through unquantized.
    quantized_track.extend(track[:first_note_msg_idx])
    # Keep track of note_on events that have not had an off event yet.
    # note number -> message
    open_msgs = defaultdict(list)
    quantized_msgs = []
    for cum_time, msg in cum_msgs:
        if DEBUG:
            print('Message:', msg)
            print('Open messages:')
            pp.pprint(open_msgs)
        if msg.type == 'note_on' and msg.velocity > 0:
            # Store until note off event. Note that there can be
            # several note events for the same note. Subsequent
            # note_off events will be associated with these note_on
            # events in FIFO fashion.
            open_msgs[msg.note].append((cum_time, msg))
        elif msg.type == 'note_off' or (msg.type == 'note_on'
                                        and msg.velocity == 0):
            # A note_on with velocity 0 is treated as a note_off.
            # assert msg.note in open_msgs, \
            # 'Bad MIDI. Cannot have note off event before note on event'
            if msg.note not in open_msgs:
                print(
                    'Bad MIDI. Cannot have note off event before note on event'
                )
                return
            note_on_open_msgs = open_msgs[msg.note]
            if len(note_on_open_msgs) == 0:
                print('Bad MIDI, Note has no end time.')
                return
            # assert len(note_on_open_msgs) > 0, 'Bad MIDI, Note has no end time.'
            # FIFO: pair this off event with the oldest open note_on.
            note_on_cum_time, note_on_msg = note_on_open_msgs[0]
            open_msgs[msg.note] = note_on_open_msgs[1:]
            # Quantized note_on time
            quantized_note_on_cum_time = quantize_tick(
                note_on_cum_time, ticks_per_quarter, quantization)
            # The cumulative time of note_off is the quantized
            # cumulative time of note_on plus the orginal difference
            # of the unquantized cumulative times.
            quantized_note_off_cum_time = quantized_note_on_cum_time + (
                cum_time - note_on_cum_time)
            # Clamp both events so nothing lands after end_of_track.
            quantized_msgs.append(
                (min(end_of_track_cum_time, quantized_note_on_cum_time),
                 note_on_msg))
            quantized_msgs.append((min(end_of_track_cum_time,
                                       quantized_note_off_cum_time), msg))
            if DEBUG:
                print('Appended', quantized_msgs[-2:])
        elif msg.type == 'end_of_track':
            quantized_msgs.append((cum_time, msg))
        if DEBUG:
            print('\n')
    # Now, sort the quantized messages by (cumulative time,
    # note_type), making sure that note_on events come before note_off
    # events when two event have the same cumulative time. Compute
    # differential times and construct the quantized track messages.
    # The +0.5 tiebreaker pushes off events after on events at equal
    # cumulative times without changing relative order otherwise.
    quantized_msgs.sort(key=star(lambda cum_time, msg: cum_time if (
        msg.type == 'note_on' and msg.velocity > 0) else cum_time + 0.5))
    diff_times = [quantized_msgs[0][0]] + list(
        np.diff([msg[0] for msg in quantized_msgs]))
    for diff_time, (cum_time, msg) in zip(diff_times, quantized_msgs):
        quantized_track.append(msg.copy(time=diff_time))
    if DEBUG:
        print('Quantized messages:')
        pp.pprint(quantized_msgs)
        pp.pprint(diff_times)
    return quantized_track
def notestomidi(notes=["C", "D", "C", "B"],
                reverb=False,
                volume=100,
                tempo=60,
                filename="ch0.wav",
                insturments=[0, 1, 2, 3, 4, 5],
                transpose="1",
                chord=[4, 6]):
    """Render a note-name sequence to a multi-track MIDI file.

    One track is written per instrument (doubled when ``reverb`` is on,
    with the echo layer at higher volume); each note is played together
    with the offsets in ``chord``.  The result is saved to ``filename``
    and also returned.

    Parameters
    ----------
    notes : list of str
        Note names; the final two entries are treated as metadata and
        dropped before rendering.  The caller's list is NOT mutated.
    reverb : bool
        When True, every instrument track is written twice.
    volume : int
        Base channel volume (scaled into the 0-127 controller range).
    tempo : int
        Beats per minute.
    filename : str
        Output path; any "Generated/" prefix is stripped.
    insturments : list
        Instrument ids, resolved to programs via ``logicins``.
    transpose : str
        Octave suffix appended to each stripped note name.
    chord : list of int
        Semitone offsets sounded together with each root note.
    """
    mid = MidiFile()
    # Drop the trailing two entries on a COPY.  The original code used
    # list.remove(notes[-1]), which deletes the first *matching value*
    # (wrong when notes repeat) and mutated both the caller's list and
    # the shared mutable default argument.
    notes = notes[:-2]
    channel = 0
    soundreverb = 2 if reverb else 1
    # One translation table deletes octave digits and minus signs in a
    # single pass instead of eleven chained .replace() calls.
    strip_octave = str.maketrans('', '', '0123456789-')
    for layer in range(soundreverb):
        for insturment in insturments:
            track = MidiTrack()
            mid.tracks.append(track)
            filename = filename.replace("Generated/", "")
            track.append(MetaMessage("marker", text=filename))
            # mido meta names must be strings; instrument ids may be ints.
            track.append(MetaMessage("track_name", name=str(insturment)))
            track.append(MetaMessage("instrument_name", name=str(insturment)))
            track.append(
                Message("control_change",
                        channel=channel,
                        control=7,
                        value=round((100 * (volume) / 127) +
                                    (100 * (volume / 5 * layer) / 127)),
                        time=0))
            MICROSECONDS_PER_MINUTE = 60000000
            MPQN = MICROSECONDS_PER_MINUTE / tempo
            track.append(MetaMessage('set_tempo', tempo=int(MPQN), time=0))
            track.append(
                Message('program_change',
                        channel=channel,
                        program=logicins(insturment),
                        time=0))
            duration = int(480 / 500 * (MPQN * tempo / 60) / 2000)
            for notesx in notes:
                notesx = notesx.translate(strip_octave)
                note = librosa.note_to_midi(notesx + transpose)
                track.append(
                    Message('note_on', note=note, velocity=127, time=0))
                for plus in chord:
                    track.append(
                        Message('note_on', note=note + plus, velocity=127,
                                time=0))
                # velocity-0 note_on ends the chord after `duration` ticks
                track.append(
                    Message('note_on', note=note, velocity=0,
                            time=int(duration)))
                for plus in chord:
                    track.append(
                        Message('note_on', note=note + plus, velocity=0,
                                time=0))
            channel += 1
    mid.save(filename)
    return mid
import tkinter # tkinter library - graphical user interface functions import tkinter.filedialog # tkinter File dialogue - Select files import pygame # pygame library - midi music playback support import os.path # File system operations import StringToMusic as STM # User defined class, converts string to music from mido import Message, MidiFile, MidiTrack # MIDI message creation import time # Time keeping for audio playback #------------------------------------------------------------------------- # Global Variables #------------------------------------------------------------------------- #-------For MIDI creator----------------------------------- mid = MidiFile() # Initialize MIDI file track1 = MidiTrack() # Initialize MIDI track mid.tracks.append(track1) # Add track to MIDI file fn = "TextMusic.mid" # Filename for MIDI file #---------------------------------------------------------- #------------------------------------------------------------------------- # openTextFileClick() - Click event for openFile button #------------------------------------------------------------------------- def openTextFileClick(): inputFilePath = tkinter.filedialog.askopenfilename() inputFile = open(inputFilePath, 'r') stringTextBox.delete("1.0", "end")
off_time = partial[-1][0] note_off = ['note_off', off_time, note, velocity] midi_events.append(note_off) t2 = perf_counter() print('Complete ({:.2f}ms)'.format((t2 - t1) * 1000)) # Sort events by time, convert absolute times to delta times (mutate list). print('Sorting and processing events...') t1 = perf_counter() midi_events.sort(key=lambda x: x[1]) functions.convert_to_delta(midi_events, PPQN, tempo) t2 = perf_counter() print('Complete ({:.2f}ms)'.format((t2 - t1) * 1000)) # Set up mido MIDI track. track0 = MidiTrack() track0.append(MetaMessage('set_tempo', tempo=tempo)) track0.append(MetaMessage('time_signature', numerator=4, denominator=4)) # Format midi messages and add them to our track for event in midi_events: message, time, note, vel = event track0.append(Message(message, time=time, note=note, velocity=velocity)) # Create a MidiFile object, append our track, save the file midifile = MidiFile() midifile.tracks.append(track0) midifile.save('output.mid') print('COMPLETE')
from mido import Message, MidiFile, MidiTrack, MetaMessage from mido import bpm2tempo import numpy as np mid = MidiFile() track0 = MidiTrack() track1 = MidiTrack() mid.tracks.append(track0) mid.tracks.append(track1) #track.append(Message('program_change', program=12, time=0)) mytempo = bpm2tempo(120) print(mytempo) track0.append(MetaMessage('track_name', name='master', time=0)) track0.append( MetaMessage('time_signature', numerator=4, denominator=4, clocks_per_click=24, notated_32nd_notes_per_beat=8, time=0)) track0.append(MetaMessage('set_tempo', tempo=mytempo, time=0)) #430 passt zu 120bpm, time = 50 #190 delta = 70 pattern = [1, 1, 1, 1] pattern2 = [[1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
def webmidi_to_midi(webmidi_json, tempdir):
    """Translate a web-MIDI JSON event list into a standard MIDI file.

    Each entry carries raw status/data bytes under ``data._data`` and an
    absolute timestamp in milliseconds; the gap since the previous
    *written* event becomes the message's tick time.  The result is
    saved to <tempdir>/performanceMidi.mid.
    """
    midiNotes = webmidi_json
    midiFile = MidiFile()
    midiFile.ticks_per_beat = ticks_per_beat
    track = MidiTrack()
    midiFile.tracks.append(track)
    prevTime = midiNotes[0]["timestamp"]
    # Status codes that produce a track message: type name, log label,
    # and whether the first data byte is a note or a controller number.
    emit_specs = {
        0b001: ("note_on", "APPEND NOTE ON: ", True),
        0b000: ("note_off", "APPEND NOTE OFF: ", True),
        0b011: ("control_change", "APPEND CONTROL CHANGE: ", False),
    }
    # Status codes that are recognized but intentionally not written.
    passive = {
        0b010: "polytouch",
        0b101: "aftertouch",
        0b110: "pitchwheel",
        0b111: "sysex",
    }
    for note in midiNotes:
        try:
            eventType = "UNKNOWN"
            raw = note["data"]["_data"]
            status = raw["0"]
            code = (status >> 4) & 0b00000111
            channel = status & 0b00001111
            key = raw["1"]
            velocity = raw["2"] & 0b01111111
            print("code: ", code, "channel: ", channel, "key: ", key,
                  "vel: ", velocity)
            elapsed = (note["timestamp"] - prevTime) / 1000
            time = round(
                second2tick(elapsed,
                            ticks_per_beat=ticks_per_beat,
                            tempo=tempo))
            if code in emit_specs:
                eventType, label, is_note = emit_specs[code]
                if is_note:
                    msg = Message(eventType, channel=0, note=key,
                                  velocity=velocity, time=time)
                else:
                    msg = Message(eventType, channel=0, control=key,
                                  value=velocity, time=time)
                track.append(msg)
                print(label, eventType, 0, key, velocity, time)
                # Only written events advance the delta-time origin.
                prevTime = note["timestamp"]
            elif code in passive:
                eventType = passive[code]
            else:
                print("webmidi_to_midi: UNKNOWN CODE WAS ", code)
            print("webmidi_to_midi: EVENT TYPE: ", eventType)
        except ValueError as e:
            print("webmidi_to_midi: Problem with: ", note, e)
    midiFile.save(os.path.join(tempdir, "performanceMidi.mid"))
def save_performance_midi(performed_part,
                          out,
                          mpq=500000,
                          ppq=480,
                          default_velocity=64):
    """Save a :class:`~partitura.performance.PerformedPart` instance as a
    MIDI file.

    Parameters
    ----------
    performed_part : :class:`~partitura.performance.PerformedPart`
        The performed part to save
    out : str or file-like object
        Either a filename or a file-like object to write the MIDI data
        to.
    mpq : int, optional
        Microseconds per quarter note.  This is known in MIDI parlance
        as the "tempo" value.  Defaults to 500000 (i.e. 120 BPM).
    ppq : int, optional
        Parts per quarter, also known as ticks per beat.  Defaults to
        480.
    default_velocity : int, optional
        A default velocity value (between 0 and 127) used for notes
        without a specified velocity.  Defaults to 64.
    """
    def to_ticks(seconds):
        # seconds -> MIDI ticks at the given tempo (mpq) and resolution (ppq)
        return int(np.round(10**6 * ppq * seconds / mpq))

    # track number -> { absolute tick -> [messages at that tick] }
    events_by_track = defaultdict(lambda: defaultdict(list))
    # Invert the control-type table: type name -> controller number.
    control_codes = {name: code for code, name in MIDI_CONTROL_TYPES.items()}

    for ctrl in performed_part.controls:
        tr = ctrl.get('track', 0)
        ch = ctrl.get('channel', 1)
        events_by_track[tr][to_ticks(ctrl['time'])].append(
            Message('control_change',
                    control=control_codes[ctrl['type']],
                    value=ctrl['value'],
                    channel=ch))

    for note in performed_part.notes:
        tr = note.get('track', 0)
        ch = note.get('channel', 1)
        vel = note.get('velocity', default_velocity)
        events_by_track[tr][to_ticks(note['note_on'])].append(
            Message('note_on',
                    note=note['midi_pitch'],
                    velocity=vel,
                    channel=ch))
        events_by_track[tr][to_ticks(note['note_off'])].append(
            Message('note_off',
                    note=note['midi_pitch'],
                    velocity=0,
                    channel=ch))

    # Single track -> type 0 file, otherwise type 1.
    mf = MidiFile(type=0 if len(events_by_track) == 1 else 1,
                  ticks_per_beat=ppq)

    for idx, tr in enumerate(sorted(events_by_track)):
        track = MidiTrack()
        mf.tracks.append(track)
        if idx == 0:
            # The tempo meta message goes on the first track only.
            track.append(MetaMessage('set_tempo', tempo=mpq, time=0))
        cursor = 0
        for tick in sorted(events_by_track[tr]):
            delta = tick - cursor
            for msg in events_by_track[tr][tick]:
                # Only the first message at each tick carries the delta.
                track.append(msg.copy(time=delta))
                delta = 0
            cursor = tick

    if out:
        if hasattr(out, 'write'):
            mf.save(file=out)
        else:
            mf.save(out)