def array_from_file(filename):
    """Translate a MIDI file into an array representation.

    Args:
        filename: a tensor-like object holding the path; ``.numpy()`` is
            called on it, so this must run eagerly (e.g. under tf.py_function).

    Returns:
        The grouped-by-octave array produced from the MIDI file, or an array
        of shape SONG_SHAPE filled with -1 (float32) if parsing fails.
    """
    filename = filename.numpy()
    try:
        m = mido.MidiFile(filename)
        channels = numpy_from_midi(m)
        channel = map_to_one_channel(channels)
        res = group_by_octave(channel.array)
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are not
    # swallowed; any parse/convert failure still yields the sentinel array.
    except Exception:
        return np.full(SONG_SHAPE, -1, dtype=np.float32)
    else:
        return res
def write_midi_file(filename, notes_offsets_durations_velocities):
    """Save a sequence of (note, offset, duration, velocity) tuples as MIDI."""
    midi_file = mido.MidiFile()
    out_track = mido.MidiTrack()
    midi_file.tracks.append(out_track)
    out_track.append(mido.Message('program_change', program=73))
    for note, offset, duration, velocity in notes_offsets_durations_velocities:
        out_track.append(
            mido.Message('note_on', note=note, time=offset, velocity=velocity))
        # A note_on with velocity 0 is the conventional note-off.
        out_track.append(
            mido.Message('note_on', note=note, time=duration, velocity=0))
    midi_file.save(filename)
def handle_client_ctrl_message(self, message):
    """Decode a client control message and forward it to the ctrl object.

    `message` is a dict with 'topic' and 'msg' keys. If the msg declares a
    base64 encoding and `what == 'file'`, the decoded content is parsed as a
    MIDI file and attached as msg['obj'] before forwarding. Unsupported
    encodings or 'what' values are reported and the message is dropped.
    """
    topic = message['topic']
    msg = message['msg']
    # dict.has_key() was removed in Python 3 — use the `in` operator.
    if 'encoding' in msg:
        if msg['encoding'] == 'base64':
            decoded = binascii.a2b_base64(msg['content'])
            if msg['what'] == 'file':
                midi_file = mido.MidiFile(file=io.BytesIO(decoded))
                msg['obj'] = midi_file
            else:
                print('Unsupported "what" ' + repr(msg['what']))
                return
        else:
            print('Unsupported encoding ' + repr(msg['encoding']))
            return
    self.ctrl_object.handle_ctrl_message(topic, msg)
def _process_raw_midi(filename: tf.Tensor, velocity_bins: int,
                      rest_resolution: int):
    """Process the MIDI file at `filename` into a tensor of string tokens.

    Must run eagerly: the path is obtained via `filename.numpy()`.
    """
    midi = mido.MidiFile(filename.numpy())
    # Velocity binning -> time quantization -> vocabulary conversion.
    pipeline = _chain_callables([
        transform.VelocityBinner(min_val=MIN_VELOCITY,
                                 max_val=MAX_VELOCITY,
                                 num_bins=velocity_bins),
        transform.TimeQuantizer(resolution=rest_resolution),
        transform.VocabularyConverter(resolution=rest_resolution),
    ])
    return tf.constant(pipeline(midi), dtype=tf.string)
def separate_tracks_into_mp3s(args: argparse.Namespace,
                              midifile_path: str) -> None:
    """Render each voice of the MIDI file as a solo MP3 plus accompaniments."""
    midi_data: mido.MidiFile = ensure_midi_well_formatted(
        mido.MidiFile(midifile_path))
    voices: list = create_voices()
    add_accompaniment(voices)
    # One solo part per configured voice.
    solo_parts: List[Part] = [
        generate_solo_parts(midi_data, track_numbers, part_name, instrument)
        for part_name, track_numbers, instrument in voices
    ]
    for part in solo_parts:
        generate_accompaniment(part, solo_parts)
    generate_full_mp3(solo_parts)
def loadallsongs(dire):
    """Recursively load every parseable MIDI file under `dire`.

    Args:
        dire: root directory to walk.

    Returns:
        List of mido.MidiFile objects for the files that parsed successfully.
        Unparseable files are skipped silently.
    """
    songs = []
    num = 0    # total files seen (was initialized to 1: off-by-one in report)
    count = 0  # files loaded successfully (same off-by-one fixed)
    for dirs, subdr, files in os.walk(dire):
        for fil in files:
            num += 1
            try:
                # os.path.join instead of hard-coded '\\' so this works on
                # non-Windows platforms too.
                songs.append(mido.MidiFile(os.path.join(dirs, fil)))
                count += 1
            except Exception:
                # best-effort: skip files mido cannot parse
                pass
    print("Successfully loaded %s of %s Midi files" % (count, num))
    return songs
def main():
    # Prompt for a MIDI file path (argv or interactive), list the available
    # MIDI output ports, let the user pick one by index, then stream the file
    # to that port in real time.
    filename = sysArgvOrInput()
    outputs = mido.get_output_names()
    for i, name in enumerate(outputs):
        print(i, name, sep='\t')
    port_name = outputs[int(inputChin('> ', 0))]
    with mido.open_output(port_name) as port:
        with mido.MidiFile(filename) as mid:
            print('playing...')
            try:
                # mid.play() yields messages in real time (sleeping between
                # them), so this loop takes as long as the song.
                for msg in mid.play():
                    print(msg)
                    port.send(msg)
            except KeyboardInterrupt:
                # Ctrl-C stops playback gracefully.
                print('Stop. ')
    print('ok')
def main():
    # Interactive lyre-playing tool: lists MIDI files under midi/, registers
    # global hotkeys (ctrl+shift+0..9 to start a song, ctrl+shift+s to stop),
    # then plays the chosen file by synthesizing key presses mapped from notes.
    global note_map
    print("疯物之诗琴 by luern0313")
    print("世界线变动率:1.0.1.7546855")
    print("\n如果要使用快捷键演奏:")
    print("ctrl + shift + 数字键 :开始演奏对应的曲目")
    print("ctrl + shift + s : 停止演奏")
    read_configure()
    while True:
        try:
            global file_list
            file_list = os.listdir("midi/")
            print("选择要打开的文件(热键对应前十个):")
            print("\n".join(
                [str(i) + "、" + file_list[i] for i in range(len(file_list))]))
            # Hotkeys 0-9 map to the first ten listed files.
            hk = SystemHotkey(consumer=play_hot)
            for i in range(10):
                hk.register(('control', 'shift', str(i)), i)
            hk2 = SystemHotkey()
            hk2.register(('control', 'shift', 's'), callback=stop_hot)
            midi = mido.MidiFile("midi/" +
                                 file_list[int(input("请输入文件前数字序号:"))])
            print_split_line()
            tracks = midi.tracks
            # Base note comes from config unless it is -1, in which case it is
            # inferred from the tracks.
            base_note = get_base_note(tracks) if configure[
                "lowest_pitch_name"] == -1 else configure["lowest_pitch_name"]
            # Map absolute MIDI note numbers to keyboard keys.
            note_map = {
                note[i] + base_note * 12: key[i]
                for i in range(len(note))
            }
            time.sleep(1)
            for msg in midi.play():
                if msg.type == "note_on" or msg.type == "note_off":
                    note_list = get_note(msg.note)
                    for n in note_list:
                        if n in note_map:
                            if msg.type == "note_on":
                                # Re-press keys that are already held down so
                                # repeated notes retrigger.
                                if vk[note_map[n]] in pressed_key:
                                    release_key(vk[note_map[n]])
                                press_key(vk[note_map[n]])
                            elif msg.type == "note_off":
                                release_key(vk[note_map[n]])
        except Exception as e:
            # Keep the menu loop alive on any error (bad input, missing file...).
            print("ERR:" + str(e))
def maestro_sg_and_note_seq_fn(year, rec_name):
    # Load the precomputed VQT spectrogram and note sequence for one MAESTRO
    # recording, truncate the spectrogram to the length implied by the MIDI
    # file, and return read-only arrays.
    def _get_num_frames_fn(year, rec_name):
        # Number of spectrogram frames implied by the wav length, after
        # (virtual) resampling of 48 kHz recordings to 44.1 kHz.
        wav_file = os.path.join(os.environ['maestro'], year, rec_name + '.wav')
        wav_info = soundfile.info(wav_file)
        assert wav_info.samplerate in (44100, 48000)
        sr = 44100
        if wav_info.samplerate == 48000:
            # ceiling division: sample count after resampling to 44.1 kHz
            num_frames = (wav_info.frames * sr + wav_info.samplerate -
                          1) // wav_info.samplerate
        else:
            num_frames = wav_info.frames
        num_frames = MiscFns.num_samples_to_num_frames_fn(num_frames)
        return num_frames

    num_frames = _get_num_frames_fn(year, rec_name)
    vqt_file = os.path.join(os.environ['maestro_vqt'], year, rec_name + '.vqt')
    _rec_name, vqt = MiscFns.load_np_array_from_file_fn(vqt_file)
    assert _rec_name == rec_name
    # Stored VQT must match the wav-derived frame count, 336 bins per frame.
    assert vqt.dtype == np.float32 and vqt.shape == (num_frames, 336)
    sr = 44100
    mid_file = os.path.join(os.environ['maestro'], year, rec_name + '.midi')
    # MIDI duration (seconds) converted to a frame count.
    num_frames_from_midi = mido.MidiFile(mid_file).length
    num_frames_from_midi = int(np.ceil(num_frames_from_midi * sr))
    num_frames_from_midi = MiscFns.num_samples_to_num_frames_fn(
        num_frames_from_midi)
    # +2 frames — presumably a small safety margin; confirm against MiscFns.
    num_frames_from_midi += 2
    num_frames = min(num_frames, num_frames_from_midi)
    vqt = vqt[:num_frames]
    # Own, C-contiguous, then frozen so downstream code cannot mutate it.
    vqt = np.require(vqt, dtype=np.float32, requirements=['O', 'C'])
    vqt.flags['WRITEABLE'] = False
    note_seq = MiscFns.get_note_seq_from_mid_file_fn(mid_file_name=mid_file)
    times_and_pitches = MiscFns.note_seq_to_valued_intervals(note_seq)
    note_intervals = times_and_pitches['times']
    note_intervals.flags['WRITEABLE'] = False
    note_pitches = times_and_pitches['pitches']
    note_pitches.flags['WRITEABLE'] = False
    assert len(note_intervals) == len(note_pitches)
    return dict(sg=vqt, intervals=note_intervals, pitches=note_pitches)
def _save():
    # Flush the recorded MIDI history to a new sequentially-numbered file
    # ("midi_<n>.midi") in _midi_path, then reset the recording state.
    # Returns the new base name, or None if there was nothing to save.
    global _midi_history, _time_counter
    midi_file = mido.MidiFile()
    midi_track = mido.MidiTrack()
    midi_file.tracks.append(midi_track)
    # Write current pedal positions first (time=0) on channels 0 and 1 so
    # playback starts from the same pedal state as the recording.
    for i, pos in enumerate(_pedal_poses):
        midi_track.append(
            mido.Message("control_change",
                         channel=0,
                         control=_pedal_controls[i],
                         value=pos,
                         time=0))
        midi_track.append(
            mido.Message("control_change",
                         channel=1,
                         control=_pedal_controls[i],
                         value=pos,
                         time=0))
    first_msg = next(iter(_midi_history), None)
    if not first_msg:
        # Nothing recorded — don't write an empty file.
        return
    first_msg.time = 0
    # History times are in seconds; convert in place to ticks at fixed 120 bpm.
    for msg in _midi_history:
        msg.time = int(
            mido.second2tick(msg.time, midi_file.ticks_per_beat,
                             mido.bpm2tempo(120)))
        midi_track.append(msg)
    _, _, file_names = next(walk(_midi_path), (None, None, []))
    # File names look like "midi_<n>.midi"; name[5:-5] extracts <n>.
    # NOTE(review): assumes every file in _midi_path matches that pattern.
    last_file = max(file_names, key=lambda name: int(name[5:-5]), default=None)
    if last_file:
        last_idx = int(last_file[5:-5])
    else:
        last_idx = -1
    new_name = "midi_" + str(last_idx + 1)
    midi_file.save(_midi_path + new_name + ".midi")
    _midi_history = []
    _time_counter = 0
    return new_name
def midi2score(song, subdivision):
    """Render a MIDI file into a binary piano-roll score.

    Args:
        song: path to the MIDI file.
        subdivision: score rows per beat (time resolution).

    Returns:
        np.ndarray of shape (n_steps, 90); entry [t, p] is 1 while pitch
        (p + 21) sounds at step t.
    """
    mid = mido.MidiFile(song)
    tempo = 0
    sec_per_tick = 0
    length = mid.length
    time = 0
    # set initial score len: scan until the first note_on to establish the
    # initial tempo. NOTE(review): if the file has no note_on, sec_per_tick
    # stays 0 and the division below raises ZeroDivisionError — confirm
    # inputs always contain notes.
    for msg in mid:
        if (msg.is_meta):
            if (msg.type == 'set_tempo'):
                # here `tempo` holds the raw MIDI tempo (µs per beat)
                tempo = msg.tempo
        else:
            if (msg.type == "note_on"):
                bpm = mido.tempo2bpm(tempo)
                sec_per_tick = 60 / bpm / subdivision
                break
    score = np.zeros((int(length / sec_per_tick) + 1, 90))
    for msg in mid:
        time += msg.time
        pos = int(np.round(time / sec_per_tick))
        # Grow the score if tempo changes pushed events past the initial size.
        if (pos + 1 > score.shape[0]):
            score = np.append(score,
                              np.zeros((pos - score.shape[0] + 1, 90)),
                              axis=0)
        if (msg.is_meta):
            if (msg.type == 'set_tempo'):
                # NOTE: `tempo` is reused here to hold BPM (not raw tempo as
                # in the first loop); the sec_per_tick formula matches.
                tempo = mido.tempo2bpm(msg.tempo)
                sec_per_tick = 60 / tempo / subdivision
        elif (msg.type == 'note_on'):
            if (msg.velocity == 0):
                # note_on with velocity 0 acts as note_off
                p = msg.note - 21
                score[pos:, p] = 0
            else:
                p = msg.note - 21
                score[pos:, p] = 1
        elif (msg.type == 'note_off'):
            p = msg.note - 21
            score[pos:, p] = 0
    return score
def encoding_to_midi(encoding, out_path, max_velocity=108):
    """
    Convert an encoding back to a midi file. Check recreation_test for results
    :param encoding: array that encodes a midi file
    :param out_path: path to save new midi file
    :param max_velocity: maximum velocity of dataset. calculated using calculate_median_velocity()
    """
    mid = mido.MidiFile()
    track = mido.MidiTrack()
    mid.tracks.append(track)
    data = []
    velocity_lst = []
    # Each encoding vector concatenates four one-hot segments:
    # note-on [0:128), note-off [128:256), velocity [256:288), time-shift [288:413).
    for vector in encoding:
        n_on = one_hot_to_int(vector[0:128])
        n_off = one_hot_to_int(vector[128:256])
        velocity = one_hot_to_int(vector[256:288])
        time_shift = one_hot_to_int(vector[288:413])
        # if time_shift value >= 125, we need to add the time value to the original msg and not create a new msg
        if n_on == 0 and n_off == 0 and velocity == 0 and len(data) != 0:
            data[-1]['time_shift'] += time_shift
        else:
            data.append({
                'n_on': n_on,
                'n_off': n_off,
                'velocity': velocity,
                'time_shift': time_shift
            })
            velocity_lst.append(velocity)
    for i, msg in enumerate(data):
        # We normalized the velocity bw [0,32) while encoding.
        # Here we are renormalizing the velocity to a value
        # that is more representative of the velocities in the original dataset.
        # NOTE(review): raises ZeroDivisionError when all entries of
        # velocity_lst are equal (max == min) — confirm inputs guarantee spread.
        normalized_velocity = int(
            np.floor(((velocity_lst[i] - min(velocity_lst)) /
                      (max(velocity_lst) - min(velocity_lst))) * max_velocity))
        track.append(
            mido.Message(type='note_on',
                         note=max(msg['n_on'], msg['n_off']),
                         velocity=normalized_velocity,
                         time=msg['time_shift']))
    mid.save(out_path)
def main(filepath: str = fp):
    # Extract per-track visualization specs (times, frequencies, delta-time
    # histograms, interval statistics) from the MIDI file at `filepath`.
    # load file
    f = mido.MidiFile(filepath)
    preprocessedTracks = PreprocessTracks(f.tracks)
    # Outliers in term of delta time will need to be excluded
    dts = []
    for track in preprocessedTracks:
        for i in range(1, len(track)):
            dts.append(track[i].time - track[i - 1].time)
    dts = array(dts)
    # Threshold is computed from the strictly positive deltas only.
    exclThreshold = FindExclusionThreshold(arr=dts[dts > 0], cutoffMultiplier=5)
    """
    dts.sort()
    dts = [int(elem) for elem in dts]
    deltaTimes = Counter()
    deltaTimes.AddListElements(dts)
    """
    specs = []
    for track in preprocessedTracks:
        isDrumsTrack = IsDrumsTrack(track)
        times, frequencies, deltaTimes, intervalsStandalone, intervalsSuccession = ExtractDataTrack(
            track, exclThreshold)
        # sort intervals
        currSpecs = {
            "Times": times,
            "Frequencies": frequencies,
            "DeltaTimes": deltaTimes,
            "IntervalsStandalone": intervalsStandalone,
            "IntervalsSuccession": intervalsSuccession,
            "IsDrumsTrack": isDrumsTrack
        }
        specs.append(currSpecs)
        """
        Times: [float],
        Frequencies: [float],
        DeltaTimes: {int: int},
        IntervalsStandalone: {str: int}, // for color, lookup can be defined somewhere else
        IntervalsSuccession: {str: { str: int}}
        IsDrumsTrack: bool
        specs.append(currSpecs)
        """
    return specs
def load_midi(filename):
    """
    input: a midi file given by path 'filename'

    returns a tuple (notes, ticks_per_beat); notes is a list where
    each note is a tuple consisting of:
        pitch - integer in [0,127]
        onset - real-valued time in seconds
        offset - real-valued time in seconds
    the list of notes is ordered by onset time (onsets are appended in
    file/message order)
    """
    midi = mido.MidiFile(filename)
    notes = []
    time = 0
    for message in midi:
        time += message.time
        # velocity == 0 equivalent to note_off, see here:
        # http://www.kvraudio.com/forum/viewtopic.php?p=4167096
        if message.type == 'note_on' and message.velocity != 0:
            # some midis seem to have timing info on channel 0
            # but not intended to be played? (e.g. ravel)
            #if message.channel==0:
            #    continue
            notes.append((message.note, time, -1))  # -1 marks "no offset yet"
        elif (message.type == 'note_off') or (message.type == 'note_on'
                                              and message.velocity == 0):
            # Find the last time this note was played and update that
            # entry with offset.
            for i, e in reversed(list(enumerate(notes))):
                (note, onset, offset) = e
                if note == message.note:
                    notes[i] = (note, onset, time)
                    break
    # only keep the entries with have an offset
    notes = [x for x in notes if not x[2] == -1]
    # sanity checks
    for note, onset, offset in notes:
        assert onset <= offset
    # NOTE(review): exact float equality between the summed message times and
    # midi.length — looks fragile to rounding; confirm it holds in practice.
    assert time == midi.length
    return notes, midi.ticks_per_beat
def mid2roll(midi_file_name, roll_file_name):
    """Convert a MIDI file into a text 'roll' of key press/release/sleep events."""
    events = []
    pending_press = ""
    pending_release = ""

    def flush():
        # Emit buffered releases then presses, clearing both buffers.
        nonlocal pending_press, pending_release
        if len(pending_release) != 0:
            events.append([EVENT_KEY_RELEASE, pending_release])
            pending_release = ""
        if len(pending_press) != 0:
            events.append([EVENT_KEY_PRESS, pending_press])
            pending_press = ""

    for msg in mido.MidiFile(midi_file_name):
        # A nonzero time delta closes the current chord: flush buffers, then
        # record the sleep.
        if msg.time != 0:
            flush()
            events.append([EVENT_SLEEP, msg.time])
        # NOTE_ON with zero velocity is equivalent to NOTE_OFF.
        if msg.type == 'note_on' and msg.velocity != 0:
            pending_press += note_to_key(msg.note)
        elif msg.type == 'note_off' or (msg.type == 'note_on'
                                        and msg.velocity == 0):
            pending_release += note_to_key(msg.note)
    # Flush whatever remains after the last message.
    flush()
    roll_file = open(roll_file_name, "w")
    for event_type, arg in events:
        print(event_type, arg, file=roll_file)
    roll_file.close()
def processMidi(midiPath, fps):
    # Parse the MIDI file into note events and return two sorted lists:
    #   pitch_onset:        [onset_time - midi_offset, pitch]
    #   pitch_onset_offset: [onset - midi_offset, offset - midi_offset, pitch]
    # midi_offset (from cfg.EVALUATE_MAP) aligns MIDI time with the video.
    mid = mido.MidiFile(midiPath)
    file_seq = os.path.basename(midiPath).split('.')[0]
    midi_offset = cfg.EVALUATE_MAP[file_seq]['midi_offset']
    timeCount = 0
    dataList = []
    for msg in mid:
        if not msg.is_meta:
            if msg.type == 'control_change':
                timeCount = timeCount + msg.time
            elif msg.type == 'note_on' or msg.type == 'note_off':
                timeCount = timeCount + msg.time
                # pitch shifted by -20 (piano key index); timeCount is the
                # running absolute time since the start of the file
                data = [msg.type, msg.note - 20, msg.velocity, timeCount]
                # print('the frame is {}'.format(timeCount*fps))
                # print(data)
                # embed()
                dataList.append(data)
    # print(dataList)
    dict1 = {}
    result = []
    for data in dataList:
        if data[0] == 'note_on' and data[2] > 0:
            # open note: remember [pitch, velocity, onset]
            dict1[data[1]] = data[1:]
        else:
            # (translated) the time in note_off is the current absolute time;
            # subtracting the matching note_on time would give the duration
            dict1[data[1]].append(data[3])
            result.append(dict1.pop(data[1]))
    # (translated) result -> (key, press velocity, press start time, press end time)
    result = sorted(result, key=lambda x: x[2])
    pitch_onset = []
    for item in result:
        # (translated) midi_offset maps times relative to the video frame of
        # the first key press
        po = [item[2] - midi_offset, item[0]]  # (translated) press time / key
        pitch_onset.append(po)  # (translated) time and key, end time excluded
    pitch_onset = sorted(pitch_onset, key=lambda x: (x[0], x[1]))
    #print(pitch_onset)
    pitch_onset_offset = []
    for item in result:
        # (translated) start time / end time / key
        po = [item[2] - midi_offset, item[3] - midi_offset, item[0]]
        pitch_onset_offset.append(po)
    pitch_onset_offset = sorted(pitch_onset_offset,
                                key=lambda x: (x[0], x[1], x[2]))
    return pitch_onset, pitch_onset_offset
def load(cls, model_file_path):
    """Load a MIDISequencifier (source MIDI plus sub-models) from a directory.

    Arguments:
        model_file_path {string} -- file path to a directory

    Raises:
        FileNotFoundError -- if the path does not exist or is not a directory.
    """
    directory = Path(model_file_path)
    # Guard clause instead of wrapping the whole body in an if/else.
    if not (directory.exists() and directory.is_dir()):
        raise FileNotFoundError()
    midifile = mido.MidiFile(str(directory / Path('source.midi')))
    models = [
        load_model(str(model_file))
        for model_file in sorted(directory.glob('*.keras'))
    ]
    model = MIDISequencifier(midifile)
    model.models = models
    return model
def read_file(filepath):
    """
    Reads a format0 midi file, creating Note objects for each note and
    compiling a list of frames (snapshots of active notes) for each midi
    time increment.

    Returns a tuple (notes, frames): notes maps note_id -> Note, frames is
    the list of Frame snapshots taken whenever time advances and the set of
    active notes changed.
    """
    midifile = mido.MidiFile(filepath)
    notes = {}  # { note_id : note }
    frames = []  # [ [note_id] ] Each frame is a list of note ids
    for trk in midifile.tracks:
        current_note_ids = []
        current_notes = {}  # { midi_val : note }
        time = 0
        for msg in trk:
            if msg.time > 0:
                # Grab frame — only when the active-note set changed since
                # the last snapshot.
                if len(frames) == 0 or frames[-1].notes != current_note_ids:
                    frames.append(Frame(current_note_ids, len(frames)))
                time += msg.time
            note_on = msg.type == 'note_on' and msg.velocity > 0
            note_off = (msg.type == 'note_on'
                        and msg.velocity == 0) or msg.type == 'note_off'
            if note_on:
                # If this pitch is already sounding, close it before opening
                # a new note at the same pitch.
                n = current_notes.pop(msg.note, None)
                if n:
                    n.end_t = time
                    current_note_ids.remove(n.id)
                new_note = Note(msg.note, time)
                # Register note
                notes[new_note.id] = new_note
                current_notes[msg.note] = new_note
                current_note_ids.append(new_note.id)
            elif note_off:
                n = current_notes.pop(msg.note, None)
                if n:
                    n.end_t = time
                    current_note_ids.remove(n.id)
    return notes, frames
def get_raw_data(filenames):
    '''
    Retrieves and encodes data from MIDI files given a list of filenames.

    :param filenames: list of MIDI file paths
    :return: list with one encoding per input file
    '''
    encodings = []
    total = len(filenames)
    # i changed your progress bar bc it wasnt working on my computer...
    for index, path in enumerate(filenames):
        print_prgress_bar(index, total)
        midi_file = mido.MidiFile(path, clip=True)
        encodings.append(create_encoding(midi_file))
    return encodings
def parse(midiName):
    """Parse `midiName`, dispatching on whether the file contains explicit
    note_off messages.

    Returns whatever parseYesOffs()/parseNoOffs() produce.
    """
    mid = mido.MidiFile(midiName)
    # The original inner `break` only exited the per-track message loop, so
    # every remaining track was still scanned even after a note_off was
    # found. any() short-circuits across all tracks with the same result.
    thereAreNoteOffs = any(msg.type == "note_off"
                           for track in mid.tracks
                           for msg in track)
    if thereAreNoteOffs:
        data = parseYesOffs()
    else:
        data = parseNoOffs()
    return data
def readmidi(filename, play=False, plot=False, printtracks=False):
    """Read a MIDI file, optionally printing its tracks, playing it, and
    generating a plot; reports elapsed time at the end."""
    start = datetime.datetime.now()
    mid = mido.MidiFile(filename, clip=True)
    if printtracks:
        for track in mid.tracks:
            for message in track:
                print(message)
            print("\n\n\n")
    if play:
        Play.play(filename, 1, 1)
    if plot:
        # Imported lazily so plotting deps are only needed when plotting.
        import GenPlot
        GenPlot.showplotofmidi(
            filename,
            "GeneratedPlots/" + filename.replace("mid", "png").split("/")[-1])
    # Fresh local instead of re-assigning the `plot` parameter.
    elapsed = datetime.datetime.now() - start
    print("Ploted", filename, "in", str(elapsed))
def test_tempo_changes():
    # Build a two-track MIDI file (a meta track carrying tempo changes and a
    # note track with quarter notes), read it back through midani, and check
    # that TempoChanges.ctime_from_btime maps beat times to the expected
    # clock times.
    mid = mido.MidiFile()
    meta_track = mido.MidiTrack()
    track = mido.MidiTrack()
    mid.tracks.extend([meta_track, track])
    ticks_per_beat = mid.ticks_per_beat
    # Add ten quarter notes
    for i in range(10):
        track.append(mido.Message("note_on", note=60 + i, time=0))
        track.append(mido.Message("note_off", note=60 + i,
                                  time=ticks_per_beat))
    # Add tempo changes every 2.5 beats
    for i, tempo in enumerate(TEMPOS):
        meta_track.append(
            mido.MetaMessage(
                "set_tempo",
                tempo=mido.bpm2tempo(tempo),
                time=int(ticks_per_beat * 2.5 if i != 0 else 0),
            ))
    if not os.path.exists(OUT_DIR):
        os.makedirs(OUT_DIR)
    mid.save(OUT_PATH)
    settings = DummySettings((OUT_PATH, ))
    score = midani_score.read_score(settings)
    tempo_changes = midani_time.TempoChanges(score)
    # Expected clock times of the ten note attacks under the tempo map.
    times = [
        0,
        0.5,
        1.0,
        1.4583333333333333,
        1.875,
        2.291666666666667,
        2.666666666666667,
        3.041666666666667,
        3.3996212121212124,
        3.740530303030303,
    ]
    for note, time in zip(score.voices[0], times):
        # Times do not agree exactly because tempos don't come out exactly
        # from mido conversion (e.g., 144 becomes 143.99988480009216)
        assert (
            abs(time - tempo_changes.ctime_from_btime(note.attack_time)) < 1e-6
        ), "abs(time - tempo_changes.ctime_from_btime(note.attack_time)) >= 1e-6"
def note_statematrix_to_midi(statematrix, name="example", tickscale=180):
    """Convert statematrix to .mid file (output).

    Each statematrix row holds one [play, articulate] pair per pitch in
    [lower_bound, upper_bound). A trailing all-off row is appended so every
    sounding note receives a closing note_off.
    """
    # BUG FIX: the original did `statematrix = np.asarray(statematrix)` and
    # then `statematrix + [prevstate[:]]`, which numpy treats as broadcast
    # *addition* of zeros — not concatenation — so the trailing all-off state
    # was never appended and notes sounding at the end never got a note_off.
    # Keep the data as a plain list so `+` concatenates.
    statematrix = list(statematrix)
    pattern = midi.MidiFile()
    track = midi.MidiTrack()
    pattern.tracks.append(track)
    span = upper_bound - lower_bound
    lastcmdtime = 0
    prevstate = [[0, 0] for x in range(span)]
    for time, state in enumerate(statematrix + [prevstate[:]]):
        offNotes = []
        onNotes = []
        for i in range(span):
            n = state[i]
            p = prevstate[i]
            if p[0] == 1:
                if n[0] == 0:
                    # note stopped sounding
                    offNotes.append(i)
                elif n[1] == 1:
                    # re-articulated: off then immediately on again
                    offNotes.append(i)
                    onNotes.append(i)
            elif n[0] == 1:
                onNotes.append(i)
        for note in offNotes:
            track.append(
                midi.Message('note_off',
                             time=(time - lastcmdtime) * tickscale,
                             note=note + lower_bound))
            lastcmdtime = time
        for note in onNotes:
            track.append(
                midi.Message('note_on',
                             time=(time - lastcmdtime) * tickscale,
                             velocity=40,
                             note=note + lower_bound))
            lastcmdtime = time
        prevstate = state
    pattern.save(name)
def output(data):
    """Write the note numbers in `data` to 'new_song.mid', clamping pitches to 127."""
    song = mido.MidiFile()
    track = mido.MidiTrack()
    song.tracks.append(track)
    track.append(mido.MetaMessage('set_tempo', tempo=mido.bpm2tempo(35)))
    for pitch in data:
        print(pitch)
        clamped = pitch if pitch <= 127 else 127
        track.append(
            mido.Message("note_on", note=clamped, velocity=127, time=64))
    song.save("new_song.mid")
def preview_notes(self):
    '''
    Previews the self.keys list audibly and visually simultaneously.

    Renders self.keys to a temporary MIDI file, synthesizes it to WAV with
    fluidsynth, then plays the WAV while stepping the W-matrix display one
    component per half second.
    '''
    self.process_H_W()
    self.message('Previewing notes...')
    # Soundfont and scratch files live next to this module.
    fn_font=os.path.join(os.path.dirname(os.path.abspath(__file__)),'anthem_soundfonts','font.sf2')
    fn_midi=os.path.join(os.path.dirname(os.path.abspath(__file__)),'preview.mid')
    fn_wav=os.path.join(os.path.dirname(os.path.abspath(__file__)),'preview.wav')
    if get_init() is None:
        # Checks if pygame has initialized audio engine. Only needs to be run once per instance
        pre_init(fs, -16, 2, 1024)
        init()
        set_num_channels(128)  # We will never need more than 128...
    # Build a one-track MIDI: tempo 1s/beat at 1000 ticks/beat, so time
    # values below are effectively milliseconds.
    mid=mido.MidiFile()
    track=mido.MidiTrack()
    mid.tracks.append(track)
    mid.ticks_per_beat=1000
    track.append(mido.MetaMessage('set_tempo', tempo=int(1e6)))
    track.append(mido.Message('program_change', program=sound_presets[self.cfg['sound_preset']], time=0))
    for i in range(len(self.keys)):
        track.append(mido.Message('note_on', note=self.keys[i], velocity=100, time=250))
        track.append(mido.Message('note_off', note=self.keys[i], time=250))
        track.append(mido.Message('note_off', note=self.keys[i], time=500))
    mid.save(fn_midi)
    # Render the MIDI offline to WAV via fluidsynth.
    cmd='fluidsynth -ni {} -F {} -r {} {} {} '.format(self.cfg['fluidsynthextracommand'],fn_wav,fs,fn_font,fn_midi)
    os.system(cmd)
    music.load(fn_wav)
    for i in range(len(self.keys)):
        t=time.time()
        # Redraw the W display highlighting component i in its colormap color.
        self.imW.remove()
        Wtmp=self.data['W_pp'][:,i]
        cmaptmp=self.cmap[i,:-1]
        self.imW=self.Wax2.imshow((Wtmp[:,None]@cmaptmp[None,:]*255/np.max(self.data['W_pp'])).reshape(self.data['W_shape'][0],self.data['W_shape'][1],3).clip(min=0,max=255).astype('uint8'))
        self.canvas_W.draw()
        self.update()
        if i==0:
            # Start audio on the first frame so sound and visuals line up.
            music.play(0)
        # Hold each frame for 0.5 s total, accounting for draw time.
        time.sleep(.5-np.min(((time.time()-t),.5)))
    time.sleep(.5)
    music.unload()
    # Best-effort cleanup of the scratch files.
    try:
        os.remove(fn_midi)
        os.remove(fn_wav)
    except OSError as e:
        print("Failed with:", e.strerror)
    self.refresh_GUI()
def mk_midi_track(messages) -> mido.MidiFile:
    """Build a type-0 MIDI file wrapping `messages` in a single track.

    Timing is set so that one tick equals one millisecond at 120 bpm
    (1000 ticks/second -> 500 ticks per beat).
    """
    mid = mido.MidiFile(type=0)
    bpm = 120
    ticks_per_second = 1000
    mid.ticks_per_beat = int(ticks_per_second * 60 / bpm)
    track = mido.MidiTrack()
    mid.tracks.append(track)
    track.append(
        mido.MetaMessage("instrument_name", name="Acoustic Grand Piano"))
    # Set program 0 on every available channel up front.
    for channel in MidiFile.available_channel:
        track.append(
            mido.Message("program_change", program=0, time=0, channel=channel))
    track.extend(messages)
    return mid
def get_key_signature_in_meta(root_dir='E:/free_midi_library/raw_midi'):
    """Print every key_signature meta message found in the MIDI collection.

    Args:
        root_dir: base directory holding the raw MIDI files, laid out as
            <root_dir>/<Genre>/<md5>.mid. Defaults to the previously
            hard-coded path, so existing callers are unaffected.
    """
    midi_collection = get_midi_collection()
    for midi in midi_collection.find({}, no_cursor_timeout=True):
        original_path = os.path.join(root_dir, midi['Genre'] + '/',
                                     midi['md5'] + '.mid')
        try:
            mido_object = mido.MidiFile(original_path)
            for i, track in enumerate(mido_object.tracks):
                for msg in track:
                    if msg.is_meta and msg.type == 'key_signature':
                        print(msg)
            print()
        except Exception:
            # Best-effort scan: report the failure and continue with the
            # next document.
            print(traceback.format_exc())
def playhalfn(keys):
    """Play the half-note MIDI files matching the currently pressed keys.

    For every pressed key whose name appears in knm[0], finds the matching
    half-note file in `helfnotes`, plays it, and records
    [keyname, halfnote, duration] triples in the returned list.
    """
    played = list()
    for i in range(0, len(keys)):
        if keys[i]:
            keyname = pygame.key.name(i)
            if keyname in knm[0]:
                # Distinct loop variable: the original reused `i`, shadowing
                # the outer key index.
                for hn in helfnotes:
                    lhn = list(hn)
                    ln = list((knm[1][knm[0].index(keyname)]).upper())
                    if (lhn[0] == ln[0]) and (lhn[2] == ln[1]):
                        pygame.mixer.music.load('midies/' + hn.upper() + '.mid')
                        pygame.mixer.music.play()
                        # FIX: measure the length of the file just played; the
                        # original referenced an undefined global `note` here.
                        m = mido.MidiFile('midies/' + hn.upper() + '.mid')
                        played.append(keyname)
                        played.append(hn)
                        played.append(m.length)
    return played
def init_midi(er, super_pattern):
    """Create the mido.MidiFile for writing, with one track per voice plus a
    meta track, and record track counts on `er`."""
    # LONGTERM not really crazy about these side-effects
    er.num_new_tracks, er.num_existing_tracks = _build_track_dict(
        er, super_pattern.num_voices)
    # A high ticks_per_beat keeps simultaneous note_on/note_off events on
    # distinct ticks. That was required by midiutil (which mis-sorted events
    # sharing a tick); kept high with mido out of caution.
    mf = mido.MidiFile(ticks_per_beat=3200)
    # One extra track for META_TRACK, which will be track 0.
    total_tracks = er.num_new_tracks + er.num_existing_tracks + 1
    for _ in range(total_tracks):
        mf.add_track()
    return mf
def noteStateMatrixTomidi(statematrix, name="example"):
    """
    Inverse of midiToNoteStateMatrix: write `statematrix` to "<name>.mid".

    `statematrix` is expected to be a plain Python list of per-timestep
    states (the `+ [prevstate[:]]` below relies on list concatenation);
    each state holds one [play, articulate] pair per pitch in
    [lowerBound, upperBound). A trailing all-off state is appended so every
    sounding note receives a final note_off.
    """
    pattern = mido.MidiFile()
    track = mido.MidiTrack()
    pattern.tracks.append(track)
    span = upperBound - lowerBound
    tickscale = 55  # MIDI ticks per statematrix timestep
    lastcmdtime = 0
    prevstate = [[0, 0] for x in range(span)]
    for time, state in enumerate(statematrix + [prevstate[:]]):
        offNotes = []
        onNotes = []
        for i in range(span):
            n = state[i]
            p = prevstate[i]
            if p[0] == 1:
                if n[0] == 0:
                    # note was sounding and has stopped
                    offNotes.append(i)
                elif n[1] == 1:
                    # re-articulated: off then immediately on again
                    offNotes.append(i)
                    onNotes.append(i)
            elif n[0] == 1:
                onNotes.append(i)
        # Delta times are relative to the last emitted command.
        for note in offNotes:
            track.append(
                mido.Message('note_off',
                             note=note + lowerBound,
                             time=(time - lastcmdtime) * tickscale))
            lastcmdtime = time
        for note in onNotes:
            track.append(
                mido.Message('note_on',
                             note=note + lowerBound,
                             time=(time - lastcmdtime) * tickscale,
                             velocity=40))
            lastcmdtime = time
        prevstate = state
    track.append(mido.MetaMessage('end_of_track'))
    pattern.save("{}.mid".format(name))