def play(self, path, basebpm=120, transpose=0) -> None:
    """
    Plays a MIDI file on the carillon, using the first track.

    Parameters
    ----------
    path : str
        Path to the MIDI file to play.
    basebpm : int (optional)
        BPM value that corresponds to 120. This lets the piece be sped up
        or slowed down.
    transpose : int (optional)
        Number of semitones by which to transpose the piece.
    """
    mid = mido.MidiFile(path)
    tempo = mido.bpm2tempo(basebpm)
    for msg in mid.tracks[0]:
        if msg.type == 'set_tempo':
            tempo = 120 / basebpm * msg.tempo
        if msg.type not in ('note_on', 'note_off'):
            continue
        time.sleep(mido.tick2second(msg.time, mid.ticks_per_beat, tempo))
        if msg.velocity == 0:
            continue
        self.hit(msg.note + transpose)
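# Usage sketch for play() above (hypothetical: the snippet does not show the
# surrounding class, so "Carillon" is an assumed name for whatever defines
# play()/hit(), and "song.mid" is a placeholder path):
#
#   carillon = Carillon()
#   carillon.play("song.mid", basebpm=132, transpose=-12)  # faster, one octave down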
def midi_to_seq(midi_file, track):
    """ Converts a MIDO track object into an event sequence """
    events = []
    tempo = 500000  # default MIDI tempo until a set_tempo message arrives
    last_velocity = None
    for msg in track:
        event_type = msg.type

        # Parse delta time
        if msg.time != 0:
            seconds = mido.tick2second(msg.time, midi_file.ticks_per_beat, tempo)
            standard_ticks = round(seconds * TICKS_PER_SEC)

            # Add in seconds
            while standard_ticks >= 1:
                # Find the largest bin to put this time in
                tick_bin = find_tick_bin(standard_ticks)

                if tick_bin is None:
                    break

                evt_index = TIME_OFFSET + tick_bin
                assert evt_index >= TIME_OFFSET and evt_index < VEL_OFFSET, (
                    standard_ticks, tick_bin)
                events.append(evt_index)
                standard_ticks -= TICK_BINS[tick_bin]

                # Approximate to the nearest tick bin instead of precise wrapping
                if standard_ticks < TICK_BINS[-1]:
                    break

        # Ignore meta messages
        if msg.is_meta:
            if msg.type == 'set_tempo':
                # Handle tempo setting
                tempo = msg.tempo
            continue

        # Ignore control changes
        if event_type != 'note_on' and event_type != 'note_off':
            continue

        if event_type == 'note_on':
            velocity = msg.velocity // (MIDI_VELOCITY // VEL_QUANTIZATION)
        elif event_type == 'note_off':
            velocity = 0

        # If velocity is different, we update it
        if last_velocity != velocity:
            events.append(VEL_OFFSET + velocity)
            last_velocity = velocity

        events.append(NOTE_ON_OFFSET + msg.note)
    return np.array(events)
def __get_msg(self, msg: MetaMessage, csr_tempo: int, delta_tick: int):
    ''' comprehend core messages and generate a new PrMidiMsg '''
    abs_tick = msg.time + delta_tick
    if abs_tick > 0:
        delta_secs = tick2second(msg.time, self.tpb, csr_tempo)
    else:
        delta_secs = 0
    return PrMidiMsg(msg.copy(), tick=abs_tick, secs=delta_secs)
def get_velocity_time(self, tick_velocity):
    """ Convert the velocity to a length of time based on the tempo """
    # Clamp velocity into [1, 127], then map it to a tick count
    tick_velocity = tick_velocity if tick_velocity < 127 else 127
    tick_velocity = tick_velocity if tick_velocity > 0 else 1
    ticks = 127 - tick_velocity
    return tick2second(ticks, TICKS_PER_BEAT, self.msg_tempo)
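# Worked check of the mapping in get_velocity_time() above (a sketch:
# TICKS_PER_BEAT = 480 and the 500000 us/beat default tempo are assumed
# values, not from the source). A velocity of 100 maps to 127 - 100 = 27 ticks:
import mido

print(mido.tick2second(127 - 100, 480, 500000))  # 0.028125 seconds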
def __init__(self, path, duration=None, fps=30, bpm=120, rounded=True, note_names={}):
    note_names_reversed = {v: k for (k, v) in note_names.items()}

    midi_path = path if isinstance(path, Path) else Path(path).expanduser()
    mid = mido.MidiFile(str(midi_path))

    events = {}
    open_notes = {}
    for i, track in enumerate(mid.tracks):
        cumulative_time = 0
        events[track.name] = []
        open_notes[track.name] = {}
        for idx, msg in enumerate(track):
            if hasattr(msg, "note"):
                delta_s = mido.tick2second(msg.time, mid.ticks_per_beat,
                                           mido.bpm2tempo(bpm))
                cumulative_time += delta_s
                o = open_notes.get(track.name).get(msg.note)
                if o is not None:
                    open_notes[track.name][msg.note] = None
                    events[track.name].append(
                        MidiNote(msg.note, o, cumulative_time, fps, rounded))
                if msg.type == "note_on" and msg.velocity > 0:
                    open_notes[track.name][msg.note] = cumulative_time

    self.note_names = note_names
    self.midi_file = mid
    self.fps = fps
    self.start = 0
    self.end = 0

    tracks = []
    for track_name, es in events.items():
        tracks.append(
            MidiTrack(es, name=track_name, note_names=note_names_reversed))

    all_notes = []
    for t in tracks:
        for note in t.notes:
            all_notes.append(note)

    self.duration = duration or all_notes[-1].off
    for t in tracks:
        t.duration = self.duration

    self.min = min([n.note for n in all_notes])
    self.max = max([n.note for n in all_notes])
    self.spread = self.max - self.min
    self.tracks = tracks
def parse(self, file):
    self.midi = mido.MidiFile(file)
    tick = 0
    count_notes = 0
    current_meta = {'tempo': 500000}
    meta_messages = [(tick, dict(current_meta))]

    for track_num, track in enumerate(self.midi.tracks):
        for message in track:
            tick += message.time
            if message.type == 'note_on':
                self.notes.append({
                    'note': message.note,
                    'track': track_num
                })
                self.pending_notes[message.note] = (count_notes, tick)  # index, begin_tick
                count_notes += 1
            elif message.type == 'note_off':
                try:
                    assert message.note in self.pending_notes, 'a note_off before note_on'
                except AssertionError:
                    continue
                index, begin = self.pending_notes[message.note]
                self.timeline[begin:tick] = index
                del self.pending_notes[message.note]
            elif message.type == 'set_tempo':
                current_meta['tempo'] = message.tempo
                meta_messages.append((tick, dict(current_meta)))
            elif message.type == 'end_of_track':
                try:
                    assert not self.pending_notes, 'no note_off after note_on'
                except AssertionError:
                    self.pending_notes = dict()
                meta_messages.append((tick, dict(current_meta)))
        tick = 0

    lasttime_sec = 0.0
    for prev_meta, curr_meta in zip(meta_messages, meta_messages[1:]):
        interval_sec = mido.tick2second(curr_meta[0] - prev_meta[0],
                                        self.midi.ticks_per_beat,
                                        prev_meta[1]['tempo'])
        try:
            self.metas[lasttime_sec:lasttime_sec + interval_sec] = \
                dict(prev_meta[1], ticks=(prev_meta[0], curr_meta[0]))
        except ValueError:  # interval_sec is not a positive number
            continue
        lasttime_sec += interval_sec

    try:
        assert abs(lasttime_sec - self.midi.length) < 1, \
            'wrong mapping from seconds to ticks'
    except AssertionError:
        pass
def read_beats(mid, tempo):
    track: MidiTrack = mid.tracks[0]
    now = 0
    for msg in track:
        now += msg.time
        if not isinstance(msg, MetaMessage) and msg.type == 'note_on':
            yield Beat(note=msg.note,
                       time=tick2second(now, mid.ticks_per_beat, tempo))
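# Usage sketch for read_beats() above. Assumptions: Beat's definition is not
# shown in the source, and read_beats' module imports MidiTrack, MetaMessage
# and tick2second from mido. Two quarter-note onsets at 120 bpm:
import mido

mid = mido.MidiFile(ticks_per_beat=480)
mid.tracks.append(mido.MidiTrack([
    mido.Message("note_on", note=60, velocity=64, time=480),
    mido.Message("note_on", note=64, velocity=64, time=480),
]))
for beat in read_beats(mid, tempo=500000):
    print(beat)  # Beat(note=60, time=0.5), then Beat(note=64, time=1.0)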
def _preload(self):
    # Insecure!!
    tempo = 5e5
    for msg in self._merged_track:
        yield fast_copy(
            msg,
            time=tick2second(msg.time, self.ticks_per_beat, tempo)
            if msg.time > 0 else 0)
        if msg.type == 'set_tempo':
            tempo = msg.tempo
def parse_midi(filename):
    score = []
    playing = []
    tempo = 500000
    midi_file = mido.MidiFile(filename)
    print("Number of tracks:", len(midi_file.tracks))

    # Combine all tracks into a single list
    combined = []
    for track in midi_file.tracks:
        total_ticks = 0
        for msg in track:
            total_ticks += msg.time
            combined.append(Message(total_ticks, msg))

    # Sort the commands to be in order
    combined.sort()

    total_seconds = 0.0
    total_ticks = 0
    for message in combined:
        msg = message.msg
        delta_ticks = message.start_tick - total_ticks
        total_ticks = message.start_tick
        total_seconds += mido.tick2second(delta_ticks, midi_file.ticks_per_beat, tempo)
        # print(msg)

        # Set tempo for song
        if msg.type == "set_tempo":
            tempo = msg.tempo
            score.append(Tempo(total_seconds, msg.tempo))
            print("Tempo set to {} at {:.2f} seconds ({} ticks)".format(
                tempo, total_seconds, total_ticks))
        # Start playing note
        elif msg.type == "note_on" and msg.velocity > 0:
            playing.append(Note(msg.note, total_seconds, velocity=msg.velocity))
        # End playing note
        if msg.type == "note_off" or (msg.type == "note_on" and msg.velocity == 0):
            for j in range(len(playing)):
                if playing[j].note == msg.note:
                    playing[j].duration = total_seconds - playing[j].start
                    score.append(playing.pop(j))
                    break

    if len(playing) > 0:
        print("Leftover notes:", len(playing))
        score.extend(playing)

    score.sort()
    print("Score length: ", len(score))
    return score, midi_file.length
def get_mouth_events(self):
    # note_times = []
    current_time = 0
    number_on = 0
    events = []
    open_buffer = 0.1
    close_buffer = 0.1
    last_type = 0
    events.append(Event(start=0, type="mouth_close", weight=1))
    for msg in self.mido_track:
        if not msg.is_meta:
            delta_time = mido.tick2second(msg.time, self.ticks_per_beat, self.tempo)
            current_time += delta_time
            last_time = events[-1].start
            passed = current_time - last_time
            if msg.type == "note_on" and msg.velocity > 0:
                if last_type == 0:
                    if passed > open_buffer:
                        events.append(
                            Event(
                                start=current_time - open_buffer,
                                type="mouth_closed",
                                weight=0,
                            ))
                    events.append(
                        Event(
                            start=current_time,
                            type="mouth_open",
                            weight=truediv(msg.velocity, 127),  # operator.truediv
                        ))
                number_on = number_on + 1
                last_type = 1
            if msg.type == "note_off" or (msg.type == "note_on" and msg.velocity == 0):
                if last_type == 1:
                    if passed > close_buffer:
                        events.append(
                            Event(
                                start=current_time - close_buffer,
                                type="mouth_opened",
                                weight=0,
                            ))
                    events.append(
                        Event(
                            start=current_time,
                            type="mouth_close",
                            weight=truediv(msg.velocity, 127),
                        ))
                number_on = number_on - 1
                last_type = 0
    # assert(number_on>-1)
    # if(number_on==0):
    return events
def dump(self):
    return {
        "type": "note",
        "pitch": self.pitch,
        # duration in milliseconds
        "duration": int(
            mido.tick2second(self.end - self.start, self.tpb, self.tempo) * 1000),
    }
def CalcPrefixTime(tempoList, tpb):
    prefix = [0] * len(tempoList)
    for index, tempo in enumerate(tempoList):
        if index > 0:
            prefix[index] = prefix[index - 1]
            prefix[index] += mido.tick2second(
                tempo[1] - tempoList[index - 1][1], tpb, tempoList[index - 1][0])
    return prefix
def read_midi(path, samplers, polyphonic=False):
    mid = mido.MidiFile(path)
    out = []
    for i, track in enumerate(mid.tracks):
        sampler = samplers[i]
        notes = {}
        last_note = (None, 0, 0)
        t_ptr = 0
        tempo = 500000  # default MIDI tempo; updated by set_tempo below
        for msg in track:
            t_ptr += msg.time
            if msg.type == "note_on":
                notes[msg.note] = (t_ptr, msg.velocity / 255)
                if last_note[0] is not None and not polyphonic:
                    note, start, vel = last_note
                    dur = t_ptr - start
                    dur = mido.tick2second(dur, mid.ticks_per_beat, tempo)
                    start = mido.tick2second(start, mid.ticks_per_beat, tempo)
                    if dur > 0.1:
                        note = NoteObject(
                            Note(lr.midi_to_hz(msg.note), vel, dur), sampler, start)
                        out.append(note)
                last_note = (msg.note, t_ptr, msg.velocity / 255)
            if msg.type == "note_off":
                try:
                    start, vel = notes[msg.note]
                    dur = t_ptr - start
                    dur = mido.tick2second(dur, mid.ticks_per_beat, tempo)
                    start = mido.tick2second(start, mid.ticks_per_beat, tempo)
                    note = NoteObject(Note(lr.midi_to_hz(msg.note), vel, dur),
                                      sampler, start)
                    out.append(note)
                except KeyError:  # note_off without a matching note_on
                    print("Warning: Problems reading MIDI.")
            if msg.type == "set_tempo":
                tempo = msg.tempo
                print(f"Read MIDI tempo: {tempo}")
    return Structure(out, 0)
def GetAccTime(tick, tempoList, tpb, prefixSum):
    ret = 0
    if not hasattr(GetAccTime, "tickTable"):
        GetAccTime.tickTable = []
        for i in tempoList:
            GetAccTime.tickTable.append(i[1])
    index = bisect(GetAccTime.tickTable, tick)
    ret = prefixSum[index - 1] + mido.tick2second(
        tick - tempoList[index - 1][1], tpb, tempoList[index - 1][0])
    return ret
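# Usage sketch for CalcPrefixTime()/GetAccTime() above. Assumptions: tempoList
# is a list of (tempo_us_per_beat, absolute_tick) pairs sorted by tick and
# starting at tick 0, and bisect is bisect.bisect in the snippet's module.
# 120 bpm, then 240 bpm from tick 960, with tpb = 480:
tpb = 480
tempoList = [(500000, 0), (250000, 960)]
prefixSum = CalcPrefixTime(tempoList, tpb)          # [0, 1.0] seconds
print(GetAccTime(1440, tempoList, tpb, prefixSum))  # 1.0 + 0.25 = 1.25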
def get_MIDI_phases_and_times(MIDI_File, tempo, frequency):
    midi_phases = []
    midi_times = []
    for track in MIDI_File.tracks:
        phases = []
        times = []
        phase = 0
        time = 0
        for msg in track:
            # MIDI_data.write("{0} \n".format(msg))
            if msg.type == 'note_on':
                phase += 2 * math.pi * frequency * mido.tick2second(
                    msg.time, MIDI_File.ticks_per_beat, tempo)
                time += mido.tick2second(msg.time, MIDI_File.ticks_per_beat, tempo)
                times.append(time)
                phases.append(math.ceil(phase * 100) / 100)
        midi_phases.append(phases)
        midi_times.append(times)
    return midi_phases, midi_times
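# Usage sketch for get_MIDI_phases_and_times() above: one track with two
# onsets a quarter note apart at 120 bpm, probed with an assumed 2 Hz
# oscillator frequency.
import mido

mid = mido.MidiFile(ticks_per_beat=480)
mid.tracks.append(mido.MidiTrack([
    mido.Message("note_on", note=60, velocity=64, time=480),
    mido.Message("note_on", note=62, velocity=64, time=480),
]))
phases, times = get_MIDI_phases_and_times(mid, tempo=500000, frequency=2)
print(times)   # [[0.5, 1.0]]
print(phases)  # [[6.29, 12.57]]  (2*pi*2*t, rounded up to 2 decimals)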
def note_labelling(root, txt_name, index):
    f = open(os.path.join(root, txt_name), "r")
    total_frame = 0
    fi = open("output.txt")
    for line in fi.readlines():
        if re.search("frame", line) is not None:
            total_frame = int(line[6:11])

    rate = int(f.readline())  # frame rate, # of frames per second
    hour, minute, second, frame = f.readline().split(':')
    frame_time = 3600 * int(hour) * rate + 60 * int(minute) * rate \
        + int(second) * rate + int(frame)

    mid_name = re.sub("txt", "mid", txt_name)
    mid = mido.MidiFile(os.path.join(root, mid_name))
    label = np.zeros((total_frame, 128))
    # timer = np.zeros(128)
    # delta_frame = int(mido.tick2second(501, mid.ticks_per_beat, 500000.0) * rate)
    for tracks in mid.tracks:
        for msg in tracks:
            # # update timer
            # timer[label[frame_time]==1] += msg.time
            # timer[label[frame_time]==0] = 0

            # get msg time in sec
            msg_sec_time = mido.tick2second(msg.time, mid.ticks_per_beat, 500000.0)

            # broadcast numpy array
            # mask = label == 1
            label[frame_time:frame_time + int(msg_sec_time * rate), :] = \
                label[frame_time, :]
            # if int(msg.time) > 500:
            #     label[frame_time+delta_frame:frame_time+int(msg_sec_time*rate),
            #           mask[frame_time] == 1] = 0

            # update frame time
            frame_time += int(msg_sec_time * rate)
            if frame_time >= total_frame:
                frame_time = total_frame - 1

            # # first iter < 500 < second iter
            # for j in range(128):
            #     if (timer[j] > 500):
            #         delta = int(mido.tick2second(timer[j], mid.ticks_per_beat, 500000.0) * rate)
            #         label[frame_time-delta:frame_time, j] = 0

            # mark the coming frame
            if msg.type == "note_on":
                label[frame_time, msg.note] = 1
            elif msg.type == "note_off":
                label[frame_time, msg.note] = 0

    np.save("../dataset/y_train/" + str(index) + '.npy', label)

    # close files
    fi.close()
    f.close()
def cursor(self, tick: int) -> (int, int, int):
    ''' returns tempo, abs_tick, abs_secs from the given tick '''
    csr_tempo, csr_abs_tick, csr_abs_secs = DEFAULT_TEMPO, 0, 0
    self.rewind()
    for t in self:
        # find the right tempo
        if t.tick >= tick:
            break
        csr_abs_tick, csr_abs_secs, csr_tempo = t.tick, t.secs, t.msg.tempo
    # counting abs_time: delta tick / delta time
    dtick = tick - csr_abs_tick
    dsecs = tick2second(dtick, self.tpb, csr_tempo)
    # return abs_tick / abs_time
    return csr_tempo, csr_abs_tick + dtick, csr_abs_secs + dsecs
def linearScaling(fundamental_freqs_query, target_song_name, timesInput):
    queryMIDI = freqs_2_MIDI(fundamental_freqs_query)
    directory = mainPathDB + target_song_name
    target = mido.MidiFile(directory, clip=True)
    targetMIDI = []

    # tempo is time per quarter note = μs per quarter
    tpb = target.ticks_per_beat
    lastTempo = target.tracks[0][0].tempo
    # inSeconds = mido.tick2second(ticks, tpb, lastTempo)

    buckets = [[0 for i in range(2)] for j in range(len(target.tracks[0]))]
    i = 0
    oldTime = 0
    absoluteTime = 0
    absoluteTimes = []
    noteList = []
    for msg in target.tracks[0]:
        absoluteTime += msg.time
        if not msg.is_meta:
            if msg.type == 'note_on':
                if msg.time != 0:
                    c = copy.deepcopy(noteList)
                    buckets[i] = [c, oldTime]
                    i += 1
                    oldTime = round(mido.tick2second(absoluteTime, tpb, lastTempo), 3)
                noteList.append(msg.note)
            elif msg.type == 'note_off':
                if msg.time != 0:
                    c = copy.deepcopy(noteList)
                    buckets[i] = [c, oldTime]
                    i += 1
                    oldTime = round(mido.tick2second(absoluteTime, tpb, lastTempo), 3)
                noteList.remove(msg.note)
    buckets[i] = [noteList, oldTime]

    factor, score = calculate(queryMIDI, buckets[:i + 1], timesInput)
    return (score, factor)
def _parse_notes(midi_file: mido.MidiFile) -> List[_MidiNote]:
    ticks_per_beat: int = midi_file.ticks_per_beat
    tempo_bpm: float = MidiParser._DEFAULT_TEMPO_BPM
    held_notes: List[_MidiNote] = []
    completed_notes: List[_MidiNote] = []
    current_tick: int = 0
    current_time: float = 0.0
    current_bar: float = 1.0
    current_bar_factor: int = 1

    for msg, track in MidiParser.__merge_tracks(midi_file.tracks):
        current_tick += msg.time
        current_time += mido.tick2second(msg.time, ticks_per_beat,
                                         mido.bpm2tempo(tempo_bpm))
        current_bar += msg.time / ticks_per_beat * current_bar_factor
        track_name: str = track.name if track else ""

        if msg.type == 'set_tempo':
            tempo_bpm = mido.tempo2bpm(msg.tempo)
        elif msg.type == 'time_signature':
            current_bar_factor = msg.denominator / (4 * msg.numerator)
        elif msg.type == 'note_on' or msg.type == 'note_off':
            # If the note is already held, complete the note regardless of
            # whether the input is a note_on or note_off message. This is to
            # avoid duplicate notes in a slice if re-triggered before its
            # note_off. See test case "keithjarrett_kolnconcert_Right.mid"
            # bars 311 to 330 for an example of this.
            completed: List[_MidiNote] = [
                note for note in held_notes
                if note.matches(msg.note, msg.channel, track_name)
            ]
            for note in completed:
                note.end_tick = current_tick
                note.end_time = current_time
            completed_notes.extend(completed)
            held_notes = [
                note for note in held_notes
                if not note.matches(msg.note, msg.channel, track_name)
            ]
            if msg.type == 'note_on' and msg.velocity > 0:
                held_notes.append(
                    _MidiNote(msg.note, msg.velocity, msg.channel, current_tick,
                              current_time, tempo_bpm, current_bar, track_name))
            # elif msg.type == 'note_off' or (msg.type == 'note_on' and msg.velocity == 0):

    # If note-ons still exist at the end of the MIDI file, generate note-offs
    # for all of them.
    for note in held_notes:
        note.end_tick = current_tick
        note.end_time = current_time
        completed_notes.append(note)

    return completed_notes
def midi_to_p_roll(mid, Nyquist_rate, sample_duration, pitch_range):
    '''
    Description: Converts a MIDI file into a piano roll of the required time
    length and pitch range.
    **Algorithm was inspired by the method adopted by Jain et al.
    (http://cs229.stanford.edu/proj2019aut/data/assignment_308832_raw/26583519.pdf)
    '''
    piano_size = pitch_range[1] - pitch_range[0]
    p_roll = np.zeros([piano_size + 1, Nyquist_rate * sample_duration])
    track = mido.merge_tracks(mid.tracks)
    current_time = 0
    current_position = 0
    on_notes = np.zeros(piano_size + 1)
    tempo = 500000  # default MIDI tempo (120 bpm) until a set_tempo arrives
    for msg in track:
        if msg.time > 0:
            delta = mido.tick2second(msg.time, mid.ticks_per_beat, tempo)
        else:
            delta = 0

        # Paint the elapsed interval with the note state that was in effect
        # *before* this event, so onsets and offsets land on the right column.
        last_time = current_time
        current_time += delta
        if current_time > sample_duration:
            break
        new_position = np.floor(current_time * Nyquist_rate).astype(int)
        if new_position > current_position:
            block = np.tile(on_notes.reshape(piano_size + 1, 1),
                            new_position - current_position)
            p_roll[:, current_position:new_position] = block
            current_position = new_position

        # Now apply the event itself.
        if hasattr(msg, "note"):
            if msg.type == "note_on":
                if pitch_range[0] <= msg.note <= pitch_range[1]:
                    on_notes[msg.note - pitch_range[0]] = msg.velocity
            else:
                if pitch_range[0] <= msg.note <= pitch_range[1]:
                    on_notes[msg.note - pitch_range[0]] = 0
        if hasattr(msg, "tempo"):
            tempo = msg.tempo
    return p_roll
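# Usage sketch for midi_to_p_roll() above: a single middle C lasting one beat
# at the default 120 bpm, rasterized at 100 columns/second over a 2-second
# window (all parameter values here are assumed for illustration).
import mido

mid = mido.MidiFile(ticks_per_beat=480)
mid.tracks.append(mido.MidiTrack([
    mido.Message("note_on", note=60, velocity=80, time=0),
    mido.Message("note_off", note=60, velocity=0, time=480),
]))
roll = midi_to_p_roll(mid, Nyquist_rate=100, sample_duration=2,
                      pitch_range=(21, 108))
print(roll.shape)           # (88, 200)
print(roll[60 - 21, 0:50])  # velocity 80 for the first half second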
def parse_meta(meta, num_segments, ticks_per_beat, tick_interval):
    meta_tempo = np.zeros((num_segments), dtype=np.float32)
    start = 0
    end = 0
    previous_tempo = 0
    for msg in meta:
        splits = str(msg).split(" ")
        time = int(splits[-1][5:-1])
        if previous_tempo > 0:
            end = start + int(mido.tick2second(time, ticks_per_beat,
                                               previous_tempo) / tick_interval)
            meta_tempo[start:end] = int(mido.second2tick(tick_interval,
                                                         ticks_per_beat,
                                                         previous_tempo))
        if "set_tempo" in str(msg):
            previous_tempo = int(splits[3][6:])
            start = end
    return meta_tempo
def process_track(track: mido.midifiles.tracks.MidiTrack,
                  ticks_per_beat: int, tempo: int) -> List[NoteInfo]:
    ret: List[NoteInfo] = []
    curr_tick = 0
    for msg in track:
        curr_tick += msg.time
        if hasattr(msg, "velocity"):
            if msg.velocity > 0 and msg.type == "note_on":
                ret.append({
                    "note_start": mido.tick2second(curr_tick, ticks_per_beat,
                                                   tempo) * 1000,
                    "midi_note_num": msg.note,
                })
    return ret
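# Usage sketch for process_track() above. Assumption: NoteInfo is a dict-like
# TypedDict, as the literal being appended suggests; note_start is in ms.
import mido

track = mido.MidiTrack([
    mido.Message("note_on", note=60, velocity=90, time=0),
    mido.Message("note_off", note=60, velocity=0, time=480),
    mido.Message("note_on", note=62, velocity=90, time=0),
])
print(process_track(track, ticks_per_beat=480, tempo=500000))
# [{'note_start': 0.0, 'midi_note_num': 60},
#  {'note_start': 500.0, 'midi_note_num': 62}]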
def calculate_note_times_seconds(input_filepath):
    """Calculate times of note onsets (in seconds)."""
    midi_file = mido.MidiFile(input_filepath)
    midi_track = load_melody_from_file(input_filepath)
    tempo = midi_track[1].tempo
    # ppq = midi_track[3].clocks_per_click
    # n32 = midi_track[3].notated_32nd_notes_per_beat
    ppq = midi_file.ticks_per_beat
    note_times = [
        mido.tick2second(msg.time, ppq, tempo) for msg in midi_track
        if "note" in msg.type
    ]
    note_times_summed = []
    for i, t in enumerate(note_times):
        note_times_summed.append(sum(note_times[:i]) + t)
    return note_times_summed
def _get_note_on_times(self, include_negative=None):
    """ Gets the starting time of each note. """
    note_times = []
    current_time = 0
    for msg in self.mido_track:
        if not msg.is_meta:
            delta_time = mido.tick2second(msg.time, self.ticks_per_beat, self.tempo)
            current_time += delta_time
            if msg.type == "note_on" and msg.velocity > 0:
                if include_negative or current_time >= 0:
                    note_times.append(current_time)
    self.note_on_times = np.array(note_times)
def draw_time_scale(self, animate=True):
    time_scale = self.get_time_scale()

    # convert time units from tick -> second
    tick = self.get_time_in_ticks()
    second = mido.tick2second(tick, self.ticks_per_beat, self.get_tempo())
    # print(second)
    if second > 10:
        x_label_period_sec = second // 10
    else:
        x_label_period_sec = second / 10  # milliseconds
    # print(x_label_period_sec)
    x_label_interval = mido.second2tick(
        x_label_period_sec, self.ticks_per_beat, self.get_tempo())
    if animate is False:
        plt.xticks([int(x * x_label_interval) for x in range(20)],
                   [round(x * x_label_period_sec, 2) for x in range(20)])

    # modify label and scale of y axis
    plt.yticks([y * 16 for y in range(8)], [y * 16 for y in range(8)])

    channel_nb = 16
    transparent = colorConverter.to_rgba('black')
    colors = [mpl.colors.to_rgba(mpl.colors.hsv_to_rgb((i / channel_nb, 1, 1)),
                                 alpha=1) for i in range(channel_nb)]
    cmaps = [mpl.colors.LinearSegmentedColormap.from_list(
        'my_cmap', [transparent, colors[i]], 128) for i in range(channel_nb)]

    # get a color for each channel
    for i in range(channel_nb):
        cmaps[i]._init()
        # create your alpha array and fill the colormap with them
        alphas = np.linspace(0, 1, cmaps[i].N + 3)
        # create the _lut array, with rgba values
        cmaps[i]._lut[:, -1] = alphas

    # draw notes on the time scale
    for i in range(channel_nb):
        try:
            self.axes.imshow(time_scale[i], origin="lower",
                             interpolation='nearest', cmap=cmaps[i],
                             aspect='auto')
        except IndexError:
            pass
def __iter__(self):
    if self.type == 2:
        raise TypeError("can't merge tracks in type 2 (asynchronous) file")
    tempo = DEFAULT_TEMPO
    for msg, track in merge_tracks(self.tracks):
        # Convert message time from absolute time
        # in ticks to relative time in seconds.
        if msg.time > 0:
            delta = mido.tick2second(msg.time, self.ticks_per_beat, tempo)
        else:
            delta = 0
        yield msg.copy(time=delta), track
        if msg.type == 'set_tempo':
            tempo = msg.tempo
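# For comparison, plain mido already exposes this pattern: iterating a
# mido.MidiFile yields messages whose .time is a delta in seconds with tempo
# changes applied ("song.mid" is a placeholder path).
import mido

for msg in mido.MidiFile("song.mid"):
    print(msg.time)  # seconds since the previous message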
def midi_to_seq(midi_file, track):
    """ Converts a MIDO track object into an event sequence """
    print('midi_to_seq:midi_file', midi_file)
    print('midi_to_seq:Track =', track.name)
    print('midi_to_seq:ticks_per_beat =', midi_file.ticks_per_beat)
    events = []
    tempo = 500000  # default MIDI tempo until a set_tempo message arrives
    last_velocity = None
    for msg in track:
        event_type = msg.type

        # Parse delta time
        if msg.time != 0:
            seconds = mido.tick2second(msg.time, midi_file.ticks_per_beat, tempo)
            events += list(seconds_to_events(seconds))

        # Ignore meta messages
        if msg.is_meta:
            if msg.type == 'set_tempo':
                # Handle tempo setting
                tempo = msg.tempo
            continue

        # Ignore control changes
        if event_type != 'note_on' and event_type != 'note_off':
            continue

        if event_type == 'note_on':
            velocity = msg.velocity // (MIDI_VELOCITY // VEL_QUANTIZATION)
        elif event_type == 'note_off':
            velocity = 0

        # If velocity is different, we update it
        if last_velocity != velocity:
            events.append(VEL_OFFSET + velocity)
            last_velocity = velocity

        events.append(NOTE_ON_OFFSET + msg.note)
    return np.array(events)
def get_length(self, force=False, ticks=False):
    """ get total length of the song """
    if force or not self.length:
        # reset the length
        tempo = 600000
        ppqn = self.ppqn
        spt = tempo / ppqn / 1000000
        totalticks = 0

        # tempo track related
        tempoticks = 0
        tempolen = .0
        lasttempo = 0
        tempotrack = self.midi_file.tracks[0]

        # find the total tick / length of the tempo track
        for msg in tempotrack:
            tempoticks += msg.time
            tempolen += spt * msg.time
            if msg.is_meta and msg.type == 'set_tempo':
                if tempo != msg.tempo:
                    tempo = msg.tempo
                    spt = tempo / ppqn / 1000000
                lasttempo = msg.tempo

        # find the largest tick count among the tracks
        # (a local name here keeps the `ticks` parameter from being clobbered)
        for track in self.midi_file.tracks:
            trackticks = 0
            for msg in track:
                trackticks += msg.time
            if trackticks > totalticks:
                totalticks = trackticks

        # calculate and set the length
        self.length = tempolen + mido.tick2second(totalticks - tempoticks,
                                                  ppqn, lasttempo)
        self.totalticks = totalticks

    if ticks:
        return self.totalticks
    else:
        return self.length
def getAllKeyInfo(self):
    pianoKeys = []
    pianoTick = []
    for msg in self.rightHandMsg:
        # note = aubio.midi2note(msg.note)
        pianoKeys.append((msg.note, msg.velocity))  # velocity is the volume
        pianoTick.append(msg.time)

    pianoTime = []
    for tick in pianoTick:
        time = mido.tick2second(tick, self.tpb, self.tempo)
        pianoTime.append(time)

    allKeyInfo = []
    for i in range(len(pianoKeys)):
        note, vol = pianoKeys[i]
        duration = pianoTime[i]
        allKeyInfo.append((note, vol, duration))
    return allKeyInfo