def _join_tracks(left_track, right_track):
    """Merge note events from a left and a right track into one list.

    Converts each track's delta times to absolute ticks, translates ticks
    to seconds using a fixed default tempo (120 BPM) and resolution
    (480 ticks/beat), and returns all note_on/note_off events as
    MidiEvent objects sorted by their `when` timestamp.

    NOTE(review): the hard-coded tempo/resolution ignore any set_tempo
    messages in the tracks — presumably acceptable for this caller; confirm.
    """
    default_tempo = mido.bpm2tempo(120)
    default_ticks_per_beat = 480

    def _collect(track, is_left):
        # Shared per-track conversion; previously duplicated verbatim
        # for the left and right tracks.
        events = []
        for msg in _to_abstime(track):
            if msg.type in ('note_on', 'note_off'):
                when = mido.tick2second(msg.time, default_ticks_per_beat,
                                        default_tempo)
                events.append(MidiEvent(pitch=msg.note,
                                        is_note_on=(msg.type == 'note_on'),
                                        when=when,
                                        is_left=is_left))
        return events

    # Left events are appended first; the stable sort then preserves the
    # original left-before-right ordering for simultaneous events.
    messages = _collect(left_track, True) + _collect(right_track, False)
    messages.sort(key=lambda msg: msg.when)
    return messages
def musical_extract_midi(path: str):
    """Generates (BPM: float, duration since last: float, [pitch: int])

    Reads the MIDI file at `path`, merges all tracks into one absolute-time
    stream, and yields one tuple per distinct onset time: the tempo in BPM
    active at that moment, the gap (in beats) since the previous onset, and
    the list of pitches struck together at that tick.

    Raises AssertionError for type-2 (asynchronous) files or files with no
    messages. NOTE(review): asserts are stripped under `python -O`.
    """
    f = MidiFile(path)
    assert f.type != 2, "asynchronous midi files are not supported yet"
    messages = []
    for track in f.tracks:
        # _to_abstime converts each track's delta times to absolute ticks
        # so messages from different tracks can be interleaved by time.
        messages.extend(_to_abstime(track))
    assert messages, "failed to find messages. Erroneous file?"
    messages.sort(key=attrgetter('time'))
    tempo = DEFAULT_TEMPO
    # `tick` is the absolute time of the chord currently being collected;
    # `last` is the absolute time of the previously yielded chord.
    last = tick = 0
    output = set()
    for msg in messages:
        # note_on with velocity 0 is a disguised note_off and is ignored here.
        if msg.type == 'note_on' and msg.velocity > 0:
            if msg.time == tick:  # Current hit
                output.add(msg.note)
            elif output:  # New hit
                # Flush the completed chord before starting the next one.
                yield tempo2bpm(tempo), (tick - last) / f.ticks_per_beat, list(output)
                output = {msg.note}
                last, tick = tick, msg.time
            else:  # Non-0 Beginning
                # First audible note arrives after tick 0: re-anchor both
                # markers so the first yielded gap is 0 beats.
                last = tick = msg.time
                output.add(msg.note)
        elif msg.type == 'set_tempo':
            tempo = msg.tempo
    # Last hit
    if output:
        yield tempo2bpm(tempo), (tick - last) / f.ticks_per_beat, list(output)
def readAndProcessMidi(path: str, resolution=1 / 8):
    """Quantize the note onsets of a MIDI file onto a fixed beat grid.

    Path is supposed to lead to a valid midi file. Yields
    (steps_since_last: int, [pitch: int]) pairs, where the step count is
    measured in `resolution`-sized beats between yielded groups. Onsets
    further than `resolution / 4` beats behind the current grid position
    are discarded and counted as skipped; a loss summary is printed at
    the end.

    Raises AssertionError for type-2 files or files with no messages.
    """
    f = MidiFile(path)
    assert f.type != 2, "asynchronous midi files are not supported yet"
    messages = []
    for track in f.tracks:
        messages.extend(_to_abstime(track))
    assert messages, "failed to find messages. Erroneous file?"
    # Keep only audible onsets (a velocity-0 note_on is a disguised note_off).
    messages = [m for m in messages if m.type == 'note_on' and m.velocity > 0]
    if not messages:
        # Fix: the filter can empty the list even though the assert above
        # passed; indexing messages[0] below used to raise IndexError.
        return
    messages.sort(key=attrgetter('time'))
    output = set()
    TOLERANCE = resolution / 4
    i = skipped = 0
    ticks_per_beat = f.ticks_per_beat  # hoisted loop invariant
    last = beat = messages[i].time / ticks_per_beat
    while i < len(messages):
        # Fix: previously recomputed this division two to three times per
        # iteration; compute the onset's beat position once.
        t = messages[i].time / ticks_per_beat
        if t < beat - TOLERANCE:
            # Falls behind, then discard
            skipped += 1
            i += 1
        elif t <= beat + TOLERANCE:
            # Collected
            output.add(messages[i].note)
            i += 1
        else:
            # Exceeded, then advance the grid; `last` only moves when a
            # group is actually emitted, so gaps accumulate across empty beats.
            if output:
                yield int((beat - last) / resolution), list(output)
                output.clear()
                last = beat
            beat += resolution
    if output:
        # Last notes
        yield int((beat - last) / resolution), list(output)
    print(f" {path} - Total={i}; Skipped={skipped}; Loss={skipped / i * 100:.2f}%")
def merge_tracks(self, tracks, ticks_per_beat):
    """Returns a MidiTrack object with all messages from all tracks.

    The messages are returned in playback order with delta times as if
    they were all in one track.

    Additionally shifts chord information earlier in time: for each
    'lyrics' message occurring at or after `preload_chord_amt` beats,
    a 'marker' copy is inserted `common_chord_tone_amt` beats earlier
    (when that amount is > 0), and the lyric itself is moved
    `preload_chord_amt` beats earlier (when that amount is > 0).

    NOTE(review): when only common_chord_tone_amt is set, the guard
    `msg.time >= ticks_per_beat * self.preload_chord_amt` is trivially
    true, so an early lyric can produce a marker with a negative time —
    confirm downstream tolerates this.
    """
    messages = []
    for track in tracks:
        # Absolute times are needed so messages from different tracks
        # can be merged and re-sorted before converting back to deltas.
        messages.extend(_to_abstime(track))
    aux_msgs = list()
    print("Preload amt {} and common tone {}".format(self.preload_chord_amt, self.common_chord_tone_amt))
    for msg in messages:
        if (self.preload_chord_amt > 0.0 or self.common_chord_tone_amt > 0.0) and msg.type == 'lyrics' and msg.time >= ticks_per_beat * self.preload_chord_amt:
            # push lyrics back a bit
            if self.common_chord_tone_amt > 0.0:
                # Marker copy of the lyric text, placed earlier in time.
                m = MetaMessage(type='marker', time = msg.time - ticks_per_beat*self.common_chord_tone_amt, text=msg.text)
                aux_msgs.append(m)
            if self.preload_chord_amt > 0.0:
                # Mutates the message in place; safe because the list was
                # rebuilt from _to_abstime above.
                msg.time -= ticks_per_beat * self.preload_chord_amt
    messages += aux_msgs
    messages.sort(key=lambda msg: msg.time)
    return MidiTrack(fix_end_of_track(_to_reltime(messages)))
def __init__(self, track, ticks_per_beat=48):
    """Build a time-sorted note list and tempo map from a single MIDI track.

    track          -- iterable of mido messages (delta times); converted to
                      absolute times via _to_abstime.
    ticks_per_beat -- time resolution stored on the instance (default 48).

    Populates self.notes with MidiNote(pitch, onset_time, duration,
    onset_velocity) and self.tempos with (time, tempo) pairs.
    """
    self.id = uuid.uuid4()
    self.ticks_per_beat = ticks_per_beat
    # note_map[pitch] holds the pending note_on awaiting its note_off.
    note_map = 128 * [None]
    self.notes = []
    self.tempos = []
    for msg in _to_abstime(track):
        if msg.type == 'note_on' and msg.velocity > 0:
            note_map[msg.note] = msg
        elif msg.type in ['note_on', 'note_off']:
            # A velocity-0 note_on counts as a note_off here.
            onset = note_map[msg.note]
            if onset is None:
                # Fix: a note_off with no matching note_on previously
                # crashed with AttributeError on `onset.time`; skip it.
                continue
            # Consume the onset so it cannot pair with a second note_off.
            note_map[msg.note] = None
            self.notes.append(
                MidiNote(msg.note, onset.time, msg.time - onset.time,
                         onset.velocity))
        elif msg.type == 'set_tempo':
            self.tempos.append((msg.time, msg.tempo))
    self.notes.sort(key=lambda x: (x.time, x.note))
def _parse_notes(track, f):
    """Accumulate a piano roll from one MIDI track.

    For every note_on in the 1..87 pitch range, records
    [pitch, onset_tick, matching_note_off_tick] and feeds it to
    _add_note_to_roll with scale factor `f`. Returns (roll, start_frame)
    where start_frame is the first onset tick divided by `f`.

    NOTE(review): the third entry is the absolute off-time, not a
    duration (contrast parse_notes, which subtracts the onset) — confirm
    _add_note_to_roll expects it this way. A track with no note_on
    messages leaves `start` as None and raises TypeError at the end.
    """
    events = [m for m in mitracks._to_abstime(track)
              if m.type in ('note_on', 'note_off')]
    start = None
    roll = None
    for idx, ev in enumerate(events):
        if ev.type != 'note_on':
            continue
        if start is None:
            start = ev.time
        entry = [ev.note, ev.time, 0]
        # Pair with the first subsequent note_off of the same pitch.
        for later in events[idx + 1:]:
            if later.type == 'note_off' and later.note == entry[0]:
                entry[2] += later.time
                break
        if 0 < entry[0] < 88:
            roll = _add_note_to_roll(roll, entry, f)
    return roll, int(start / f)
def musical_extract_midi_with_inst(path: str, offset=True):
    """Generates (BPM: float, duration since last: float, [(pitch: int, instrument_index: int)])

    Like musical_extract_midi, but each pitch is paired with the program
    (instrument) number of its channel. With offset=True, program numbers
    are shifted by +1 and 0 is reserved for channels with no
    program_change seen yet; with offset=False they are used as-is and
    unknown channels default to 0.
    """
    if offset:
        offset = 1
    else:
        offset = 0
    f = MidiFile(path)
    assert f.type != 2, "asynchronous midi files are not supported yet"
    messages = []
    for track in f.tracks:
        # Absolute times allow interleaving messages from all tracks.
        messages.extend(_to_abstime(track))
    assert messages, "failed to find messages. Erroneous file?"
    messages.sort(key=attrgetter('time'))
    tempo = DEFAULT_TEMPO
    # `tick` is the time of the chord being collected, `last` that of the
    # previously yielded chord.
    last = tick = 0
    output = set()
    insts = dict()  # channel -> current program number (plus offset)
    for msg in messages:
        if msg.type == 'note_on' and msg.velocity > 0:
            if msg.time == tick:  # Current hit
                output.add((msg.note, insts.get(msg.channel, offset)))
            elif output:  # New hit
                yield (tempo2bpm(tempo), (tick - last) / f.ticks_per_beat, list(output))
                output = {(msg.note, insts.get(msg.channel, offset))}
                last, tick = tick, msg.time
            else:  # Non-0 Beginning
                # First audible note after tick 0: re-anchor both markers
                # so the first yielded gap is 0 beats.
                last = tick = msg.time
                output.add((msg.note, insts.get(msg.channel, offset)))
        elif msg.type == 'set_tempo':
            tempo = msg.tempo
        elif msg.type == 'program_change':
            insts[msg.channel] = msg.program + offset
    # Last hit
    if output:
        yield tempo2bpm(tempo), (tick - last) / f.ticks_per_beat, list(output)
def parse_notes(track):
    # TODO: TORCH
    """Extract [pitch, onset_tick, duration] triples from one MIDI track.

    Each note_on is paired with the first subsequent note_off of the same
    pitch; the duration is off_time - on_time. Returns
    (clean_identical_notes(notes, 128), start) where start is the first
    note_on tick, or None if the track has no note_on messages.

    NOTE(review): a note_on with no matching note_off keeps a duration of
    -on_time (negative) — confirm clean_identical_notes handles that.
    """
    events = [m for m in mitracks._to_abstime(track)
              if m.type in ('note_on', 'note_off')]
    notes = []
    start = None
    for idx, ev in enumerate(events):
        if ev.type != 'note_on':
            continue
        if start is None:
            start = ev.time
        entry = [ev.note, ev.time, -ev.time]
        # Forward scan for the closing note_off of this pitch.
        for later in events[idx + 1:]:
            if later.type == 'note_off' and later.note == entry[0]:
                entry[2] += later.time
                break
        notes.append(entry)
    return clean_identical_notes(notes, 128), start