Example #1
    def proc_midi(self, path_midi, is_label=True):
        # parse midi to pianoroll
        midi_obj = mid_parser.MidiFile(path_midi)
        notes = midi_obj.instruments[0].notes
        pianoroll = pr_parser.notes2pianoroll(notes)

        # pianoroll to beat-sync pianoroll
        pianoroll_sync = midi_extract_beat_sync_pianoroll(
            pianoroll, midi_obj.ticks_per_beat)

        return self.process(pianoroll_sync, is_label=is_label)
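midi_extract_beat_sync_pianoroll is not shown above; a minimal sketch of what such a helper might do, assuming a tick-resolution pianoroll of shape (time, 128) and simple max-pooling within each beat (this is an assumption, not the project's actual implementation):

import numpy as np

# Hypothetical helper (assumption): pool a tick-resolution pianoroll
# down to one frame per beat.
def midi_extract_beat_sync_pianoroll(pianoroll, ticks_per_beat, pool=np.max):
    n_beats = int(np.ceil(pianoroll.shape[0] / ticks_per_beat))
    beat_sync = np.zeros((n_beats, pianoroll.shape[1]), dtype=pianoroll.dtype)
    for beat in range(n_beats):
        frame = pianoroll[beat * ticks_per_beat:(beat + 1) * ticks_per_beat]
        beat_sync[beat] = pool(frame, axis=0)
    return beat_sync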
Example #2
def identify_song(input_obj):
    # loading
    if isinstance(input_obj, str):
        midi_file = parser.MidiFile(input_obj)
    else:
        midi_file = input_obj

    # processing
    num_instr = len(midi_file.instruments)

    pianorolls = []
    for idx in range(num_instr):
        pr = midi_file.get_instrument_pianoroll(idx, resample_resolution=24)
        pianorolls.append(pr)
    ys = identify_multiple_track(pianorolls)
    return ys
Example #3
def get_notes(midi_file, track_name='MELODY'):
    """
    Get notes from MIDI file

    :param midi_file: path to the MIDI file, as a string or Path
    :param str track_name: name of track to use
    :return: midi object, notes list
    """
    midi_path = Path(midi_file)
    midi_song = mid_parser.MidiFile(str(midi_path))
    if len(midi_song.instruments) == 1:
        track_index = 0
    else:
        track_map = {
            track.name: idx
            for idx, track in enumerate(midi_song.instruments)
        }
        track_index = track_map.get(track_name)
    notes = midi_song.instruments[track_index].notes
    return midi_song, notes
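A minimal usage sketch for the helper above; the file name below is a placeholder:

# Hypothetical call: grab the 'MELODY' track of a multi-track file.
midi_song, notes = get_notes('lead_sheet.mid', track_name='MELODY')
print('number of notes:', len(notes))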
Example #4
    def write_to_midi(self,
                      output_file,
                      chord_note_offset=0.05,
                      tempo=120,
                      chord_instrument=33):
        midi_obj = parser.MidiFile()
        midi_obj.tempo_changes = [TempoChange(tempo, 0)]
        midi_obj.instruments = [
            Instrument(0, name='melody'),
            Instrument(chord_instrument, name='chords')
        ]

        for note in self.notes:
            # print ('processing note:', note)
            midi_obj.instruments[0].notes.append(
                Note(note.velocity, note.pitch, sec2ticks(note.onset_sec),
                     sec2ticks(note.onset_sec + note.duration_sec)))

        for chord in self.chords:
            # print ('processing chord:', chord)
            # cap the arpeggio offset so the spread chord tones all start
            # within the first half of the chord's duration
            n_gaps = len(chord.chord_notes) - 1
            if chord_note_offset * n_gaps > 0.5 * chord.duration_sec:
                offset = 0.5 * chord.duration_sec / n_gaps
            else:
                offset = chord_note_offset

            midi_obj.instruments[1].notes.append(
                Note(chord.velocity, chord.bass, sec2ticks(chord.onset_sec),
                     sec2ticks(chord.onset_sec + chord.duration_sec)))

            for i, n in enumerate(chord.chord_notes):
                note_onset_sec = chord.onset_sec + offset * i
                midi_obj.instruments[1].notes.append(
                    Note(chord.velocity, n, sec2ticks(note_onset_sec),
                         sec2ticks(chord.onset_sec + chord.duration_sec)))

        midi_obj.dump(output_file)
        return
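sec2ticks is not defined in this example; a minimal sketch, assuming a fixed tempo and the usual 480 ticks-per-beat resolution (both assumptions):

# Hypothetical helper (assumption): convert seconds to ticks at a fixed tempo.
def sec2ticks(seconds, tempo=120, ticks_per_beat=480):
    # seconds * beats-per-second * ticks-per-beat
    return int(round(seconds * tempo / 60.0 * ticks_per_beat))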
Example #5
def play_chords(midi_obj):
    default_velocity = 63
    midi_maps = [
        chord_to_midi(Chord(marker.text)) for marker in midi_obj.markers
    ]
    new_midi_obj = parser.MidiFile()
    new_midi_obj.time_signature_changes.append(
        containers.TimeSignature(numerator=4, denominator=4, time=0))
    new_midi_obj.instruments.append(
        containers.Instrument(program=0, is_drum=False, name='Piano'))

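    # hold each chord's pitches from its marker until the next marker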
    for midi_map, prev_marker, next_marker in zip(midi_maps,
                                                  midi_obj.markers[:-1],
                                                  midi_obj.markers[1:]):
        for midi_pitch in midi_map:
            midi_note = containers.Note(start=prev_marker.time,
                                        end=next_marker.time,
                                        pitch=midi_pitch,
                                        velocity=default_velocity)
            new_midi_obj.instruments[0].notes.append(midi_note)

    return new_midi_obj
Example #6
    def to_midi(self, output_path=None):
        midi = parser.MidiFile()

        midi.ticks_per_beat = DEFAULT_TICK_RESOL
        tempos = []
        chords = []
        instr_notes = defaultdict(list)

        time = 0
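        # walk the event stream: Bar events advance the clock by one bar and
        # may carry tempo/chord changes; Note events become MIDI notes
        # positioned relative to the current bar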
        for e in self.events:
            if isinstance(e, Bar):
                if e.tempo:
                    tempos += [ct.TempoChange(e.tempo, time)]
                if e.chord:
                    chords += [ct.Marker(e.chord, time)]
                time += DEFAULT_BAR_RESOL
            if isinstance(e, Note):
                s = DEFAULT_STEP * e.position + time
                instr_notes[e.inst_family] += [
                    ct.Note(e.velocity, e.pitch, s,
                            s + e.duration * DEFAULT_STEP)
                ]
        tempos.sort(key=lambda x: x.time)
        chords.sort(key=lambda x: x.time)

        instruments = []
        for k, v in instr_notes.items():
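            # map the instrument-family index to the first GM program of that
            # family (8 programs per family); family index 16 is treated as drums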
            inst = ct.Instrument(k * 8 if k < 16 else 0, k == 16)
            inst.notes = sorted(v, key=lambda x: x.start)
            instruments += [inst]

        midi.instruments = instruments
        midi.tempo_changes = tempos
        midi.key_signature_changes = []
        midi.time_signature_changes = []
        if output_path:
            midi.dump(output_path)
        return midi
Example #7
def identify_song(input_obj):
    # loading
    if isinstance(input_obj, str):
        midi_file = parser.MidiFile(input_obj)
    else:
        midi_file = input_obj

    # processing
    num_instr = len(midi_file.instruments)

    pianorolls = []
    for idx in range(num_instr):
        pr = midi_file.get_instrument_pianoroll(idx, resample_resolution=24)
        # pr = notes2pianoroll(midi_file.instruments[idx].notes,
        #                     midi_file.ticks_per_beat,
        #                     resample_factor = 24,
        #                     resample_method = round,
        #                     binary_thres = None,
        #                     max_tick = midi_file.max_tick,
        #                     keep_note = True)
        pianorolls.append(pr)
    ys = identify_multiple_track(pianorolls)
    return ys
Example #8
def align_midi(proc_res,
               path_midi_input,
               path_midi_output,
               ticks_per_beat=480):
    midi_data = parser.MidiFile(path_midi_input)

    # compute tempo
    beats = np.array([0.0] + list(proc_res[:, 0]))
    intervals = np.diff(beats)
    bpms = 60 / intervals
    tempo_info = list(zip(beats[:-1], bpms))

    # get absolute timing of instruments
    tick_to_time = midi_data.get_tick_to_time_mapping()
    abs_instr = get_instruments_abs_timing(midi_data.instruments, tick_to_time)

    # get end time of file
    end_time = tick_to_time[-1]

    # compute time to tick mapping
    resample_timing = []
    for i in range(len(beats) - 1):
        start_beat = beats[i]
        end_beat = beats[i + 1]
        resample_timing += interp_linear(start_beat, end_beat, ticks_per_beat)

    # pad the tail (using the last tick interval)
    last_tick_interval = resample_timing[-1] - resample_timing[-2]
    cur_time = resample_timing[-1]
    while cur_time < end_time:
        cur_time += last_tick_interval
        resample_timing.append(cur_time)
    resample_timing = np.array(resample_timing)

    # create a new MidiFile object
    midi_res = parser.MidiFile()

    # convert abs to sym
    sym_instr = convert_instruments_timing_from_abs_to_sym(
        abs_instr, resample_timing)

    # time signature
    first_db_sec = find_first_downbeat(proc_res)
    first_db_tick = find_nearest_np(resample_timing, first_db_sec)
    time_signature_changes = [
        TimeSignature(numerator=4, denominator=4, time=int(first_db_tick))
    ]

    # tempo
    tempo_changes = []
    for pos, bpm in tempo_info:
        pos_tick = find_nearest_np(resample_timing, pos)
        tempo_changes.append(TempoChange(tempo=float(bpm), time=int(pos_tick)))

    # shift (pickup at the beginning)
    shift_align = ticks_per_beat * 4 - first_db_tick

    # apply shift to tempo
    for msg in tempo_changes:
        msg.time += shift_align

    # apply shift to notes
    for instr in sym_instr:
        for note in instr.notes:
            note.start += shift_align
            note.end += shift_align

    # set attributes
    midi_res.ticks_per_beat = ticks_per_beat
    midi_res.tempo_changes = tempo_changes
    midi_res.time_signature_changes = time_signature_changes
    midi_res.instruments = sym_instr

    # saving
    midi_res.dump(filename=path_midi_output)
Example #9
from miditoolkit.midi import parser as mid_parser
import pretty_midi as pm
import mido

# testcase
path_midi = 'groove/drummer1/eval_session/1_funk-groove1_138_beat_4-4.mid'

# load
mt_obj = mid_parser.MidiFile(path_midi)
pm_obj = pm.PrettyMIDI(path_midi)
mido_obj = mido.MidiFile(path_midi)

# check
print('pretty-midi:', len(pm_obj.instruments[0].notes))
print('miditoolkit:', len(mt_obj.instruments[0].notes))

# mido msg
note_ons = []
note_offs = []
for msg in mido_obj:
    if msg.type == 'note_on':
        if msg.velocity == 0:
            note_offs.append(msg)
        else:
            note_ons.append(msg)
    if msg.type == 'note_off':
        note_offs.append(msg)

# check
print('mido (note-on msgs):', len(note_ons))
print('mido (note-off msgs):', len(note_offs))
Example #10
import miditoolkit.midi.parser as mid_parser
from miditoolkit.midi import containers as ct

import numpy as np
from io import BytesIO

# -------------------------------------- #
#  Create dummy file                     #
# -------------------------------------- #
# create an empty file
mido_obj = mid_parser.MidiFile()
beat_resol = mido_obj.ticks_per_beat

# create an instrument
track = ct.Instrument(program=0, is_drum=False, name='example track')
mido_obj.instruments = [track]

# create eighth notes
duration = int(beat_resol * 0.5)
prev_end = 0
pitch = 60
print(' > create a dummy file')
for i in range(10):
    # create one note
    start = prev_end
    end = prev_end + duration
    velocity = np.random.randint(1, 127)
    note = ct.Note(start=start, end=end, pitch=pitch, velocity=velocity)
    print(i, note)
    mido_obj.instruments[0].notes.append(note)

    # advance to the next eighth note
    prev_end = end
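The snippet stops after populating the track; assuming the goal is a file on disk, a dump call would typically follow (the filename is a placeholder):

# write the dummy file to disk (hypothetical filename)
mido_obj.dump('dummy.mid')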
Example #11
def preprocess(path_midi, dump_path='tmp.mid'):
    midi_file = parser.MidiFile(path_midi)
    ys = proc.identify_song(midi_file)
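    # indices of the instruments whose predicted label is 0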
    midx = list(np.where(np.array(ys) == 0)[0])
    midi_file.dump(filename=dump_path, instrument_idx=midx)
Example #12
def gen_midi(note_seq, out_file):
    # check/repair the token sequence
    seq = []

    # Scan for pitch-duration pairs.
    # When the pattern is irregular, keep the last pitch before a duration and
    # the first duration after it, so each kept pitch is adjacent to its duration:
    # p1 d1 p2 p3 d2 p4 d3 -> p1 d1 p3 d2 p4 d3
    # p1 d1 p2 d2 d3 p3 d4 -> p1 d1 p2 d2 p3 d4
    # p1 d1 p2 p3 d2 d3 p4 d4 -> p1 d1 p3 d2 p4 d4

    i = 0
    while i < len(note_seq):
        if note_seq[i] > 128:
            # duration token
            i += 1
            continue
        else:
            # pitch token
            if i + 1 >= len(note_seq):
                # no duration follows
                break
            if note_seq[i + 1] <= 128:
                # followed by another pitch
                i += 1
                continue

            # followed by a duration
            pitch = note_seq[i]
            duration = float(Duration_vocab[note_seq[i + 1]])
            seq.append((pitch, duration))
            i += 2

    # pattern = midi.Pattern()
    # track = midi.Track()
    # pattern.append(track)

    # create an empty file
    mido_obj = mid_parser.MidiFile()
    beat_resol = mido_obj.ticks_per_beat

    # create an instrument
    track = ct.Instrument(program=0, is_drum=False, name='Lead')
    mido_obj.instruments = [track]

    prev_end = 0
    # rest_time = 0

    for note in seq:
        print(note)
        duration = round(note[1] * beat_resol)
        if note[0] < 128:  # Pitch
            start = prev_end
            end = prev_end + duration
            print(f"{note[0]} from {start} to {end}")
            nt = ct.Note(start=start, end=end, pitch=note[0], velocity=100)
            mido_obj.instruments[0].notes.append(nt)
            prev_end += duration

            # Instantiate a MIDI note on event, append it to the track
            # tick:according to last?
            # on = midi.NoteOnEvent(tick=rest_time, velocity=100, pitch=note[0])
            # track.append(on)

            # # Instantiate a MIDI note off event, append it to the track
            # off = midi.NoteOffEvent(tick=round(note[1]*mspb), pitch=note[0])
            # track.append(off)

            # rest_time = 0
        else:  # Rest
            assert note[0] == REST_NOTE
            prev_end += duration
            # rest_time += round(note[1]*mspb)

    # create markers
    marker_hi = ct.Marker(time=0, text='HI')
    mido_obj.markers.append(marker_hi)

    mido_obj.tempo_changes.append(ct.TempoChange(240, 0))

    mido_obj.dump(out_file)
Example #13
from miditoolkit.midi import parser
from prtoolkit import vis, utils
from pypianoroll import Multitrack, Track
from matplotlib import pyplot as plt

# load file with miditoolkit
path_midi = 'test_midis/test_1.mid'
data = parser.MidiFile(path_midi)

# load file with pypianoroll
midifile = 'test.mid'
multi = Multitrack(midifile)

# select track
track = multi.tracks[2]

# get pianoroll (time x pitch)
pr = track.pianoroll

# ---------------------------------------------
# Example 1: visualize the piano roll
#   simulate the scenario where piano rolls
#   are usually downsampled and cropped

# downsample
pr = pr[::2, :]
resol = multi.beat_resolution // 2
# downbeats = multi.downbeat
downbeats = 4  # beats per bar

# crop by pitch