Example #1
0
    def parse_midi(self,
                   pitch_low_passband=0,
                   pitch_high_passband=127,
                   time_offset=0.0):
        """
        Read the binary MIDI file at @self._path and build a list of notes
        with pitch, onset, and offset times in seconds.
            Note: only the first track of the MIDI file is used.

        PARAMETERS:
            pitch_low_passband (int): drop notes whose MIDI number is below this bound
            pitch_high_passband (int): drop notes whose MIDI number is above this bound
            time_offset (float): amount of time (s) to delay MIDI note events

        RETURNS:
            notes (list of NoteEvents)
        """

        midi = PrettyMIDI(self._path)
        midi.remove_invalid_notes()

        # Nothing to parse when the file has no instruments.
        if not midi.instruments:
            return []

        notes = []
        for event in midi.instruments[0].notes:
            # Skip pitches outside the requested passband.
            if not (pitch_low_passband <= event.pitch <= pitch_high_passband):
                continue
            pname, octave = Note.midi_to_pitch(event.pitch)
            parsed = Note(
                pname,
                octave,
                onset_ts=(event.start + time_offset),
                offset_ts=(event.end + time_offset),
            )
            parsed.onset_tick = midi.time_to_tick(parsed.onset_ts)
            parsed.offset_tick = midi.time_to_tick(parsed.offset_ts)
            notes.append(parsed)

        return notes
Example #2
0
def _calc_drum_matrices(
        pm: PrettyMIDI, instrument: Instrument, midi_total_notes: int,
        matrix_total_notes: int, ticks_per_note: float, hop_size: int,
        n_drums: int, midi_drum_map: Dict[int, int],
        min_onsets: int) -> 'Tuple[List[np.ndarray], List[np.ndarray]]':
    """Quantize an instrument's drum hits onto a fixed note grid and slice
    the grid into hop-strided windows.

    Args:
        pm: source MIDI, used only for time -> tick conversion.
        instrument: instrument whose notes are quantized.
        midi_total_notes: number of grid rows covering the whole MIDI.
        matrix_total_notes: number of grid rows per extracted window.
        ticks_per_note: tick length of one grid cell.
        hop_size: stride (in grid rows) between consecutive window starts.
        n_drums: number of drum classes (columns of each matrix).
        midi_drum_map: MIDI pitch -> drum-class column index.
        min_onsets: windows containing fewer onsets than this are discarded.

    Returns:
        (onset_matrices, vel_matrices): parallel lists of
        (matrix_total_notes, n_drums) int32 arrays. NOTE: the returned
        arrays are *views* into one shared backing matrix, not copies.
    """
    # Fix: the original return annotation `(List[...], List[...])` was a
    # literal tuple, not a valid PEP 484 type; a tuple type must be spelled
    # Tuple[...]. Written as a string so it needs no runtime import.
    onset_matrices = []
    vel_matrices = []

    # Create one matrix for the entire MIDI to prevent mem-alloc ops
    midi_onset_matrix = np.zeros((midi_total_notes, n_drums), dtype=np.int32)
    midi_vel_matrix = np.zeros((midi_total_notes, n_drums), dtype=np.int32)

    for note in instrument.notes:
        if note.pitch in midi_drum_map and note.velocity > 0:
            note_start_ticks = pm.time_to_tick(note.start)
            # Round to the nearest grid row (int(x + 0.5) == round-half-up).
            note_idx = int((note_start_ticks / ticks_per_note) + 0.5)
            drum_idx = midi_drum_map[note.pitch]
            vel = note.velocity

            # Ignore notes quantized forward past last beat of MIDI file
            if note_idx < midi_total_notes:
                midi_onset_matrix[note_idx, drum_idx] = 1

                prev_vel = midi_vel_matrix[note_idx, drum_idx]
                # If multiple hits exist, use the louder one
                midi_vel_matrix[note_idx, drum_idx] = max(prev_vel, vel)
        else:
            log.debug(f'MIDI note pitch of {note.pitch} not found or vel is '
                      f'equal to 0. Vel = {note.velocity}')

    # Create data points via a single pass over entire MIDI matrix
    for start_note in range(0, midi_total_notes - matrix_total_notes + 1,
                            hop_size):
        end_note = start_note + matrix_total_notes
        sub_onset_matrix = midi_onset_matrix[start_note:end_note, :]
        num_of_onsets = np.sum(sub_onset_matrix)

        if num_of_onsets >= min_onsets:
            onset_matrices.append(sub_onset_matrix)

            # Avoid unnecessary ndarray creation
            sub_vel_matrix = midi_vel_matrix[start_note:end_note, :]
            vel_matrices.append(sub_vel_matrix)

    return onset_matrices, vel_matrices
Example #3
0
    def from_file(
        cls,
        mid: PrettyMIDI,
        bpm: int,
        beats: int = 8,
        steps: int = 16,
    ):
        """Build a pattern instance from the drum track of *mid*.

        Each note onset is snapped to a (128 pitches x end_tick+1 steps)
        grid; when two notes land on the same (pitch, step) cell, the later
        one in track order overwrites the earlier.

        PARAMETERS:
            mid (PrettyMIDI): source MIDI file.
            bpm (int): tempo, stored on the returned instance.
            beats (int): number of beats in the pattern (default 8).
            steps (int): grid steps per beat unit (default 16).

        RETURNS:
            A cls instance wrapping the (pitch x step) note grid.
        """
        end_tick = beats * steps

        drum_track = _find_drum_track(mid)
        # Allocate the array. Fix: `np.object` was removed in NumPy 1.24;
        # plain `object` is the supported spelling and behaves identically.
        pattern = np.empty((128, int(end_tick + 1)), dtype=object)
        pattern.fill(None)
        # Add up notes
        for note in drum_track.notes:
            # Convert the onset time to ticks, then rescale ticks to grid
            # steps (mid.resolution is ticks per quarter note).
            note_start_tick = int(
                np.round(
                    mid.time_to_tick(note.start) / mid.resolution * steps))
            if note_start_tick <= end_tick:
                pattern[note.pitch, note_start_tick] = note
        return cls(pattern, bpm, beats, steps)
Example #4
0
def extract_notes(midi_handler: pretty_midi.PrettyMIDI):
    """Group every non-drum instrument's notes by bar and compute per-bar
    averages.

    Notes of one instrument that start on the same tick are collapsed to
    the one with the highest clef-adjusted pitch.

    PARAMETERS:
        midi_handler (pretty_midi.PrettyMIDI): parsed MIDI file.

    RETURNS:
        [avg_data, new_mid_notes, total_bars] where:
            avg_data (list of dict): per instrument, bar index ->
                (average adjusted pitch, average note duration in seconds)
            new_mid_notes (list): per instrument, a list of per-bar dicts
                mapping start tick -> (pitch, channel, bar, end tick,
                duration ticks, velocity)
            total_bars (int): number of whole bars in the file
    """
    print("Total ticks:",
          midi_handler.time_to_tick(midi_handler.get_end_time()))
    print("Time signatures:", midi_handler.time_signature_changes)
    print("Resolution:", midi_handler.resolution)
    new_mid_notes = []
    avg_data = []

    # Fall back to 4/4 when the file declares no time signature.
    if midi_handler.time_signature_changes:
        num = midi_handler.time_signature_changes[0].numerator
        denom = midi_handler.time_signature_changes[0].denominator
    else:
        num = 4
        denom = 4

    resolution = midi_handler.resolution
    # resolution is ticks per quarter note; the denominator rescales it to
    # ticks per beat of the declared time signature.
    ticks_per_bar = num * (resolution / (denom / 4))
    total_bars = int(
        midi_handler.time_to_tick(midi_handler.get_end_time()) //
        ticks_per_bar)

    for current_channel, instrument in enumerate(midi_handler.instruments):
        # Fix: also skip instruments with no notes -- `instrument.notes[0]`
        # below would raise IndexError on an empty instrument.
        if instrument.is_drum or not instrument.notes:
            continue

        ch = []
        avg_data_ch = {}
        bar = {}
        sum_pitch = 0
        sum_dur = 0
        current_bar = int(
            midi_handler.time_to_tick(instrument.notes[0].start) //
            ticks_per_bar)

        for note in instrument.notes:
            starting_tick = midi_handler.time_to_tick(note.start)
            nro_bar = int(starting_tick // ticks_per_bar)

            # Crossed into a new bar: flush the finished bar's averages.
            if nro_bar != current_bar:
                notes_per_bar = len(bar.keys())
                avg_data_ch[current_bar] = (sum_pitch / notes_per_bar,
                                            sum_dur / notes_per_bar)
                ch.append(bar)
                bar = {}
                current_bar = nro_bar
                sum_pitch = sum_dur = 0

            if starting_tick not in bar.keys():
                # Notes at/above middle C (60) are shifted down into the
                # other clef's range.
                # NOTE(review): the original comment said "12 pitch levels"
                # but the code subtracts 13 -- confirm the intended offset.
                sum_pitch += note.pitch if note.pitch < 60 else (note.pitch -
                                                                 13)
                sum_dur += note.get_duration()
                bar[starting_tick] = (note.pitch, current_channel, nro_bar,
                                      midi_handler.time_to_tick(note.end),
                                      midi_handler.time_to_tick(note.duration),
                                      note.velocity)
            else:
                # If the current note overlaps with a previous one
                # (they play at the same time/tick)
                # we will keep the one with the highest pitch
                new_pitch = note.pitch if note.pitch < 60 else (note.pitch -
                                                                13)
                old_pitch = bar[starting_tick][0] if bar[starting_tick][
                    0] < 60 else (bar[starting_tick][0] - 13)

                if new_pitch > old_pitch:
                    old_duration = midi_handler.tick_to_time(
                        bar[starting_tick][4])

                    # Remove the replaced note's contribution from the
                    # running sums, then add the new note's.
                    sum_pitch -= old_pitch
                    sum_dur -= old_duration

                    sum_pitch += new_pitch
                    sum_dur += note.get_duration()

                    bar[starting_tick] = (note.pitch, current_channel, nro_bar,
                                          midi_handler.time_to_tick(note.end),
                                          midi_handler.time_to_tick(
                                              note.duration), note.velocity)

        # Flush the final, still-open bar (non-empty: the instrument has at
        # least one note and every note lands in some bar dict).
        notes_per_bar = len(bar.keys())
        avg_data_ch[current_bar] = (sum_pitch / notes_per_bar,
                                    sum_dur / notes_per_bar)
        ch.append(bar)

        new_mid_notes.append(ch)
        avg_data.append(avg_data_ch)

    return [avg_data, new_mid_notes, total_bars]