Example #1
import numpy as np
import pretty_midi


def test_adjust_times():
    # Simple tests for adjusting note times
    def simple():
        pm = pretty_midi.PrettyMIDI()
        i = pretty_midi.Instrument(0)
        # Create 9 notes, at times [1, 2, 3, 4, 5, 6, 7, 8, 9]
        for n, start in enumerate(range(1, 10)):
            i.notes.append(pretty_midi.Note(100, 100 + n, start, start + .5))
        pm.instruments.append(i)
        return pm

    # Test notes are interpolated as expected
    pm = simple()
    pm.adjust_times([0, 10], [5, 20])
    for note, start in zip(pm.instruments[0].notes,
                           1.5 * np.arange(1, 10) + 5):
        assert note.start == start
    # Test notes are all omitted when the adjustment range doesn't cover them
    pm = simple()
    pm.adjust_times([10, 20], [5, 10])
    assert len(pm.instruments[0].notes) == 0
    # Test repeated mapping times
    pm = simple()
    pm.adjust_times([0, 5, 6.5, 10], [5, 10, 10, 17])
    # The notes at times 5 and 6 are dropped: the collapsed segment squashes
    # their durations to zero. Surviving original times: [1, 2, 3, 4, 7, 8, 9]
    expected_starts = [6, 7, 8, 9, 11, 13, 15]
    assert np.allclose([n.start for n in pm.instruments[0].notes],
                       expected_starts)
    pm = simple()
    # A repeated original time (5) introduces a jump of +2 in the new timeline
    pm.adjust_times([0, 5, 5, 10], [5, 10, 12, 17])
    # Original times  [1, 2, 3, 4,  5,  6,  7,  8,  9]
    expected_starts = [6, 7, 8, 9, 12, 13, 14, 15, 16]
    assert np.allclose([n.start for n in pm.instruments[0].notes],
                       expected_starts)

    # Complicated example
    pm = simple()
    # Include pitch bends and control changes to test adjust_events
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(100, 1.))
    # Include events which fall within the collapsed region
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(200, 7.))
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(0, 7.1))
    # Include an event which falls outside the adjusted range
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(10, 10.))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 0, .5))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 1, 5.5))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 2, 7.5))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 3, 20.))
    # Include track-level meta events to test adjust_meta
    pm.time_signature_changes.append(pretty_midi.TimeSignature(3, 4, .1))
    pm.time_signature_changes.append(pretty_midi.TimeSignature(4, 4, 5.2))
    pm.time_signature_changes.append(pretty_midi.TimeSignature(6, 4, 6.2))
    pm.time_signature_changes.append(pretty_midi.TimeSignature(5, 4, 15.3))
    pm.key_signature_changes.append(pretty_midi.KeySignature(1, 1.))
    pm.key_signature_changes.append(pretty_midi.KeySignature(2, 6.2))
    pm.key_signature_changes.append(pretty_midi.KeySignature(3, 7.2))
    pm.key_signature_changes.append(pretty_midi.KeySignature(4, 12.3))
    # Add in tempo changes - 100 bpm at 0s
    pm._tick_scales[0] = (0, 60. / (100 * pm.resolution))
    # 110 bpm at 6s
    pm._tick_scales.append((2200, 60. / (110 * pm.resolution)))
    # 120 bpm at 8.1s
    pm._tick_scales.append((3047, 60. / (120 * pm.resolution)))
    # 150 bpm at 8.3s
    pm._tick_scales.append((3135, 60. / (150 * pm.resolution)))
    # 80 bpm at 9.3s
    pm._tick_scales.append((3685, 60. / (80 * pm.resolution)))
    pm._update_tick_to_time(20000)

    # Adjust times, with a collapsing section in original and new times
    pm.adjust_times([2., 3.1, 3.1, 5.1, 7.5, 10], [5., 6., 7., 8.5, 8.5, 11])

    # Original tempo change times: [0, 6, 8.1, 8.3, 9.3]
    # Plus tempo changes at each of new_times which are not collapsed
    # Plus tempo change at 0s by default
    expected_times = [
        0., 5., 6., 8.5, 8.5 + (6 - 5.1) * (11 - 8.5) / (10 - 5.1),
        8.5 + (8.1 - 5.1) * (11 - 8.5) / (10 - 5.1),
        8.5 + (8.3 - 5.1) * (11 - 8.5) / (10 - 5.1),
        8.5 + (9.3 - 5.1) * (11 - 8.5) / (10 - 5.1)
    ]
    # Tempos scaled by differences in timing, plus 120 bpm at the beginning
    expected_tempi = [
        120., 100 * (3.1 - 2) / (6 - 5), 100 * (5.1 - 3.1) / (8.5 - 6),
        100 * (10 - 5.1) / (11 - 8.5), 110 * (10 - 5.1) / (11 - 8.5),
        120 * (10 - 5.1) / (11 - 8.5), 150 * (10 - 5.1) / (11 - 8.5),
        80 * (10 - 5.1) / (11 - 8.5)
    ]
    change_times, tempi = pm.get_tempo_changes()
    # Because tempo change times must occur at discrete ticks, raise the
    # relative tolerance when comparing
    assert np.allclose(expected_times, change_times, rtol=.001)
    assert np.allclose(expected_tempi, tempi, rtol=.002)

    # Test that all other events were interpolated as expected
    note_starts = [
        5., 5 + 1 / 1.1, 7 + .9 / (2 / 1.5), 7 + 1.9 / (2 / 1.5), 8.5 + .5,
        8.5 + 1.5
    ]
    note_ends = [
        5 + .5 / 1.1, 7 + .4 / (2 / 1.5), 7 + 1.4 / (2 / 1.5), 8.5, 9 + .5,
        10 + .5
    ]
    # Only six notes survive (the notes at 1, 6 and 7 are dropped)
    note_pitches = [101, 102, 103, 104, 107, 108]
    for note, s, e, p in zip(pm.instruments[0].notes, note_starts, note_ends,
                             note_pitches):
        assert note.start == s
        assert note.end == e
        assert note.pitch == p

    bend_times = [5., 8.5, 8.5]
    bend_pitches = [100, 200, 0]
    for bend, t, p in zip(pm.instruments[0].pitch_bends, bend_times,
                          bend_pitches):
        assert bend.time == t
        assert bend.pitch == p

    cc_times = [5., 8.5, 8.5]
    cc_values = [0, 1, 2]
    for cc, t, v in zip(pm.instruments[0].control_changes, cc_times,
                        cc_values):
        assert cc.time == t
        assert cc.value == v

    # The first time signature change will be placed at the first interpolated
    # downbeat location - so, start by computing the location of the first
    # downbeat after the start of original_times, then interpolate it
    first_downbeat_after = .1 + 2 * 3 * 60. / 100.
    first_ts_time = 7 + (first_downbeat_after - 3.1) / (2 / 1.5)
    ts_times = [first_ts_time, 8.5, 8.5]
    ts_numerators = [3, 4, 6]
    for ts, t, n in zip(pm.time_signature_changes, ts_times, ts_numerators):
        assert ts.time == t
        assert ts.numerator == n

    ks_times = [5., 8.5, 8.5]
    ks_keys = [1, 2, 3]
    for ks, t, k in zip(pm.key_signature_changes, ks_times, ks_keys):
        assert ks.time == t
        assert ks.key_number == k
Example #2
import collections

import pretty_midi

# `constants` and `_PRETTY_MIDI_MAJOR_TO_MINOR_OFFSET` are defined in the
# enclosing module of the original source.


def note_sequence_to_pretty_midi(sequence,
                                 drop_events_n_seconds_after_last_note=None):
    """Convert NoteSequence to a PrettyMIDI.

  Time is stored in the NoteSequence in absolute values (seconds) as opposed to
  relative values (MIDI ticks). When the NoteSequence is translated back to
  PrettyMIDI the absolute time is retained. The tempo map is also recreated.

  Args:
    sequence: A NoteSequence.
    drop_events_n_seconds_after_last_note: Events (e.g., time signature changes)
        that occur this many seconds after the last note will be dropped. If
        None, then no events will be dropped.

  Returns:
    A pretty_midi.PrettyMIDI object or None if sequence could not be decoded.
  """
    ticks_per_quarter = sequence.ticks_per_quarter or constants.STANDARD_PPQ

    max_event_time = None
    if drop_events_n_seconds_after_last_note is not None:
        max_event_time = (max([n.end_time for n in sequence.notes] or [0]) +
                          drop_events_n_seconds_after_last_note)

    # Try to find a tempo at time zero. The list is not guaranteed to be in order.
    initial_seq_tempo = None
    for seq_tempo in sequence.tempos:
        if seq_tempo.time == 0:
            initial_seq_tempo = seq_tempo
            break

    kwargs = {}
    if initial_seq_tempo:
        kwargs['initial_tempo'] = initial_seq_tempo.qpm
    else:
        kwargs['initial_tempo'] = constants.DEFAULT_QUARTERS_PER_MINUTE

    pm = pretty_midi.PrettyMIDI(resolution=ticks_per_quarter, **kwargs)

    # Create an empty instrument to contain time and key signatures.
    instrument = pretty_midi.Instrument(0)
    pm.instruments.append(instrument)

    # Populate time signatures.
    for seq_ts in sequence.time_signatures:
        if max_event_time and seq_ts.time > max_event_time:
            continue
        time_signature = pretty_midi.containers.TimeSignature(
            seq_ts.numerator, seq_ts.denominator, seq_ts.time)
        pm.time_signature_changes.append(time_signature)

    # Populate key signatures.
    for seq_key in sequence.key_signatures:
        if max_event_time and seq_key.time > max_event_time:
            continue
        key_number = seq_key.key
        if seq_key.mode == seq_key.MINOR:
            key_number += _PRETTY_MIDI_MAJOR_TO_MINOR_OFFSET
        key_signature = pretty_midi.containers.KeySignature(
            key_number, seq_key.time)
        pm.key_signature_changes.append(key_signature)

    # Populate tempos.
    # TODO(douglaseck): Update this code if pretty_midi adds the ability to
    # write tempo.
    for seq_tempo in sequence.tempos:
        # Skip if this tempo was added in the PrettyMIDI constructor.
        if seq_tempo == initial_seq_tempo:
            continue
        if max_event_time and seq_tempo.time > max_event_time:
            continue
        tick_scale = 60.0 / (pm.resolution * seq_tempo.qpm)
        tick = pm.time_to_tick(seq_tempo.time)
        # pylint: disable=protected-access
        pm._tick_scales.append((tick, tick_scale))
        pm._update_tick_to_time(0)
        # pylint: enable=protected-access

    # Populate instrument events by first gathering notes and other event
    # types in lists, then writing them sorted to the PrettyMIDI object.
    instrument_events = collections.defaultdict(
        lambda: collections.defaultdict(list))
    for seq_note in sequence.notes:
        instrument_events[(seq_note.instrument, seq_note.program,
                           seq_note.is_drum)]['notes'].append(
                               pretty_midi.Note(seq_note.velocity,
                                                seq_note.pitch,
                                                seq_note.start_time,
                                                seq_note.end_time))
    for seq_bend in sequence.pitch_bends:
        if max_event_time and seq_bend.time > max_event_time:
            continue
        instrument_events[(seq_bend.instrument, seq_bend.program,
                           seq_bend.is_drum)]['bends'].append(
                               pretty_midi.PitchBend(seq_bend.bend,
                                                     seq_bend.time))
    for seq_cc in sequence.control_changes:
        if max_event_time and seq_cc.time > max_event_time:
            continue
        instrument_events[(seq_cc.instrument, seq_cc.program,
                           seq_cc.is_drum)]['controls'].append(
                               pretty_midi.ControlChange(
                                   seq_cc.control_number, seq_cc.control_value,
                                   seq_cc.time))

    for (instr_id, prog_id, is_drum) in sorted(instrument_events.keys()):
        # For instr_id 0 append to the instrument created above.
        if instr_id > 0:
            instrument = pretty_midi.Instrument(prog_id, is_drum)
            pm.instruments.append(instrument)
        instrument.program = prog_id
        instrument.notes = instrument_events[(instr_id, prog_id,
                                              is_drum)]['notes']
        instrument.pitch_bends = instrument_events[(instr_id, prog_id,
                                                    is_drum)]['bends']
        instrument.control_changes = instrument_events[(instr_id, prog_id,
                                                        is_drum)]['controls']

    return pm
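
pretty_midi has no public API for writing tempo changes, which is why the function above appends to the private _tick_scales list and then rebuilds the tick-to-time mapping; the test in Example #1 uses the same trick. A standalone sketch of that workaround (the private attribute names could change in future pretty_midi versions):

import pretty_midi

pm = pretty_midi.PrettyMIDI(initial_tempo=100)  # 100 qpm from time 0

# Insert a 140 qpm tempo change at t = 4 seconds
tick_scale = 60.0 / (pm.resolution * 140)
tick = pm.time_to_tick(4.0)
pm._tick_scales.append((tick, tick_scale))  # private API, use with care
pm._update_tick_to_time(0)                  # rebuild the tick->time map

# Change times snap to discrete ticks, so expect roughly
# (array([0., 4.]), array([100., 140.]))
print(pm.get_tempo_changes())
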
Example #3
from collections import defaultdict

import pretty_midi

# `_PRETTY_MIDI_MAJOR_TO_MINOR_OFFSET` is defined in the enclosing module of
# the original source (see Example #2).


def sequence_proto_to_pretty_midi(sequence):
  """Convert tensorflow.magenta.NoteSequence proto to a PrettyMIDI.

  Time is stored in the NoteSequence in absolute values (seconds) as opposed to
  relative values (MIDI ticks). When the NoteSequence is translated back to
  PrettyMIDI the absolute time is retained. The tempo map is also recreated.

  Args:
    sequence: A tensorflow.magenta.NoteSequence proto.

  Returns:
    A pretty_midi.PrettyMIDI object or None if sequence could not be decoded.
  """

  kwargs = {}
  if sequence.tempos and sequence.tempos[0].time == 0:
    kwargs['initial_tempo'] = sequence.tempos[0].bpm
  pm = pretty_midi.PrettyMIDI(resolution=sequence.ticks_per_beat, **kwargs)

  # Create an empty instrument to contain time and key signatures.
  instrument = pretty_midi.Instrument(0)
  pm.instruments.append(instrument)

  # Populate time signatures.
  for seq_ts in sequence.time_signatures:
    time_signature = pretty_midi.containers.TimeSignature(
        seq_ts.numerator, seq_ts.denominator, seq_ts.time)
    pm.time_signature_changes.append(time_signature)

  # Populate key signatures.
  for seq_key in sequence.key_signatures:
    key_number = seq_key.key
    if seq_key.mode == seq_key.MINOR:
      key_number += _PRETTY_MIDI_MAJOR_TO_MINOR_OFFSET
    key_signature = pretty_midi.containers.KeySignature(
        key_number, seq_key.time)
    pm.key_signature_changes.append(key_signature)

  # Populate tempos. The first tempo change was set in the PrettyMIDI
  # constructor.
  # TODO(@douglaseck): Update this code if pretty_midi adds the ability to
  # write tempo.
  if len(sequence.tempos) > 1:
    for seq_tempo in sequence.tempos[1:]:
      tick_scale = 60.0 / (pm.resolution * seq_tempo.bpm)
      tick = pm.time_to_tick(seq_tempo.time)
      # pylint: disable=protected-access
      # Current pretty_midi stores the tempo map in `_tick_scales`; rebuild
      # the tick-to-time mapping so later time_to_tick calls are correct.
      pm._tick_scales.append((tick, tick_scale))
      pm._update_tick_to_time(0)
      # pylint: enable=protected-access

  # Populate instrument events by first gathering notes and other event types
  # in lists, then writing them sorted to the PrettyMIDI object.
  instrument_events = defaultdict(lambda: defaultdict(list))
  for seq_note in sequence.notes:
    instrument_events[(seq_note.instrument, seq_note.program)]['notes'].append(
        pretty_midi.Note(seq_note.velocity, seq_note.pitch,
                         seq_note.start_time, seq_note.end_time))
  for seq_bend in sequence.pitch_bends:
    instrument_events[(seq_bend.instrument, seq_bend.program)]['bends'].append(
        pretty_midi.PitchBend(seq_bend.bend, seq_bend.time))
  for seq_cc in sequence.control_changes:
    instrument_events[(seq_cc.instrument, seq_cc.program)]['controls'].append(
        pretty_midi.ControlChange(seq_cc.control_number,
                                  seq_cc.control_value, seq_cc.time))

  for (instr_id, prog_id) in sorted(instrument_events.keys()):
    # For instr_id 0 append to the instrument created above.
    if instr_id > 0:
      instrument = pretty_midi.Instrument(prog_id, is_drum=(instr_id == 9))
      pm.instruments.append(instrument)
    instrument.program = prog_id
    instrument.notes = instrument_events[(instr_id, prog_id)]['notes']
    instrument.pitch_bends = instrument_events[(instr_id, prog_id)]['bends']
    instrument.control_changes = instrument_events[
        (instr_id, prog_id)]['controls']

  return pm
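
Both converters shift minor keys by _PRETTY_MIDI_MAJOR_TO_MINOR_OFFSET. In pretty_midi's key numbering, 0-11 are the major keys and 12-23 the corresponding minor keys, so the offset is presumably 12. A quick check using pretty_midi's own helper:

import pretty_midi

# pretty_midi numbers key signatures 0-11 major, 12-23 minor
print(pretty_midi.key_name_to_key_number('C major'))  # 0
print(pretty_midi.key_name_to_key_number('C minor'))  # 12
print(pretty_midi.key_name_to_key_number('A minor'))  # 21
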
Example #4
import math

import IPython.display
import pretty_midi


def make_music(
    pitches=60,
    durs=0.333,
    pgm=1,
    pan=64,
    is_drum=False,
    format="inbrowser",
    sr=16000,
    resolution=220,
):
    """Turn lists of numbers into music.

    Converts pitch and duration values into MIDI and/or audio playback. Uses
    `pretty_midi` for MIDI representation handling, fluidsynth for resynthesis,
    and `IPython.display.Audio` for browser playback.

    Parameters
    ----------
    pitches : list or scalar
        List of pitches, or scalar if constant pitch. Floating point values are
        interpreted as microtonal pitch deviations.
    durs : list or scalar
        List of durations, or scalar if constant duration.
    pgm : number
        MIDI program number, in range ``[0, 127]``.
    pan : number
        Pan value, in range ``[0, 127]``.
    is_drum : bool
        If True use percussion channel 10.
    format : string
        Which format to render the sound to:
        - `'MIDI'` returns MIDI as a `pretty_midi` object
        - `'audio'` returns waveforms as a `numpy` nd.array
        - `'inbrowser'` returns `IPython.display.Audio` widget
        - `'autoplay'` returns `IPython.display.Audio` widget and plays it

    Returns
    -------
    synthesized: depends on the value of `format`.

    Notes
    -----
    If len(pitches) and len(durs) do not match, the shorter list is extended
    to match the length of the longer list by repeating its last value.
    """

    # check and convert to list if needed
    pitches = pitches if isinstance(pitches, list) else [pitches]
    durs = durs if isinstance(durs, list) else [durs]

    # extend short lists if size mismatch
    max_length = max(len(pitches), len(durs))
    pitches += [pitches[-1]] * (max_length - len(pitches))
    durs += [durs[-1]] * (max_length - len(durs))

    # create a PrettyMIDI score
    score = pretty_midi.PrettyMIDI(resolution=resolution)

    # create a list of instruments, one per voice (to allow polyphonic pitch bends)
    num_voices = max([len(p) if isinstance(p, list) else 1 for p in pitches])
    ins = [
        pretty_midi.Instrument(program=max(pgm - 1, 0), is_drum=is_drum)
        for i in range(num_voices)
    ]

    # apply pan to all instruments
    for instrument in ins:
        cc = pretty_midi.ControlChange(10, pan, 0)
        instrument.control_changes.append(cc)

    # iterate through music
    now_time = 0
    for pitch, dur in zip(pitches, durs):

        # rest if pitch is None
        if pitch is not None:

            # convert to list if needed
            pitch = pitch if isinstance(pitch, list) else [pitch]

            # loop through each voice of the list
            for voice_index, pitch_val in enumerate(pitch):

                # split into 12tet and microtones
                micros, twlvtet = math.modf(pitch_val)

                # create a new note
                note = pretty_midi.Note(
                    velocity=100,
                    pitch=int(twlvtet),
                    start=now_time,
                    end=now_time + dur,
                )

                # and add it to the instrument
                ins[voice_index].notes.append(note)

                # if microtonal
                if micros != 0:

                    # create a new pitch bend
                    # note: 4096 is a semitone in standard MIDI +/-2 pitchbend range
                    micropitch = pretty_midi.PitchBend(pitch=int(
                        round(micros * 4096)),
                                                       time=now_time)

                    # and add it to the instrument
                    ins[voice_index].pitch_bends.append(micropitch)

        # advance time
        now_time += dur

    # add the instruments to the score
    score.instruments.extend(ins)

    # which format to render
    if format == "MIDI":
        return score
    elif format == "audio":
        return score.fluidsynth(fs=sr)
    elif format == "inbrowser":
        return IPython.display.Audio(score.fluidsynth(fs=sr), rate=sr)
    elif format == "autoplay":
        return IPython.display.Audio(score.fluidsynth(fs=sr),
                                     rate=sr,
                                     autoplay=True)
    else:
        raise ValueError(
            "Your `format` argument did not match one of the available options"
        )
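
A short usage sketch for make_music, based only on its docstring and body above: None is a rest, a fractional pitch becomes a microtonal pitch bend, and format="MIDI" returns the pretty_midi object instead of audio:

# A melody with a rest and a quarter tone, returned as a pretty_midi object
score = make_music(
    pitches=[60, 62, None, 64.5],  # 64.5 -> quarter-tone bend above E
    durs=[0.25, 0.25, 0.5, 1.0],
    pgm=1,
    format="MIDI",
)
print(score.instruments[0].notes)
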
Example #5
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import pretty_midi

pm = pretty_midi.PrettyMIDI(initial_tempo=80)

inst = pretty_midi.Instrument(program=42, is_drum=False, name='my cello')
pm.instruments.append(inst)

velocity = 100
for pitch, start, end in zip([60, 62, 64], [0.2, 0.6, 1.0], [1.1, 1.7, 2.3]):
    inst.notes.append(pretty_midi.Note(velocity, pitch, start, end))
print(inst.notes)

# We'll just do a 1-semitone pitch ramp up
# (8192 is the full +2-semitone bend range, so 8192//2 = 4096 is one semitone)
n_steps = 512
bend_range = 8192//2
for time, pitch in zip(np.linspace(1.5, 2.3, n_steps),
                       range(0, bend_range, bend_range//n_steps)):
    inst.pitch_bends.append(pretty_midi.PitchBend(pitch, time))

def plot_piano_roll(pm, start_pitch, end_pitch, fs=100):
    # Use librosa's specshow function for displaying the piano roll
    librosa.display.specshow(pm.get_piano_roll(fs)[start_pitch:end_pitch],
                             hop_length=1, sr=fs, x_axis='time', y_axis='cqt_note',
                             fmin=pretty_midi.note_number_to_hz(start_pitch))

plt.figure(figsize=(8, 4))
plot_piano_roll(pm, 56, 70)


pm = pretty_midi.PrettyMIDI('example.mid')

plt.figure(figsize=(12, 4))
plot_piano_roll(pm, 24, 84)
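
get_piano_roll(fs) samples the piano roll at fs columns per second and returns a (128, n_frames) array, which is why plot_piano_roll can hand fs to specshow as the sample rate with hop_length=1. A quick shape check:

roll = pm.get_piano_roll(fs=100)
print(roll.shape)         # (128, n_frames), one column per 10 ms
print(pm.get_end_time())  # n_frames is roughly fs * get_end_time()
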
Example #6

import os

import pandas as pd
import pretty_midi

# `fileio` is the module under test in the project this example comes from.


def test_df_to_midi():
    df = pd.DataFrame(
        {
            "onset": 0,
            "track": [0, 0, 1],
            "pitch": [10, 20, 30],
            "dur": 1000,
            "velocity": 50,
        }
    )

    # Test basic writing
    fileio.df_to_midi(df, "test.mid")
    assert fileio.midi_to_df("test.mid").equals(
        df
    ), "Writing df to MIDI and reading changes df."

    # Test that writing overwrites existing notes
    df.pitch += 10
    fileio.df_to_midi(df, "test2.mid", existing_midi_path="test.mid")
    assert fileio.midi_to_df("test2.mid").equals(
        df
    ), "Writing df to MIDI with existing MIDI does not overwrite notes."

    # Test that notes before excerpt_start are copied from the existing MIDI
    fileio.df_to_midi(
        df, "test2.mid", existing_midi_path="test.mid", excerpt_start=1000
    )
    expected = pd.DataFrame(
        {
            "onset": [0, 0, 0, 1000, 1000, 1000],
            "track": [0, 0, 1, 0, 0, 1],
            "pitch": [10, 20, 30, 20, 30, 40],
            "dur": 1000,
            "velocity": 50,
        }
    )
    assert fileio.midi_to_df("test2.mid").equals(
        expected
    ), "Writing to MIDI doesn't copy notes before excerpt_start"

    # Test that notes after excerpt_length are copied from the existing MIDI
    fileio.df_to_midi(
        df, "test.mid", existing_midi_path="test2.mid", excerpt_length=1000
    )
    expected = pd.DataFrame(
        {
            "onset": [0, 0, 0, 1000, 1000, 1000],
            "track": [0, 0, 1, 0, 0, 1],
            "pitch": [20, 30, 40, 20, 30, 40],
            "dur": 1000,
            "velocity": 50,
        }
    )
    assert fileio.midi_to_df("test.mid").equals(
        expected
    ), "Writing to MIDI doesn't copy notes after excerpt_length"

    # Test writing to a track that doesn't exist in the existing MIDI
    df.track = 2
    fileio.df_to_midi(
        df, "test.mid", existing_midi_path="test2.mid", excerpt_length=1000
    )
    expected = pd.DataFrame(
        {
            "onset": [0, 0, 0, 1000, 1000, 1000],
            "track": [2, 2, 2, 0, 0, 1],
            "pitch": [20, 30, 40, 20, 30, 40],
            "dur": 1000,
            "velocity": 50,
        }
    )
    assert fileio.midi_to_df("test.mid").equals(
        expected
    ), "Writing to MIDI with extra track breaks"

    # Check all non-note events
    midi_obj = pretty_midi.PrettyMIDI("test.mid")
    midi_obj.instruments[0].name = "test"
    midi_obj.instruments[0].program = 100
    midi_obj.instruments[0].is_drum = True
    midi_obj.instruments[0].pitch_bends.append(pretty_midi.PitchBend(10, 0))
    midi_obj.instruments[0].control_changes.append(pretty_midi.ControlChange(10, 10, 0))
    midi_obj.lyrics.append(pretty_midi.Lyric("test", 0))
    midi_obj.time_signature_changes.append(pretty_midi.TimeSignature(2, 4, 1))
    midi_obj.key_signature_changes.append(pretty_midi.KeySignature(5, 1))
    midi_obj.write("test.mid")

    fileio.df_to_midi(expected, "test2.mid", existing_midi_path="test.mid")
    assert fileio.midi_to_df("test2.mid").equals(expected)

    # Check that non-note events and data survived the round trip
    new_midi = pretty_midi.PrettyMIDI("test2.mid")

    for instrument, new_instrument in zip(midi_obj.instruments, new_midi.instruments):
        assert instrument.name == new_instrument.name
        assert instrument.program == new_instrument.program
        assert instrument.is_drum == new_instrument.is_drum
        for pb, new_pb in zip(instrument.pitch_bends, new_instrument.pitch_bends):
            assert pb.pitch == new_pb.pitch
            assert pb.time == new_pb.time
        for cc, new_cc in zip(
            instrument.control_changes, new_instrument.control_changes
        ):
            assert cc.number == new_cc.number
            assert cc.value == new_cc.value
            assert cc.time == new_cc.time

    for ks, new_ks in zip(
        midi_obj.key_signature_changes, new_midi.key_signature_changes
    ):
        assert ks.key_number == new_ks.key_number
        assert ks.time == new_ks.time

    for lyric, new_lyric in zip(midi_obj.lyrics, new_midi.lyrics):
        assert lyric.text == new_lyric.text
        assert lyric.time == new_lyric.time

    for ts, new_ts in zip(
        midi_obj.time_signature_changes, new_midi.time_signature_changes
    ):
        assert ts.numerator == new_ts.numerator
        assert ts.denominator == new_ts.denominator
        assert ts.time == new_ts.time

    for filename in ["test.mid", "test2.mid"]:
        try:
            os.remove(filename)
        except Exception:
            pass
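
For context, a rough, hypothetical sketch of the note-writing half of fileio.df_to_midi, written directly against pretty_midi. It assumes, as the values in the test suggest, that onset and dur are in milliseconds and that each track value maps to one instrument; the real module also handles the non-note events checked above.

import pandas as pd
import pretty_midi


def df_to_midi_sketch(df: pd.DataFrame, path: str) -> None:
    """Hypothetical minimal writer: one instrument per track, times in ms."""
    pm = pretty_midi.PrettyMIDI()
    for track, notes in df.groupby("track"):
        inst = pretty_midi.Instrument(0, name=str(track))
        for _, row in notes.iterrows():
            inst.notes.append(pretty_midi.Note(
                int(row["velocity"]), int(row["pitch"]),
                row["onset"] / 1000, (row["onset"] + row["dur"]) / 1000))
        pm.instruments.append(inst)
    pm.write(path)
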