def proc_to_midi(
        melody_events,
        chord_events,
        key='C',
        to_chroma=False,
        bpm=120,
        beats_in_measure=4,
        save_path='./',
        name='test'):

    bpm = float(bpm)
    if bpm == 0.0:
        bpm = 120

    beats_in_measure = int(beats_in_measure)
    lead_sheet = pretty_midi.PrettyMIDI(initial_tempo=bpm)
    beats_sec = 60.0 / bpm

    chord_track, chord_symbols = wrapping_chord(chord_events, beats_sec, to_chroma=to_chroma)
    melody_track = wrapping_melody(melody_events, beats_sec)
    ts = pretty_midi.TimeSignature(beats_in_measure, 4, 0)
    ks = pretty_midi.KeySignature(get_key_offset(key), 0)

    lead_sheet.time_signature_changes.append(ts)
    lead_sheet.key_signature_changes.append(ks)

    lead_sheet.instruments.append(melody_track)
    lead_sheet.instruments.append(chord_track)
    lead_sheet.lyrics = chord_symbols

    if not os.path.exists(save_path):
        os.makedirs(save_path)
    filename = os.path.join(save_path, name+'.mid')
    lead_sheet.write(filename)
    return filename
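
The get_key_offset helper used above is not shown in this snippet; a minimal sketch of what it might look like, assuming it maps a key-name string to a pretty_midi key number (0 = C major, 1 = C#/Db major, ...), could be:

# Hypothetical stand-in for the get_key_offset helper; the real
# implementation is not included in this snippet.
MAJOR_KEY_NUMBERS = {
    'C': 0, 'C#': 1, 'Db': 1, 'D': 2, 'D#': 3, 'Eb': 3, 'E': 4, 'F': 5,
    'F#': 6, 'Gb': 6, 'G': 7, 'G#': 8, 'Ab': 8, 'A': 9, 'A#': 10,
    'Bb': 10, 'B': 11,
}

def get_key_offset(key='C'):
    # Fall back to C major (key number 0) for unrecognized key names.
    return MAJOR_KEY_NUMBERS.get(key, 0)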
Example #2
def proc_to_midi(melody_events,
                 chord_events,
                 chord_dic,
                 key='C',
                 to_chroma=False,
                 bpm=120,
                 beats_in_measure=4):

    bpm = float(bpm)
    if bpm == 0.0:
        bpm = 120

    beats_in_measure = int(beats_in_measure)
    beats_sec = 60.0 / bpm

    lead_sheet = pretty_midi.PrettyMIDI(initial_tempo=bpm)

    chord_track = wrapping_chord(chord_events, chord_dic, beats_sec)
    melody_track = wrapping_melody(melody_events, beats_sec)
    ts = pretty_midi.TimeSignature(beats_in_measure, 4, 0)
    ks = pretty_midi.KeySignature(get_key_offset(key), 0)

    lead_sheet.time_signature_changes.append(ts)
    lead_sheet.key_signature_changes.append(ks)

    lead_sheet.instruments.append(melody_track)
    lead_sheet.instruments.append(chord_track)

    return lead_sheet
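
The wrapping_melody helper is likewise not shown; a sketch of how it could convert beat-based events into a pretty_midi track, assuming melody_events is a list of (pitch, onset_beat, offset_beat) tuples, might look like this:

import pretty_midi

# Hypothetical wrapping_melody: melody_events is assumed to be a list of
# (pitch, onset_beat, offset_beat) tuples; beats_sec is seconds per beat.
def wrapping_melody(melody_events, beats_sec, velocity=100):
    track = pretty_midi.Instrument(program=0, name='melody')
    for pitch, onset_beat, offset_beat in melody_events:
        track.notes.append(pretty_midi.Note(
            velocity=velocity,
            pitch=int(pitch),
            start=onset_beat * beats_sec,   # beats -> seconds
            end=offset_beat * beats_sec))
    return track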
Example #3
def transpose(pm):
    """Transposes all non-drum instruments so the tonic pitch class becomes C
    and resets the key signature events to key number 0.
    Adds a default key signature of C major if no event is present.
    Parameters
    ----------
    pm : pretty_midi.PrettyMIDI
        MIDI data to transpose in place. Key numbers follow pretty_midi's
        convention: [0, 11] major, [12, 23] minor (e.g. 0 is C major,
        12 is C minor).
    """
    # Add default key signature of C major if none is present.
    if not pm.key_signature_changes:
        default_key_signature = pretty_midi.KeySignature(0, 0)
        pm.key_signature_changes.append(default_key_signature)

    for i in range(len(pm.key_signature_changes)):
        key_sig = pm.key_signature_changes[i]
        if key_sig.key_number != 0:
            start_time = key_sig.time
            # Look ahead to next key signature event, if any, to get end time.
            if i < len(pm.key_signature_changes) - 1:
                end_time = pm.key_signature_changes[i + 1].time
            else:
                end_time = float('inf')

            # Shift the tonic pitch class down to C.
            key_offset = -(key_sig.key_number % 12)
            # Move up or down based on which yields a smaller delta.
            if key_offset < -6:
                key_offset += 12
            if key_offset > 6:
                key_offset -= 12
            for instrument in pm.instruments:
                if not instrument.is_drum:
                    for note in instrument.notes:
                        if note.start >= start_time and note.start < end_time:
                            note.pitch += key_offset
            # Update the key signature number.
            key_sig.key_number = 0
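
As a worked example of the offset arithmetic above (a shift of more than six semitones down is replaced by the complementary shift up):

# Standalone illustration of the key_offset computation; not part of the
# original function.
def offset_to_c(key_number):
    offset = -(key_number % 12)
    if offset < -6:
        offset += 12
    return offset

assert offset_to_c(7) == 5     # G major: up 5 semitones instead of down 7
assert offset_to_c(2) == -2    # D major: down 2 semitones
assert offset_to_c(14) == -2   # D minor (12 + 2): same pitch-class shift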
Example #4
def test_get_end_time():
    pm = pretty_midi.PrettyMIDI()
    inst = pretty_midi.Instrument(0)
    pm.instruments.append(inst)
    # When no events, end time should be 0
    assert pm.get_end_time() == 0
    # End time should be sensitive to inst notes, pitch bends, control changes
    inst.notes.append(
        pretty_midi.Note(start=0.5, end=1.7, pitch=30, velocity=100))
    assert np.allclose(pm.get_end_time(), 1.7)
    inst.pitch_bends.append(pretty_midi.PitchBend(pitch=100, time=1.9))
    assert np.allclose(pm.get_end_time(), 1.9)
    inst.control_changes.append(
        pretty_midi.ControlChange(number=0, value=10, time=2.1))
    assert np.allclose(pm.get_end_time(), 2.1)
    # End time should be sensitive to meta events
    pm.time_signature_changes.append(
        pretty_midi.TimeSignature(numerator=4, denominator=4, time=2.3))
    assert np.allclose(pm.get_end_time(), 2.3)
    pm.key_signature_changes.append(
        pretty_midi.KeySignature(key_number=10, time=2.5))
    assert np.allclose(pm.get_end_time(), 2.5)
    pm.lyrics.append(pretty_midi.Lyric(text='hey', time=2.7))
    assert np.allclose(pm.get_end_time(), 2.7)
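
Conceptually, get_end_time returns the latest timestamp across all note, pitch-bend, control-change, and meta events; a rough sketch of that behaviour (not pretty_midi's actual implementation) is:

# Sketch only: mirrors what the assertions above check, not the library code.
def end_time_sketch(pm):
    times = [0.0]
    for inst in pm.instruments:
        times += [n.end for n in inst.notes]
        times += [b.time for b in inst.pitch_bends]
        times += [c.time for c in inst.control_changes]
    times += [e.time for e in pm.time_signature_changes]
    times += [k.time for k in pm.key_signature_changes]
    times += [l.time for l in pm.lyrics]
    return max(times)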
Example #5
def test_adjust_times():
    # Simple tests for adjusting note times
    def simple():
        pm = pretty_midi.PrettyMIDI()
        i = pretty_midi.Instrument(0)
        # Create 9 notes, at times [1, 2, 3, 4, 5, 6, 7, 8, 9]
        for n, start in enumerate(range(1, 10)):
            i.notes.append(pretty_midi.Note(100, 100 + n, start, start + .5))
        pm.instruments.append(i)
        return pm

    # Test notes are interpolated as expected
    pm = simple()
    pm.adjust_times([0, 10], [5, 20])
    for note, start in zip(pm.instruments[0].notes,
                           1.5 * np.arange(1, 10) + 5):
        assert note.start == start
    # Test notes are all omitted when the adjustment range doesn't cover them
    pm = simple()
    pm.adjust_times([10, 20], [5, 10])
    assert len(pm.instruments[0].notes) == 0
    # Test repeated mapping times
    pm = simple()
    pm.adjust_times([0, 5, 6.5, 10], [5, 10, 10, 17])
    # Original times  [1, 2, 3, 4,  7,  8,  9]
    # The notes at times 5 and 6 have their durations squashed to zero
    expected_starts = [6, 7, 8, 9, 11, 13, 15]
    assert np.allclose([n.start for n in pm.instruments[0].notes],
                       expected_starts)
    pm = simple()
    pm.adjust_times([0, 5, 5, 10], [7, 12, 13, 17])
    # Original times  [1, 2, 3, 4,  5,  6,  7,  8,  9]
    expected_starts = [8, 9, 10, 11, 12, 13, 14, 15, 16]
    assert np.allclose([n.start for n in pm.instruments[0].notes],
                       expected_starts)

    # Complicated example
    pm = simple()
    # Include pitch bends and control changes to test adjust_events
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(100, 1.))
    # Include events which fall within the omitted region
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(200, 7.))
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(0, 7.1))
    # Include event which falls outside of the track
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(10, 10.))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 0, .5))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 1, 5.5))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 2, 7.5))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 3, 20.))
    # Include track-level meta events to test adjust_meta
    pm.time_signature_changes.append(pretty_midi.TimeSignature(3, 4, .1))
    pm.time_signature_changes.append(pretty_midi.TimeSignature(4, 4, 5.2))
    pm.time_signature_changes.append(pretty_midi.TimeSignature(6, 4, 6.2))
    pm.time_signature_changes.append(pretty_midi.TimeSignature(5, 4, 15.3))
    pm.key_signature_changes.append(pretty_midi.KeySignature(1, 1.))
    pm.key_signature_changes.append(pretty_midi.KeySignature(2, 6.2))
    pm.key_signature_changes.append(pretty_midi.KeySignature(3, 7.2))
    pm.key_signature_changes.append(pretty_midi.KeySignature(4, 12.3))
    # Add in tempo changes - 100 bpm at 0s
    pm._tick_scales[0] = (0, 60. / (100 * pm.resolution))
    # 110 bpm at 6s
    pm._tick_scales.append((2200, 60. / (110 * pm.resolution)))
    # 120 bpm at 8.1s
    pm._tick_scales.append((3047, 60. / (120 * pm.resolution)))
    # 150 bpm at 8.3s
    pm._tick_scales.append((3135, 60. / (150 * pm.resolution)))
    # 80 bpm at 9.3s
    pm._tick_scales.append((3685, 60. / (80 * pm.resolution)))
    pm._update_tick_to_time(20000)

    # Adjust times, with a collapsing section in original and new times
    pm.adjust_times([2., 3.1, 3.1, 5.1, 7.5, 10], [5., 6., 7., 8.5, 8.5, 11])

    # Original tempo change times: [0, 6, 8.1, 8.3, 9.3]
    # Plus tempo changes at each of new_times which are not collapsed
    # Plus tempo change at 0s by default
    expected_times = [
        0., 5., 6., 8.5, 8.5 + (6 - 5.1) * (11 - 8.5) / (10 - 5.1),
        8.5 + (8.1 - 5.1) * (11 - 8.5) / (10 - 5.1),
        8.5 + (8.3 - 5.1) * (11 - 8.5) / (10 - 5.1),
        8.5 + (9.3 - 5.1) * (11 - 8.5) / (10 - 5.1)
    ]
    # Tempos scaled by differences in timing, plus 120 bpm at the beginning
    expected_tempi = [
        120., 100 * (3.1 - 2) / (6 - 5), 100 * (5.1 - 3.1) / (8.5 - 6),
        100 * (10 - 5.1) / (11 - 8.5), 110 * (10 - 5.1) / (11 - 8.5),
        120 * (10 - 5.1) / (11 - 8.5), 150 * (10 - 5.1) / (11 - 8.5),
        80 * (10 - 5.1) / (11 - 8.5)
    ]
    change_times, tempi = pm.get_tempo_changes()
    # Because tempo change times must occur at discrete ticks, we must raise
    # the relative tolerance when comparing
    assert np.allclose(expected_times, change_times, rtol=.001)
    assert np.allclose(expected_tempi, tempi, rtol=.002)

    # Test that all other events were interpolated as expected
    note_starts = [
        5.0, 5 + 1 / 1.1, 6 + .9 / (2 / 2.5), 6 + 1.9 / (2 / 2.5), 8.5 + .5,
        8.5 + 1.5
    ]
    note_ends = [
        5 + .5 / 1.1, 6 + .4 / (2 / 2.5), 6 + 1.4 / (2 / 2.5), 8.5, 8.5 + 1.,
        10 + .5
    ]
    note_pitches = [101, 102, 103, 104, 107, 108]
    for note, s, e, p in zip(pm.instruments[0].notes, note_starts, note_ends,
                             note_pitches):
        assert note.start == s
        assert note.end == e
        assert note.pitch == p

    bend_times = [5., 8.5, 8.5]
    bend_pitches = [100, 200, 0]
    for bend, t, p in zip(pm.instruments[0].pitch_bends, bend_times,
                          bend_pitches):
        assert bend.time == t
        assert bend.pitch == p

    cc_times = [5., 8.5, 8.5]
    cc_values = [0, 1, 2]
    for cc, t, v in zip(pm.instruments[0].control_changes, cc_times,
                        cc_values):
        assert cc.time == t
        assert cc.value == v

    # The first time signature change will be placed at the first interpolated
    # downbeat location - so, start by computing the location of the first
    # downbeat after the start of original_times, then interpolate it
    first_downbeat_after = .1 + 2 * 3 * 60. / 100.
    first_ts_time = 6. + (first_downbeat_after - 3.1) / (2. / 2.5)
    ts_times = [first_ts_time, 8.5, 8.5]
    ts_numerators = [3, 4, 6]
    for ts, t, n in zip(pm.time_signature_changes, ts_times, ts_numerators):
        assert np.isclose(ts.time, t)
        assert ts.numerator == n

    ks_times = [5., 8.5, 8.5]
    ks_keys = [1, 2, 3]
    for ks, t, k in zip(pm.key_signature_changes, ks_times, ks_keys):
        assert ks.time == t
        assert ks.key_number == k
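
The expected values above follow a piecewise-linear map from original_times to new_times; for the simple non-collapsed case this is the same interpolation numpy provides, e.g.:

import numpy as np

# Piecewise-linear time mapping behind the first set of expectations above;
# adjust_times additionally drops notes that fall in collapsed regions.
original_times = [0, 10]
new_times = [5, 20]
note_onsets = np.arange(1, 10)
print(np.interp(note_onsets, original_times, new_times))  # 1.5 * onsets + 5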
Example #6
def test_df_to_midi():
    df = pd.DataFrame({
        "onset": 0,
        "track": [0, 0, 1],
        "pitch": [10, 20, 30],
        "dur": 1000
    })

    # Test basic writing
    fileio.df_to_midi(df, "test.mid")
    assert fileio.midi_to_df("test.mid").equals(
        df), "Writing df to MIDI and reading changes df."

    # Test that writing overwrites existing notes
    df.pitch += 10
    fileio.df_to_midi(df, "test2.mid", existing_midi_path="test.mid")
    assert fileio.midi_to_df("test2.mid").equals(
        df), "Writing df to MIDI with existing MIDI does not overwrite notes."

    # Test that notes before excerpt_start are kept from the existing MIDI
    fileio.df_to_midi(df,
                      "test2.mid",
                      existing_midi_path="test.mid",
                      excerpt_start=1000)
    expected = pd.DataFrame({
        "onset": [0, 0, 0, 1000, 1000, 1000],
        "track": [0, 0, 1, 0, 0, 1],
        "pitch": [10, 20, 30, 20, 30, 40],
        "dur": 1000,
    })
    assert fileio.midi_to_df("test2.mid").equals(
        expected), "Writing to MIDI doesn't copy notes before excerpt_start"

    # Test that notes after excerpt_length are kept from the existing MIDI
    fileio.df_to_midi(df,
                      "test.mid",
                      existing_midi_path="test2.mid",
                      excerpt_length=1000)
    expected = pd.DataFrame({
        "onset": [0, 0, 0, 1000, 1000, 1000],
        "track": [0, 0, 1, 0, 0, 1],
        "pitch": [20, 30, 40, 20, 30, 40],
        "dur": 1000,
    })
    assert fileio.midi_to_df("test.mid").equals(
        expected), "Writing to MIDI doesn't copy notes after excerpt_length"

    df.track = 2
    fileio.df_to_midi(df,
                      "test.mid",
                      existing_midi_path="test2.mid",
                      excerpt_length=1000)
    expected = pd.DataFrame({
        "onset": [0, 0, 0, 1000, 1000, 1000],
        "track": [2, 2, 2, 0, 0, 1],
        "pitch": [20, 30, 40, 20, 30, 40],
        "dur": 1000,
    })
    assert fileio.midi_to_df("test.mid").equals(
        expected), "Writing to MIDI with extra track breaks"

    # Check all non-note events
    midi_obj = pretty_midi.PrettyMIDI("test.mid")
    midi_obj.instruments[0].name = "test"
    midi_obj.instruments[0].program = 100
    midi_obj.instruments[0].is_drum = True
    midi_obj.instruments[0].pitch_bends.append(pretty_midi.PitchBend(10, 0))
    midi_obj.instruments[0].control_changes.append(
        pretty_midi.ControlChange(10, 10, 0))
    midi_obj.lyrics.append(pretty_midi.Lyric("test", 0))
    midi_obj.time_signature_changes.append(pretty_midi.TimeSignature(2, 4, 1))
    midi_obj.key_signature_changes.append(pretty_midi.KeySignature(5, 1))
    midi_obj.write("test.mid")

    fileio.df_to_midi(expected, "test2.mid", existing_midi_path="test.mid")
    assert fileio.midi_to_df("test2.mid").equals(expected)

    # Check non-note events and data here
    new_midi = pretty_midi.PrettyMIDI("test2.mid")

    for instrument, new_instrument in zip(midi_obj.instruments,
                                          new_midi.instruments):
        assert instrument.name == new_instrument.name
        assert instrument.program == new_instrument.program
        assert instrument.is_drum == new_instrument.is_drum
        for pb, new_pb in zip(instrument.pitch_bends,
                              new_instrument.pitch_bends):
            assert pb.pitch == new_pb.pitch
            assert pb.time == new_pb.time
        for cc, new_cc in zip(instrument.control_changes,
                              new_instrument.control_changes):
            assert cc.number == new_cc.number
            assert cc.value == new_cc.value
            assert cc.time == new_cc.time

    for ks, new_ks in zip(midi_obj.key_signature_changes,
                          new_midi.key_signature_changes):
        assert ks.key_number == new_ks.key_number
        assert ks.time == new_ks.time

    for lyric, new_lyric in zip(midi_obj.lyrics, new_midi.lyrics):
        assert lyric.text == new_lyric.text
        assert lyric.time == new_lyric.time

    for ts, new_ts in zip(midi_obj.time_signature_changes,
                          new_midi.time_signature_changes):
        assert ts.numerator == new_ts.numerator
        assert ts.denominator == new_ts.denominator
        assert ts.time == new_ts.time

    for filename in ["test.mid", "test2.mid"]:
        try:
            os.remove(filename)
        except Exception:
            pass
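
For reference, the note DataFrame format exercised here has one row per note with an onset and duration (presumably in milliseconds, judging by the values used), a track index, and a MIDI pitch number; a minimal construction looks like:

import pandas as pd

# One row per note; onset/dur appear to be in milliseconds in these tests.
notes = pd.DataFrame({
    "onset": [0, 0, 500],
    "track": [0, 1, 0],
    "pitch": [60, 64, 67],
    "dur": [1000, 1000, 500],
})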
Example #7
    # Normalize each row of the pitch matrix so it sums to 1
    pitch_matrix = (pitch_matrix.transpose() /
                    pitch_matrix.transpose().sum(axis=0)).transpose()

    # HMM chord generator
    chords = generate_chords(pitch_matrix)

    # transform to pychord's Chord
    chords = [SYMBOL_CHORD_DICT.get(c) for c in chords]

    # generate a midi combining melody and chord
    midi = pretty_midi.PrettyMIDI(initial_tempo=bpm)
    ts = pretty_midi.TimeSignature(time_sig, 4, 0)

    # TODO: defaults to C major for now; support key transposition in the future
    ks = pretty_midi.KeySignature(0, 0)

    midi.time_signature_changes.append(ts)
    midi.key_signature_changes.append(ks)

    # melody track
    melody_track = pretty_midi.Instrument(program=0)
    for i in range(len(df_notes)):
        note = df_notes.iloc[i]
        pitch = MELODY_INIT_NOTE + YMLNOTE_VAL_DICT.get(
            note['key']) + 12 * note['octave']
        _note = pretty_midi.Note(velocity=MELODY_VELOCITY,
                                 pitch=int(pitch),
                                 start=note['on'] / 1000,
                                 end=note['off'] / 1000)