def wrapping_chord(chord_events, beats_sec, to_chroma=False):
    chord_symbols = []
    chord_track = pretty_midi.Instrument(program=0)
    init_chord = (root_heigest_note // 12) * 12
    for chord in chord_events:
        if chord is None:
            continue
        # voicing
        chord = voicing(chord)
        # compress to chromagram
        comp = to_chromagram(
            chord['composition']) if to_chroma else chord['composition']
        # shift to lowest root location (from 0)
        re_arr = chord['bass'] // 12
        comp = comp - re_arr * 12
        bass = chord['bass'] - re_arr * 12
        # event_on/off
        start = chord['event_on'] * beats_sec
        end = chord['event_off'] * beats_sec
        # determine the initial location
        loc = init_chord + bass
        init_chord_ = (init_chord - 12) if loc > root_heigest_note else init_chord
        symbol = chord['symbol']
        chord_symbols.append(pretty_midi.Lyric(symbol, start))
        for note in comp:
            note_number = note + init_chord_
            note = pretty_midi.Note(velocity=100, pitch=int(note_number),
                                    start=start, end=end)
            chord_track.notes.append(note)
    return chord_track, chord_symbols
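
# --- Usage sketch (added, not part of the original module) ---
# A minimal, hypothetical example of calling wrapping_chord. It assumes the
# module-level names used above (`root_heigest_note`, `voicing`,
# `to_chromagram`) are defined elsewhere, and that each chord event is a dict
# with 'composition' (pitches as a NumPy array), 'bass', 'event_on',
# 'event_off' (in beats), and 'symbol' keys.
def write_chord_demo(path='chords_demo.mid'):
    import numpy as np
    import pretty_midi

    chord_events = [{
        'composition': np.array([48, 52, 55]),  # C-E-G
        'bass': 48,
        'event_on': 0,
        'event_off': 4,
        'symbol': 'C',
    }]
    midi = pretty_midi.PrettyMIDI()
    # 0.5 seconds per beat == 120 BPM
    chord_track, chord_symbols = wrapping_chord(chord_events, beats_sec=0.5)
    midi.instruments.append(chord_track)
    midi.lyrics.extend(chord_symbols)
    midi.write(path)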
def test_get_end_time():
    pm = pretty_midi.PrettyMIDI()
    inst = pretty_midi.Instrument(0)
    pm.instruments.append(inst)
    # When no events, end time should be 0
    assert pm.get_end_time() == 0
    # End time should be sensitive to inst notes, pitch bends, control changes
    inst.notes.append(
        pretty_midi.Note(start=0.5, end=1.7, pitch=30, velocity=100))
    assert np.allclose(pm.get_end_time(), 1.7)
    inst.pitch_bends.append(pretty_midi.PitchBend(pitch=100, time=1.9))
    assert np.allclose(pm.get_end_time(), 1.9)
    inst.control_changes.append(
        pretty_midi.ControlChange(number=0, value=10, time=2.1))
    assert np.allclose(pm.get_end_time(), 2.1)
    # End time should be sensitive to meta events
    pm.time_signature_changes.append(
        pretty_midi.TimeSignature(numerator=4, denominator=4, time=2.3))
    assert np.allclose(pm.get_end_time(), 2.3)
    pm.key_signature_changes.append(
        pretty_midi.KeySignature(key_number=10, time=2.5))
    assert np.allclose(pm.get_end_time(), 2.5)
    pm.lyrics.append(pretty_midi.Lyric(text='hey', time=2.7))
    assert np.allclose(pm.get_end_time(), 2.7)
def test_df_to_midi():
    df = pd.DataFrame({
        "onset": 0,
        "track": [0, 0, 1],
        "pitch": [10, 20, 30],
        "dur": 1000
    })

    # Test basic writing
    fileio.df_to_midi(df, "test.mid")
    assert fileio.midi_to_df("test.mid").equals(
        df), "Writing df to MIDI and reading changes df."

    # Test that writing should overwrite existing notes
    df.pitch += 10
    fileio.df_to_midi(df, "test2.mid", existing_midi_path="test.mid")
    assert fileio.midi_to_df("test2.mid").equals(
        df), "Writing df to MIDI with existing MIDI does not overwrite notes."

    # Test that writing skips non-overwritten notes
    fileio.df_to_midi(df, "test2.mid", existing_midi_path="test.mid",
                      excerpt_start=1000)
    expected = pd.DataFrame({
        "onset": [0, 0, 0, 1000, 1000, 1000],
        "track": [0, 0, 1, 0, 0, 1],
        "pitch": [10, 20, 30, 20, 30, 40],
        "dur": 1000,
    })
    assert fileio.midi_to_df("test2.mid").equals(
        expected), "Writing to MIDI doesn't copy notes before excerpt_start"

    # Test that writing skips non-overwritten notes past end
    fileio.df_to_midi(df, "test.mid", existing_midi_path="test2.mid",
                      excerpt_length=1000)
    expected = pd.DataFrame({
        "onset": [0, 0, 0, 1000, 1000, 1000],
        "track": [0, 0, 1, 0, 0, 1],
        "pitch": [20, 30, 40, 20, 30, 40],
        "dur": 1000,
    })
    assert fileio.midi_to_df("test.mid").equals(
        expected), "Writing to MIDI doesn't copy notes after excerpt_length"

    df.track = 2
    fileio.df_to_midi(df, "test.mid", existing_midi_path="test2.mid",
                      excerpt_length=1000)
    expected = pd.DataFrame({
        "onset": [0, 0, 0, 1000, 1000, 1000],
        "track": [2, 2, 2, 0, 0, 1],
        "pitch": [20, 30, 40, 20, 30, 40],
        "dur": 1000,
    })
    assert fileio.midi_to_df("test.mid").equals(
        expected), "Writing to MIDI with extra track breaks"

    # Check all non-note events
    midi_obj = pretty_midi.PrettyMIDI("test.mid")
    midi_obj.instruments[0].name = "test"
    midi_obj.instruments[0].program = 100
    midi_obj.instruments[0].is_drum = True
    midi_obj.instruments[0].pitch_bends.append(pretty_midi.PitchBend(10, 0))
    midi_obj.instruments[0].control_changes.append(
        pretty_midi.ControlChange(10, 10, 0))
    midi_obj.lyrics.append(pretty_midi.Lyric("test", 0))
    midi_obj.time_signature_changes.append(pretty_midi.TimeSignature(2, 4, 1))
    midi_obj.key_signature_changes.append(pretty_midi.KeySignature(5, 1))
    midi_obj.write("test.mid")

    fileio.df_to_midi(expected, "test2.mid", existing_midi_path="test.mid")
    assert fileio.midi_to_df("test2.mid").equals(expected)

    # Check non-note events and data here
    new_midi = pretty_midi.PrettyMIDI("test2.mid")

    for instrument, new_instrument in zip(midi_obj.instruments,
                                          new_midi.instruments):
        assert instrument.name == new_instrument.name
        assert instrument.program == new_instrument.program
        assert instrument.is_drum == new_instrument.is_drum

        for pb, new_pb in zip(instrument.pitch_bends,
                              new_instrument.pitch_bends):
            assert pb.pitch == new_pb.pitch
            assert pb.time == new_pb.time

        for cc, new_cc in zip(instrument.control_changes,
                              new_instrument.control_changes):
            assert cc.number == new_cc.number
            assert cc.value == new_cc.value
            assert cc.time == new_cc.time

    for ks, new_ks in zip(midi_obj.key_signature_changes,
                          new_midi.key_signature_changes):
        assert ks.key_number == new_ks.key_number
        assert ks.time == new_ks.time

    for lyric, new_lyric in zip(midi_obj.lyrics, new_midi.lyrics):
        assert lyric.text == new_lyric.text
        assert lyric.time == new_lyric.time

    for ts, new_ts in zip(midi_obj.time_signature_changes,
                          new_midi.time_signature_changes):
        assert ts.numerator == new_ts.numerator
        assert ts.denominator == new_ts.denominator
        assert ts.time == new_ts.time

    for filename in ["test.mid", "test2.mid"]:
        try:
            os.remove(filename)
        except Exception:
            pass
def note_sequence_to_pretty_midi(
        sequence, drop_events_n_seconds_after_last_note=None):
    """Convert NoteSequence to a PrettyMIDI.

    Time is stored in the NoteSequence in absolute values (seconds) as opposed
    to relative values (MIDI ticks). When the NoteSequence is translated back
    to PrettyMIDI the absolute time is retained. The tempo map is also
    recreated.

    Args:
      sequence: A NoteSequence.
      drop_events_n_seconds_after_last_note: Events (e.g., time signature
        changes) that occur this many seconds after the last note will be
        dropped. If None, then no events will be dropped.

    Returns:
      A pretty_midi.PrettyMIDI object or None if sequence could not be decoded.
    """
    ticks_per_quarter = sequence.ticks_per_quarter or constants.STANDARD_PPQ

    max_event_time = None
    if drop_events_n_seconds_after_last_note is not None:
        max_event_time = (max([n.end_time for n in sequence.notes] or [0]) +
                          drop_events_n_seconds_after_last_note)

    # Try to find a tempo at time zero. The list is not guaranteed to be in
    # order.
    initial_seq_tempo = None
    for seq_tempo in sequence.tempos:
        if seq_tempo.time == 0:
            initial_seq_tempo = seq_tempo
            break

    kwargs = {}
    if initial_seq_tempo:
        kwargs['initial_tempo'] = initial_seq_tempo.qpm
    else:
        kwargs['initial_tempo'] = constants.DEFAULT_QUARTERS_PER_MINUTE

    pm = pretty_midi.PrettyMIDI(resolution=ticks_per_quarter, **kwargs)

    # Create an empty instrument to contain time and key signatures.
    instrument = pretty_midi.Instrument(0)
    pm.instruments.append(instrument)

    # Populate time signatures.
    for seq_ts in sequence.time_signatures:
        if max_event_time and seq_ts.time > max_event_time:
            continue
        time_signature = pretty_midi.containers.TimeSignature(
            seq_ts.numerator, seq_ts.denominator, seq_ts.time)
        pm.time_signature_changes.append(time_signature)

    # Populate key signatures.
    for seq_key in sequence.key_signatures:
        if max_event_time and seq_key.time > max_event_time:
            continue
        key_number = seq_key.key
        if seq_key.mode == seq_key.MINOR:
            key_number += _PRETTY_MIDI_MAJOR_TO_MINOR_OFFSET
        key_signature = pretty_midi.containers.KeySignature(
            key_number, seq_key.time)
        pm.key_signature_changes.append(key_signature)

    # Populate tempos.
    # TODO(douglaseck): Update this code if pretty_midi adds the ability to
    # write tempo.
    for seq_tempo in sequence.tempos:
        # Skip if this tempo was added in the PrettyMIDI constructor.
        if seq_tempo == initial_seq_tempo:
            continue
        if max_event_time and seq_tempo.time > max_event_time:
            continue
        tick_scale = 60.0 / (pm.resolution * seq_tempo.qpm)
        tick = pm.time_to_tick(seq_tempo.time)
        # pylint: disable=protected-access
        pm._tick_scales.append((tick, tick_scale))
        pm._update_tick_to_time(0)
        # pylint: enable=protected-access

    # Populate instrument names by first creating an instrument map between
    # instrument index and name, then going over this map in the instrument
    # event for loop.
    inst_infos = {}
    for inst_info in sequence.instrument_infos:
        inst_infos[inst_info.instrument] = inst_info.name

    # Populate instrument events by first gathering notes and other event
    # types in lists, then writing them sorted to the PrettyMIDI object.
    instrument_events = collections.defaultdict(
        lambda: collections.defaultdict(list))
    for seq_note in sequence.notes:
        instrument_events[(seq_note.instrument, seq_note.program,
                           seq_note.is_drum)]['notes'].append(
            pretty_midi.Note(seq_note.velocity, seq_note.pitch,
                             seq_note.start_time, seq_note.end_time))
    for seq_bend in sequence.pitch_bends:
        if max_event_time and seq_bend.time > max_event_time:
            continue
        instrument_events[(seq_bend.instrument, seq_bend.program,
                           seq_bend.is_drum)]['bends'].append(
            pretty_midi.PitchBend(seq_bend.bend, seq_bend.time))
    for seq_cc in sequence.control_changes:
        if max_event_time and seq_cc.time > max_event_time:
            continue
        instrument_events[(seq_cc.instrument, seq_cc.program,
                           seq_cc.is_drum)]['controls'].append(
            pretty_midi.ControlChange(
                seq_cc.control_number, seq_cc.control_value, seq_cc.time))

    from magenta.music.chords_lib import CHORD_SYMBOL
    for ta in sequence.text_annotations:
        if ta.annotation_type == CHORD_SYMBOL and ta.text != constants.NO_CHORD:
            pm.lyrics.append(pretty_midi.Lyric(ta.text, ta.time))

    # timing_track.append(mido.MetaMessage(
    #     'end_of_track', time=timing_track[-1].time + 1))

    for (instr_id, prog_id, is_drum) in sorted(instrument_events.keys()):
        # For instr_id 0 append to the instrument created above.
        if instr_id > 0:
            instrument = pretty_midi.Instrument(prog_id, is_drum)
            pm.instruments.append(instrument)
        else:
            instrument.is_drum = is_drum
        # Propagate the instrument name to the MIDI file.
        instrument.program = prog_id
        if instr_id in inst_infos:
            instrument.name = inst_infos[instr_id]
        instrument.notes = instrument_events[
            (instr_id, prog_id, is_drum)]['notes']
        instrument.pitch_bends = instrument_events[
            (instr_id, prog_id, is_drum)]['bends']
        instrument.control_changes = instrument_events[
            (instr_id, prog_id, is_drum)]['controls']

    return pm
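
# --- Added sketch (not part of the original Magenta code) ---
# The tempo-population loop above pokes at pretty_midi's private
# `_tick_scales` / `_update_tick_to_time` because pretty_midi has no public
# API for writing tempo changes. The standalone snippet below illustrates
# just that trick: inserting a tempo change at a given time and verifying it
# with get_tempo_changes(). Function names and values here are illustrative.
def add_tempo_change(pm, time, qpm):
    """Append a tempo change of `qpm` BPM at `time` seconds to `pm`."""
    tick_scale = 60.0 / (pm.resolution * qpm)
    tick = pm.time_to_tick(time)
    # pylint: disable=protected-access
    pm._tick_scales.append((tick, tick_scale))
    pm._update_tick_to_time(0)
    # pylint: enable=protected-access


def _tempo_demo():
    import pretty_midi
    pm = pretty_midi.PrettyMIDI(resolution=220, initial_tempo=120.0)
    add_tempo_change(pm, time=2.0, qpm=90.0)
    times, tempi = pm.get_tempo_changes()
    print(times, tempi)  # roughly: [0. 2.] [120. 90.]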
                                  start=note['on'] / 1000,
                                  end=note['off'] / 1000)
        melody_track.notes.append(_note)

    # chord track
    chord_track = pretty_midi.Instrument(program=0)
    chord_start = ((start_measure + 1) * measure_sec
                   if incomplete_start else start_measure * measure_sec)
    for i, chord in enumerate(chords):
        start = chord_start + i * measure_sec
        end = start + measure_sec
        lyric = pretty_midi.Lyric(text=chord, time=start)
        midi.lyrics.append(lyric)
        previous = -1
        _chords = Chord(chord).components()
        _chords = [NOTE_VAL_DICT.get(c) for c in _chords]
        for c in _chords:
            if c < previous:
                c += 12
            note = pretty_midi.Note(velocity=CHORD_VELOCITY,
                                    pitch=int(CHORD_INIT_NOTE + c),
                                    start=start,
                                    end=end)
            previous = c
            chord_track.notes.append(note)
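
# --- Added sketch (not from the original file) ---
# The inner loop above stacks chord tones upward: whenever the next pitch
# class is lower than the previously placed value, it is raised by an octave,
# so the voicing rises from the chord's first tone. The helper below is a
# hypothetical, self-contained restatement of that logic operating on a plain
# list of pitch classes (0-11).
def stack_ascending(pitch_classes):
    """Mirror of the loop above: bump a tone up an octave if it would
    otherwise land below the previously placed tone."""
    stacked = []
    previous = -1
    for c in pitch_classes:
        if c < previous:
            c += 12
        stacked.append(c)
        previous = c
    return stacked


# Example: a C major triad given in the order G, C, E becomes [7, 12, 16],
# i.e. the C and E are lifted an octave so they sit above the G.
# print(stack_ascending([7, 0, 4]))  # [7, 12, 16]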