def cut_midi(midi_data, start, end):
    new_midi = pm.PrettyMIDI(resolution=480)
    for instr in midi_data.instruments:
        new_instr = pm.Instrument(
            program=pm.instrument_name_to_program('Acoustic Grand Piano'),
            name=instr.name)
        for note in instr.notes:
            add_note = False
            if note.start <= start:
                if note.end > start:
                    # Note starts before start and ends after start, there is overlap
                    add_note = True
                else:
                    # Note starts and ends before start, no overlap
                    pass
            else:
                if note.start < end:
                    # Note starts between start and end, regardless of end there is overlap
                    add_note = True
                else:
                    # Note starts after end, no overlap
                    pass
            if add_note:
                new_note = pm.Note(note.velocity, note.pitch,
                                   max(0, note.start - start),
                                   min(end - start, note.end - start))
                if (new_note.start == 0 and new_note.end - new_note.start < 0.05) \
                        or (new_note.end == end - start and new_note.end - new_note.start < 0.05):
                    # Do not add short notes at start or end of section
                    # that are due to imprecision in cutting.
                    pass
                else:
                    new_instr.notes.append(new_note)
        ccs_sorted = sorted(instr.control_changes, key=lambda x: x.time)
        cc64_on = False
        for cc in ccs_sorted:
            # Only keep sustain pedal
            if cc.number == 64:
                # Check if CC64 was on before start
                if cc.time < start:
                    cc64_on = cc.value > 64
                elif cc.time >= end:
                    break
                else:
                    if cc.time != start and cc64_on:
                        # Add extra CC to put sustain on
                        new_cc = pm.ControlChange(cc.number, cc.value, 0)
                        new_instr.control_changes.append(new_cc)
                        # Add extra CC just the first time
                        cc64_on = False
                    new_cc = pm.ControlChange(cc.number, cc.value, cc.time - start)
                    new_instr.control_changes.append(new_cc)
        new_midi.instruments.append(new_instr)
    return new_midi
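# Example (hedged sketch): keep only the material between 10 s and 20 s of a
# performance. cut_midi above refers to pretty_midi through the module-level
# alias `pm`; 'performance.mid' is a placeholder path.
import pretty_midi as pm

source = pm.PrettyMIDI('performance.mid')
excerpt = cut_midi(source, start=10.0, end=20.0)
excerpt.write('excerpt.mid')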
def xml_pedals_to_midi_pedals(xml_pedals):
    midi_pedals = []
    for pedal in xml_pedals:
        midi_pedal = pretty_midi.ControlChange(number=pedal.number,
                                               value=pedal.value,
                                               time=pedal.time)
        midi_pedals.append(midi_pedal)
    return midi_pedals
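# Example (hedged sketch): xml_pedals_to_midi_pedals only reads .number, .value
# and .time, so any container exposing those attributes works. XmlPedal here is
# a hypothetical stand-in for the project's own pedal class.
from collections import namedtuple

XmlPedal = namedtuple('XmlPedal', ['number', 'value', 'time'])
pedals = [XmlPedal(64, 127, 0.0), XmlPedal(64, 0, 1.5)]
midi_pedals = xml_pedals_to_midi_pedals(pedals)   # two pretty_midi.ControlChange objects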
def note_played_for_frame(start_time, events_coo):
    '''
    For one frame, if the number of events falling inside a given height band
    exceeds the threshold, a note is considered present in that region.
    :param start_time:
    :param events_coo:
    :return:
    '''
    e_pitch = events_coo[:, 1] // (
        configs['height'] // (configs['oct_num'] * 12))  # which pitch each event height maps to
    # e_std = np.std(events_coo[:, 0], ddof=1)
    # print(f'e_std: {e_std}')
    pitch_counter = Counter(e_pitch)

    # Denoise: if the total number of events is below the threshold, treat the frame as noise
    event_num = sum(pitch_counter.values())
    if event_num < configs['total_event_thres']:
        return None, None

    # Collect all pitches whose event count exceeds the per-pitch threshold
    pitch_played_index = []
    for item in Counter(e_pitch).items():
        if item[1] > configs['part_event_thres']:
            pitch_played_index.append(item[0])

    # Note: image height and pitch run in opposite directions
    pitch_played_index = np.sort((configs['oct_num'] * 12) - np.array(pitch_played_index))
    if len(pitch_played_index) == 0:
        return None, None

    # Take the interval between the highest and lowest pitch and look up the notes to use
    pitch_diff = pitch_played_index[-1] - pitch_played_index[0]
    if configs['scale_type'] == 'Major':
        pitch_played = np.array(configs['pitch_major'][int(
            pitch_diff)]) + configs['root_pitch'] + int(pitch_played_index[0])
        notes_list = []
        for pitch in pitch_played:
            note = pretty_midi.Note(velocity=100,
                                    pitch=pitch,
                                    start=start_time / 1e6,
                                    end=(start_time + configs['frame_time'] *
                                         (len(configs['vel_thres']) + 1)) / 1e6)
            notes_list.append(note)
        # Add a control change event (CC 10, derived from the mean event x-position)
        cc_value = int(np.mean(events_coo[:, 0]) * 127 / configs['width'])
        cc = pretty_midi.ControlChange(number=10,
                                       value=cc_value,
                                       time=start_time / 1e6)
    else:
        raise NotImplementedError
    return notes_list, cc
def HOV2pm(md, sub_beats=4):
    """go from HOV and tempo to pretty midi

    Arguments:
    md -- dictionary containing data for HOV and tempo... NOT a md object, as the name would imply
    sub_beats -- number of sub beats used for quantizing
    """
    H = md['H']
    O = md['O']
    V = md['V']
    # add a column of zeros to the end of the training example, so that notes end sensibly
    R = np.concatenate((md['R'], np.zeros((1, md['R'].shape[-1]))))
    S = md['S']
    # invert transform tempo. If handling of tempo when generating examples is
    # changed, then this will need to change
    tempo = normalize_tempo(md['tempo'], inverse=True)
    beat_length = 60 / tempo[0]
    sub_beat_length = beat_length / sub_beats
    max_offset = sub_beat_length / 2
    pm = pretty_midi.PrettyMIDI(resolution=960)
    pm.instruments.append(pretty_midi.Instrument(0, name='piano'))
    beats = [i * beat_length for i in range(len(H))]
    sub_beat_times = [i + j * sub_beat_length for i in beats for j in range(sub_beats)]
    for timestep in range(len(H)):
        for pitch in np.where(H[timestep] == 1)[0]:
            h = sub_beat_times[timestep]
            note_on = h + O[timestep, pitch] * max_offset
            # calculating note off: add h to the time until the next 0 in the piano roll
            note_off = h + np.where(R[timestep:, pitch] == 0)[0][0] * sub_beat_length
            noteM = pretty_midi.Note(velocity=int(V[timestep, pitch] * 127),
                                     pitch=pitch + 21,
                                     start=note_on,
                                     end=note_off)
            pm.instruments[0].notes.append(noteM)
        # sort pedal
        if S[timestep, 0] == 1:
            pm.instruments[0].control_changes.append(
                pretty_midi.ControlChange(64, 0, sub_beat_times[timestep]))
        if S[timestep, 1] == 1:
            pm.instruments[0].control_changes.append(
                pretty_midi.ControlChange(64, 127, sub_beat_times[timestep]))
        if S[timestep, 1] == 1 and S[timestep, 0] == 1:
            print('simultaneous pedal events!')
    return pm
def midi_generator(notes, file_name, sustain='no'):
    notes = sorted(notes, key=lambda x: x[1])
    score = pretty_midi.PrettyMIDI()
    piano_program = pretty_midi.instrument_name_to_program('Acoustic Grand Piano')
    piano1 = pretty_midi.Instrument(program=piano_program)
    piano2 = pretty_midi.Instrument(program=piano_program)
    # piano2 is detuned a quarter tone down (-2048, with 4096 per semitone at the
    # standard +/-2 semitone pitch bend range) to carry the microtonal notes
    piano2.pitch_bends.append(pretty_midi.PitchBend(pitch=-2048, time=0))
    if sustain == 'yes':
        piano1.control_changes.append(
            pretty_midi.ControlChange(number=64, value=100, time=0))
        piano2.control_changes.append(
            pretty_midi.ControlChange(number=64, value=100, time=0))
    for n, note_ in enumerate(notes):
        if note_[1] == 0:
            note_[1] = 1e-20
        if note_[0] % 2 == 0:
            for later_note in notes[n + 1:]:
                if later_note[0] == note_[0]:
                    if later_note[1] <= note_[1] + note_[2]:
                        note_[2] = 0.9 * (later_note[1] - note_[1])
            note_ = pretty_midi.Note(velocity=note_[3],
                                     pitch=21 + note_[0] // 2,
                                     start=note_[1],
                                     end=note_[1] + note_[2])
            piano2.notes.append(note_)
        else:
            for later_note in notes[n + 1:]:
                if later_note[0] == note_[0]:
                    if later_note[1] <= note_[1] + note_[2]:
                        note_[2] = 0.9 * (later_note[1] - note_[1])
            note_ = pretty_midi.Note(velocity=note_[3],
                                     pitch=21 + (note_[0] - 1) // 2,
                                     start=note_[1],
                                     end=note_[1] + note_[2])
            piano1.notes.append(note_)
    score.instruments.append(piano1)
    score.instruments.append(piano2)
    score.write(file_name)
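# Example (hedged sketch): the note layout is inferred from the function body,
# [pitch_index, start_time, duration, velocity]; even indices are routed to the
# quarter-tone-detuned second piano and odd indices to the normally tuned first
# piano. Notes must be mutable lists because the function edits them in place.
notes = [
    [39, 0.0, 1.0, 100],   # odd index  -> piano1, MIDI pitch 21 + (39 - 1) // 2 = 40
    [40, 1.0, 1.0, 90],    # even index -> piano2 (quarter tone down), MIDI pitch 41
]
midi_generator(notes, 'quarter_tones.mid', sustain='no')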
def cluster_to_notes_and_cc(c_label, start):
    '''
    :param c_label:
    :param start:
    :return:
    '''
    # Control Change
    cc_value = get_cc_value(c_label, func='logistic')
    cc = pretty_midi.ControlChange(number=10,
                                   value=cc_value,
                                   time=start * configs['packet_time'] / 1e6)
    # Note
    velocity = get_note_velocity(c_label)
    note_off_param = get_note_off_param(c_label)
    pitch_max = int(
        np.max(c_label[:, 1]) / (configs['height'] / (configs['oct_num'] * 12)))
    pitch_min = int(
        np.min(c_label[:, 1]) / (configs['height'] / (configs['oct_num'] * 12)))
    pitch_mean = int(
        np.mean(c_label[:, 1]) / (configs['height'] / (configs['oct_num'] * 12)))
    pitch_played = np.array(
        configs['pitch_major'][pitch_max - pitch_min]) + configs['root_pitch'] + pitch_mean
    note_list = []
    for pitch in pitch_played:
        note = pretty_midi.Note(velocity=velocity,
                                pitch=pitch,
                                start=start * configs['packet_time'] / 1e6,
                                end=(start + note_off_param) * configs['packet_time'] / 1e6)
        note_list.append(note)
    return note_list, cc
def test_get_end_time():
    pm = pretty_midi.PrettyMIDI()
    inst = pretty_midi.Instrument(0)
    pm.instruments.append(inst)
    # When no events, end time should be 0
    assert pm.get_end_time() == 0
    # End time should be sensitive to inst notes, pitch bends, control changes
    inst.notes.append(
        pretty_midi.Note(start=0.5, end=1.7, pitch=30, velocity=100))
    assert np.allclose(pm.get_end_time(), 1.7)
    inst.pitch_bends.append(pretty_midi.PitchBend(pitch=100, time=1.9))
    assert np.allclose(pm.get_end_time(), 1.9)
    inst.control_changes.append(
        pretty_midi.ControlChange(number=0, value=10, time=2.1))
    assert np.allclose(pm.get_end_time(), 2.1)
    # End time should be sensitive to meta events
    pm.time_signature_changes.append(
        pretty_midi.TimeSignature(numerator=4, denominator=4, time=2.3))
    assert np.allclose(pm.get_end_time(), 2.3)
    pm.key_signature_changes.append(
        pretty_midi.KeySignature(key_number=10, time=2.5))
    assert np.allclose(pm.get_end_time(), 2.5)
    pm.lyrics.append(pretty_midi.Lyric(text='hey', time=2.7))
    assert np.allclose(pm.get_end_time(), 2.7)
def slice_midi(pm, beats, start_idx, end_idx):
    '''
    Slice the given pretty_midi object to the segment spanning
    beats[start_idx] to beats[end_idx].
    '''
    new_pm = pretty_midi.PrettyMIDI()
    new_inst = pretty_midi.Instrument(program=pm.instruments[0].program,
                                      is_drum=pm.instruments[0].is_drum,
                                      name=pm.instruments[0].name)
    start, end = beats[start_idx], beats[end_idx]
    for i in range(len(pm.instruments)):
        for note in pm.instruments[i].notes:
            velocity, pitch = note.velocity, note.pitch
            if note.start > end or note.start < start:
                continue
            else:
                s = note.start - start
                if note.end > end:
                    e = end - start
                else:
                    e = note.end - start
            new_note = pretty_midi.Note(velocity=velocity, pitch=pitch,
                                        start=s, end=e)
            new_inst.notes.append(new_note)
        for ctrl in pm.instruments[i].control_changes:
            if ctrl.time >= start and ctrl.time < end:
                new_ctrl = pretty_midi.ControlChange(number=ctrl.number,
                                                     value=ctrl.value,
                                                     time=ctrl.time - start)
                new_inst.control_changes.append(new_ctrl)
    new_pm.instruments.append(new_inst)
    new_pm.write('tmp.mid')
    return new_pm
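# Example (hedged sketch): slice the first eight beats out of a file.
# 'song.mid' is a placeholder path and the file is assumed to be at least
# eight beats long; note that slice_midi also writes 'tmp.mid' as a side effect.
import pretty_midi

midi = pretty_midi.PrettyMIDI('song.mid')
beats = midi.get_beats()                  # beat times in seconds
excerpt = slice_midi(midi, beats, start_idx=0, end_idx=8)
print(excerpt.get_end_time())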
def test_get_piano_roll_and_get_chroma():
    pm = pretty_midi.PrettyMIDI()
    assert pm.get_piano_roll().shape == (128, 0)
    # Currently just a rudimentary test since it's hard to test things like
    # pitch bends correctly
    inst = pretty_midi.Instrument(0)
    pm.instruments.append(inst)
    inst.notes.append(
        pretty_midi.Note(pitch=40, velocity=100, start=0.05, end=0.45))
    inst = pretty_midi.Instrument(0)
    pm.instruments.append(inst)
    inst.notes.append(
        pretty_midi.Note(pitch=40, velocity=50, start=0.35, end=0.5))
    inst.notes.append(
        pretty_midi.Note(pitch=45, velocity=100, start=0.1, end=0.2))
    inst = pretty_midi.Instrument(0)
    pm.instruments.append(inst)
    inst.control_changes.append(
        pretty_midi.ControlChange(number=64, value=65, time=0.12))
    inst.control_changes.append(
        pretty_midi.ControlChange(number=64, value=63, time=0.5))
    inst.notes.append(
        pretty_midi.Note(pitch=50, velocity=50, start=0.35, end=0.4))
    inst.notes.append(
        pretty_midi.Note(pitch=55, velocity=20, start=0.1, end=0.15))
    inst.notes.append(
        pretty_midi.Note(pitch=55, velocity=10, start=0.2, end=0.25))
    inst.notes.append(
        pretty_midi.Note(pitch=55, velocity=50, start=0.3, end=0.42))

    expected_piano_roll = np.zeros((128, 50))
    expected_piano_roll[40, 5:35] = 100
    expected_piano_roll[40, 35:45] = 150
    expected_piano_roll[40, 45:] = 50
    expected_piano_roll[45, 10:20] = 100
    expected_piano_roll[50, 35:40] = 50
    expected_piano_roll[55, 10:15] = 20
    expected_piano_roll[55, 20:25] = 10
    expected_piano_roll[55, 30:42] = 50
    assert np.allclose(pm.get_piano_roll(pedal_threshold=None),
                       expected_piano_roll)

    expected_piano_roll[50, 35:50] = 50
    expected_piano_roll[55, 10:30] = 20
    expected_piano_roll[55, 30:50] = 50
    assert np.allclose(pm.get_piano_roll(), expected_piano_roll)

    expected_chroma = np.zeros((12, 50))
    expected_chroma[4, 5:35] = 100
    expected_chroma[4, 35:45] = 150
    expected_chroma[4, 45:] = 50
    expected_chroma[9, 10:20] = 100
    expected_chroma[2, 35:40] = 50
    expected_chroma[7, 10:15] = 20
    expected_chroma[7, 20:25] = 10
    expected_chroma[7, 30:42] = 50
    assert np.allclose(pm.get_chroma(pedal_threshold=None), expected_chroma)

    expected_chroma[2, 35:50] = 50
    expected_chroma[7, 10:30] = 20
    expected_chroma[7, 30:50] = 50
    assert np.allclose(pm.get_chroma(), expected_chroma)
def test_adjust_times():
    # Simple tests for adjusting note times
    def simple():
        pm = pretty_midi.PrettyMIDI()
        i = pretty_midi.Instrument(0)
        # Create 9 notes, at times [1, 2, 3, 4, 5, 6, 7, 8, 9]
        for n, start in enumerate(range(1, 10)):
            i.notes.append(pretty_midi.Note(100, 100 + n, start, start + .5))
        pm.instruments.append(i)
        return pm

    # Test notes are interpolated as expected
    pm = simple()
    pm.adjust_times([0, 10], [5, 20])
    for note, start in zip(pm.instruments[0].notes,
                           1.5 * np.arange(1, 10) + 5):
        assert note.start == start
    # Test notes are all omitted when adjustment range doesn't cover them
    pm = simple()
    pm.adjust_times([10, 20], [5, 10])
    assert len(pm.instruments[0].notes) == 0
    # Test repeated mapping times
    pm = simple()
    pm.adjust_times([0, 5, 6.5, 10], [5, 10, 10, 17])
    # Original times [1, 2, 3, 4, 7, 8, 9]
    # The notes at times 5 and 6 have their durations squashed to zero
    expected_starts = [6, 7, 8, 9, 11, 13, 15]
    assert np.allclose([n.start for n in pm.instruments[0].notes],
                       expected_starts)
    pm = simple()
    pm.adjust_times([0, 5, 5, 10], [7, 12, 13, 17])
    # Original times [1, 2, 3, 4, 5, 6, 7, 8, 9]
    expected_starts = [8, 9, 10, 11, 12, 13, 14, 15, 16]
    assert np.allclose([n.start for n in pm.instruments[0].notes],
                       expected_starts)

    # Complicated example
    pm = simple()
    # Include pitch bends and control changes to test adjust_events
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(100, 1.))
    # Include events which fall within the omitted region
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(200, 7.))
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(0, 7.1))
    # Include an event which falls outside of the track
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(10, 10.))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 0, .5))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 1, 5.5))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 2, 7.5))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 3, 20.))
    # Include track-level meta events to test adjust_meta
    pm.time_signature_changes.append(pretty_midi.TimeSignature(3, 4, .1))
    pm.time_signature_changes.append(pretty_midi.TimeSignature(4, 4, 5.2))
    pm.time_signature_changes.append(pretty_midi.TimeSignature(6, 4, 6.2))
    pm.time_signature_changes.append(pretty_midi.TimeSignature(5, 4, 15.3))
    pm.key_signature_changes.append(pretty_midi.KeySignature(1, 1.))
    pm.key_signature_changes.append(pretty_midi.KeySignature(2, 6.2))
    pm.key_signature_changes.append(pretty_midi.KeySignature(3, 7.2))
    pm.key_signature_changes.append(pretty_midi.KeySignature(4, 12.3))
    # Add in tempo changes - 100 bpm at 0s
    pm._tick_scales[0] = (0, 60. / (100 * pm.resolution))
    # 110 bpm at 6s
    pm._tick_scales.append((2200, 60. / (110 * pm.resolution)))
    # 120 bpm at 8.1s
    pm._tick_scales.append((3047, 60. / (120 * pm.resolution)))
    # 150 bpm at 8.3s
    pm._tick_scales.append((3135, 60. / (150 * pm.resolution)))
    # 80 bpm at 9.3s
    pm._tick_scales.append((3685, 60. / (80 * pm.resolution)))
    pm._update_tick_to_time(20000)
    # Adjust times, with a collapsing section in original and new times
    pm.adjust_times([2., 3.1, 3.1, 5.1, 7.5, 10], [5., 6., 7., 8.5, 8.5, 11])
    # Original tempo change times: [0, 6, 8.1, 8.3, 9.3]
    # Plus tempo changes at each of new_times which are not collapsed
    # Plus tempo change at 0s by default
    expected_times = [
        0., 5., 6., 8.5,
        8.5 + (6 - 5.1) * (11 - 8.5) / (10 - 5.1),
        8.5 + (8.1 - 5.1) * (11 - 8.5) / (10 - 5.1),
        8.5 + (8.3 - 5.1) * (11 - 8.5) / (10 - 5.1),
        8.5 + (9.3 - 5.1) * (11 - 8.5) / (10 - 5.1)
    ]
    # Tempos scaled by differences in timing, plus 120 bpm at the beginning
    expected_tempi = [
        120.,
        100 * (3.1 - 2) / (6 - 5),
        100 * (5.1 - 3.1) / (8.5 - 6),
        100 * (10 - 5.1) / (11 - 8.5),
        110 * (10 - 5.1) / (11 - 8.5),
        120 * (10 - 5.1) / (11 - 8.5),
        150 * (10 - 5.1) / (11 - 8.5),
        80 * (10 - 5.1) / (11 - 8.5)
    ]
    change_times, tempi = pm.get_tempo_changes()
    # Due to the fact that tempo change times must occur at discrete ticks, we
    # must raise the relative tolerance when comparing
    assert np.allclose(expected_times, change_times, rtol=.001)
    assert np.allclose(expected_tempi, tempi, rtol=.002)
    # Test that all other events were interpolated as expected
    note_starts = [
        5.0, 5 + 1 / 1.1, 6 + .9 / (2 / 2.5), 6 + 1.9 / (2 / 2.5),
        8.5 + .5, 8.5 + 1.5
    ]
    note_ends = [
        5 + .5 / 1.1, 6 + .4 / (2 / 2.5), 6 + 1.4 / (2 / 2.5), 8.5,
        8.5 + 1., 10 + .5
    ]
    note_pitches = [101, 102, 103, 104, 107, 108]
    for note, s, e, p in zip(pm.instruments[0].notes, note_starts, note_ends,
                             note_pitches):
        assert note.start == s
        assert note.end == e
        assert note.pitch == p
    bend_times = [5., 8.5, 8.5]
    bend_pitches = [100, 200, 0]
    for bend, t, p in zip(pm.instruments[0].pitch_bends, bend_times,
                          bend_pitches):
        assert bend.time == t
        assert bend.pitch == p
    cc_times = [5., 8.5, 8.5]
    cc_values = [0, 1, 2]
    for cc, t, v in zip(pm.instruments[0].control_changes, cc_times,
                        cc_values):
        assert cc.time == t
        assert cc.value == v
    # The first time signature change will be placed at the first interpolated
    # downbeat location - so, start by computing the location of the first
    # downbeat after the start of original_times, then interpolate it
    first_downbeat_after = .1 + 2 * 3 * 60. / 100.
    first_ts_time = 6. + (first_downbeat_after - 3.1) / (2. / 2.5)
    ts_times = [first_ts_time, 8.5, 8.5]
    ts_numerators = [3, 4, 6]
    for ts, t, n in zip(pm.time_signature_changes, ts_times, ts_numerators):
        assert np.isclose(ts.time, t)
        assert ts.numerator == n
    ks_times = [5., 8.5, 8.5]
    ks_keys = [1, 2, 3]
    for ks, t, k in zip(pm.key_signature_changes, ks_times, ks_keys):
        assert ks.time == t
        assert ks.key_number == k
def sequence_proto_to_pretty_midi(sequence):
    """Convert tensorflow.magenta.NoteSequence proto to a PrettyMIDI.

    Time is stored in the NoteSequence in absolute values (seconds) as opposed
    to relative values (MIDI ticks). When the NoteSequence is translated back
    to PrettyMIDI the absolute time is retained. The tempo map is also recreated.

    Args:
      sequence: A tensorflow.magenta.NoteSequence proto.

    Returns:
      A pretty_midi.PrettyMIDI object or None if sequence could not be decoded.
    """
    kwargs = {}
    if sequence.tempos and sequence.tempos[0].time == 0:
        kwargs['initial_tempo'] = sequence.tempos[0].qpm
    pm = pretty_midi.PrettyMIDI(resolution=sequence.ticks_per_quarter, **kwargs)

    # Create an empty instrument to contain time and key signatures.
    instrument = pretty_midi.Instrument(0)
    pm.instruments.append(instrument)

    # Populate time signatures.
    for seq_ts in sequence.time_signatures:
        time_signature = pretty_midi.containers.TimeSignature(
            seq_ts.numerator, seq_ts.denominator, seq_ts.time)
        pm.time_signature_changes.append(time_signature)

    # Populate key signatures.
    for seq_key in sequence.key_signatures:
        key_number = seq_key.key
        if seq_key.mode == seq_key.MINOR:
            key_number += _PRETTY_MIDI_MAJOR_TO_MINOR_OFFSET
        key_signature = pretty_midi.containers.KeySignature(
            key_number, seq_key.time)
        pm.key_signature_changes.append(key_signature)

    # Populate tempo. The first tempo change was done in the PrettyMIDI constructor.
    # TODO(@douglaseck): Update this code if pretty_midi adds the ability to
    # write tempo.
    if len(sequence.tempos) > 1:
        for seq_tempo in sequence.tempos[1:]:
            tick_scale = 60.0 / (pm.resolution * seq_tempo.qpm)
            tick = pm.time_to_tick(seq_tempo.time)
            # pylint: disable=protected-access
            pm._PrettyMIDI__tick_scales.append((tick, tick_scale))
            # pylint: enable=protected-access

    # Populate instrument events by first gathering notes and other event types
    # in lists then write them sorted to the PrettyMidi object.
    instrument_events = defaultdict(lambda: defaultdict(list))
    for seq_note in sequence.notes:
        instrument_events[(seq_note.instrument, seq_note.program,
                           seq_note.is_drum)]['notes'].append(
            pretty_midi.Note(seq_note.velocity, seq_note.pitch,
                             seq_note.start_time, seq_note.end_time))
    for seq_bend in sequence.pitch_bends:
        instrument_events[(seq_bend.instrument, seq_bend.program,
                           seq_bend.is_drum)]['bends'].append(
            pretty_midi.PitchBend(seq_bend.bend, seq_bend.time))
    for seq_cc in sequence.control_changes:
        instrument_events[(seq_cc.instrument, seq_cc.program,
                           seq_cc.is_drum)]['controls'].append(
            pretty_midi.ControlChange(seq_cc.control_number,
                                      seq_cc.control_value, seq_cc.time))
    for (instr_id, prog_id, is_drum) in sorted(instrument_events.keys()):
        # For instr_id 0 append to the instrument created above.
        if instr_id > 0:
            instrument = pretty_midi.Instrument(prog_id, is_drum)
            pm.instruments.append(instrument)
        instrument.program = prog_id
        instrument.notes = instrument_events[(instr_id, prog_id,
                                              is_drum)]['notes']
        instrument.pitch_bends = instrument_events[(instr_id, prog_id,
                                                    is_drum)]['bends']
        instrument.control_changes = instrument_events[(instr_id, prog_id,
                                                        is_drum)]['controls']
    return pm
def make_music(
    pitches=60,
    durs=0.333,
    pgm=1,
    pan=64,
    is_drum=False,
    format="inbrowser",
    sr=16000,
    resolution=220,
):
    """Turn lists of numbers into music.

    Converts pitch and duration values into MIDI and/or audio playback. Uses
    `pretty_midi` for MIDI representation handling, fluidsynth for resynthesis,
    and `IPython.display.Audio` for browser playback.

    Parameters
    ----------
    pitches : list or scalar
        List of pitches, or scalar if constant pitch. Floating point values are
        interpreted as microtonal pitch deviations.
    durs : list or scalar
        List of durations, or scalar if constant duration.
    pgm : number
        General MIDI program number, in range ``[1, 128]``.
    pan : number
        Pan value, in range ``[0, 127]``.
    is_drum : bool
        If True use percussion channel 10.
    format : string
        Which format to render sound to?
        - `'MIDI'` returns MIDI as a `pretty_midi` object
        - `'audio'` returns waveforms as a `numpy` nd.array
        - `'inbrowser'` returns `IPython.display.Audio` widget
        - `'autoplay'` returns `IPython.display.Audio` widget and plays it

    Returns
    -------
    synthesized : depends on the value of `format`.

    Notes
    -----
    If len(pitches) and len(durs) do not match, the smaller list is extended to
    match the length of the longer list by repeating the last value.
    """
    # check and convert to list if needed
    pitches = pitches if isinstance(pitches, list) else [pitches]
    durs = durs if isinstance(durs, list) else [durs]

    # extend short lists if size mismatch
    max_length = max(len(pitches), len(durs))
    pitches += [pitches[-1]] * (max_length - len(pitches))
    durs += [durs[-1]] * (max_length - len(durs))

    # create a PrettyMIDI score
    score = pretty_midi.PrettyMIDI(resolution=resolution)

    # create a list of instruments, one for each voice (for polyphonic pitch bend)
    num_voices = max([len(p) if isinstance(p, list) else 1 for p in pitches])
    ins = [
        pretty_midi.Instrument(program=max(pgm - 1, 0), is_drum=is_drum)
        for i in range(num_voices)
    ]

    # apply pan to all instruments
    for instrument in ins:
        cc = pretty_midi.ControlChange(10, pan, 0)
        instrument.control_changes.append(cc)

    # iterate through music
    now_time = 0
    for pitch, dur in zip(pitches, durs):
        # rest if pitch is None
        if pitch is not None:
            # convert to list if needed
            pitch = pitch if isinstance(pitch, list) else [pitch]
            # loop through each voice of the list
            for voice_index, pitch_val in enumerate(pitch):
                # split into 12tet and microtones
                micros, twlvtet = math.modf(pitch_val)
                # create a new note
                note = pretty_midi.Note(
                    velocity=100,
                    pitch=int(twlvtet),
                    start=now_time,
                    end=now_time + dur,
                )
                # and add it to the instrument
                ins[voice_index].notes.append(note)
                # if microtonal
                if micros != 0:
                    # create a new pitch bend
                    # note: 4096 is a semitone in standard MIDI +/-2 pitch bend range
                    micropitch = pretty_midi.PitchBend(
                        pitch=int(round(micros * 4096)), time=now_time)
                    # and add it to the instrument
                    ins[voice_index].pitch_bends.append(micropitch)
        # advance time
        now_time += dur

    # add instruments to the score
    score.instruments.extend(ins)

    # which format to render
    if format == "MIDI":
        return score
    elif format == "audio":
        return score.fluidsynth(fs=sr)
    elif format == "inbrowser":
        return IPython.display.Audio(score.fluidsynth(fs=sr), rate=sr)
    elif format == "autoplay":
        return IPython.display.Audio(score.fluidsynth(fs=sr), rate=sr,
                                     autoplay=True)
    else:
        raise ValueError(
            "Your `format` argument did not match one of the available options"
        )
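# Example (hedged sketch, based on the docstring above): a three-note melody in
# which 62.5 lands a quarter tone above D4, returned as a pretty_midi object
# rather than rendered to audio. The melody values are arbitrary.
score = make_music(pitches=[60, 62.5, 64], durs=0.25, format="MIDI")
print(score.get_end_time())   # ~0.75 seconds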
def exprsco_to_midi(exprsco):
    import pretty_midi

    rate, nsamps, exprsco = exprsco

    # Create MIDI instruments
    p1_prog = pretty_midi.instrument_name_to_program('Lead 1 (square)')
    p2_prog = pretty_midi.instrument_name_to_program('Lead 2 (sawtooth)')
    tr_prog = pretty_midi.instrument_name_to_program('Synth Bass 1')
    no_prog = pretty_midi.instrument_name_to_program('Breath Noise')
    p1 = pretty_midi.Instrument(program=p1_prog, name='p1', is_drum=False)
    p2 = pretty_midi.Instrument(program=p2_prog, name='p2', is_drum=False)
    tr = pretty_midi.Instrument(program=tr_prog, name='tr', is_drum=False)
    no = pretty_midi.Instrument(program=no_prog, name='no', is_drum=True)

    # Iterate through score to extract channel notes
    notes = {}
    ccs = {}
    for i, ch in enumerate(np.split(exprsco, 4, axis=1)):
        ch = ch[:, 0, :]

        # MIDI doesn't allow velocity 0 messages so set tr velocity to 1
        if i == 2:
            ch[:, 1] = 1
            last_velocity = 1
        else:
            last_velocity = 0

        last_note = 0
        last_timbre = 0
        note_starts = []
        note_ends = []
        ch_ccs = []
        for s, (note, velocity, timbre) in enumerate(ch):
            if note != last_note:
                if note == 0:
                    note_ends.append(s)
                else:
                    if last_note == 0:
                        note_starts.append((s, note, velocity))
                    else:
                        note_ends.append(s)
                        note_starts.append((s, note, velocity))
            else:
                if velocity != last_velocity:
                    ch_ccs.append((s, 11, velocity))
                if timbre != last_timbre:
                    ch_ccs.append((s, 12, timbre))
            last_note = note
            last_velocity = velocity
            last_timbre = timbre
        if last_note != 0:
            note_ends.append(s + 1)

        assert len(note_starts) == len(note_ends)
        notes[i] = zip(note_starts, note_ends)
        ccs[i] = ch_ccs

    # Add notes to MIDI instruments
    for i, ins in enumerate([p1, p2, tr, no]):
        for (start_samp, note, velocity), end_samp in notes[i]:
            assert end_samp > start_samp
            start_t, end_t = start_samp / 44100., end_samp / 44100.
            note = pretty_midi.Note(velocity=velocity, pitch=note,
                                    start=start_t, end=end_t)
            ins.notes.append(note)
        for samp, cc_num, arg in ccs[i]:
            cc = pretty_midi.ControlChange(cc_num, arg, samp / 44100.)
            ins.control_changes.append(cc)

    # Add instruments to MIDI file
    midi = pretty_midi.PrettyMIDI(initial_tempo=120, resolution=22050)
    midi.instruments.extend([p1, p2, tr, no])

    # Create indicator for end of song
    eos = pretty_midi.TimeSignature(1, 1, nsamps / 44100.)
    midi.time_signature_changes.append(eos)

    # Write/read MIDI file
    mf = tempfile.NamedTemporaryFile('rb')
    midi.write(mf.name)
    midi = mf.read()
    mf.close()

    return midi
def note_sequence_to_pretty_midi(sequence,
                                 drop_events_n_seconds_after_last_note=None):
    """Convert NoteSequence to a PrettyMIDI.

    Time is stored in the NoteSequence in absolute values (seconds) as opposed
    to relative values (MIDI ticks). When the NoteSequence is translated back
    to PrettyMIDI the absolute time is retained. The tempo map is also recreated.

    Args:
      sequence: A NoteSequence.
      drop_events_n_seconds_after_last_note: Events (e.g., time signature changes)
          that occur this many seconds after the last note will be dropped. If
          None, then no events will be dropped.

    Returns:
      A pretty_midi.PrettyMIDI object or None if sequence could not be decoded.
    """
    ticks_per_quarter = sequence.ticks_per_quarter or constants.STANDARD_PPQ

    max_event_time = None
    if drop_events_n_seconds_after_last_note is not None:
        max_event_time = (max([n.end_time for n in sequence.notes] or [0]) +
                          drop_events_n_seconds_after_last_note)

    # Try to find a tempo at time zero. The list is not guaranteed to be in order.
    initial_seq_tempo = None
    for seq_tempo in sequence.tempos:
        if seq_tempo.time == 0:
            initial_seq_tempo = seq_tempo
            break

    kwargs = {}
    if initial_seq_tempo:
        kwargs['initial_tempo'] = initial_seq_tempo.qpm
    else:
        kwargs['initial_tempo'] = constants.DEFAULT_QUARTERS_PER_MINUTE

    pm = pretty_midi.PrettyMIDI(resolution=ticks_per_quarter, **kwargs)

    # Create an empty instrument to contain time and key signatures.
    instrument = pretty_midi.Instrument(0)
    pm.instruments.append(instrument)

    # Populate time signatures.
    for seq_ts in sequence.time_signatures:
        if max_event_time and seq_ts.time > max_event_time:
            continue
        time_signature = pretty_midi.containers.TimeSignature(
            seq_ts.numerator, seq_ts.denominator, seq_ts.time)
        pm.time_signature_changes.append(time_signature)

    # Populate key signatures.
    for seq_key in sequence.key_signatures:
        if max_event_time and seq_key.time > max_event_time:
            continue
        key_number = seq_key.key
        if seq_key.mode == seq_key.MINOR:
            key_number += _PRETTY_MIDI_MAJOR_TO_MINOR_OFFSET
        key_signature = pretty_midi.containers.KeySignature(
            key_number, seq_key.time)
        pm.key_signature_changes.append(key_signature)

    # Populate tempos.
    # TODO(douglaseck): Update this code if pretty_midi adds the ability to
    # write tempo.
    for seq_tempo in sequence.tempos:
        # Skip if this tempo was added in the PrettyMIDI constructor.
        if seq_tempo == initial_seq_tempo:
            continue
        if max_event_time and seq_tempo.time > max_event_time:
            continue
        tick_scale = 60.0 / (pm.resolution * seq_tempo.qpm)
        tick = pm.time_to_tick(seq_tempo.time)
        # pylint: disable=protected-access
        pm._tick_scales.append((tick, tick_scale))
        pm._update_tick_to_time(0)
        # pylint: enable=protected-access

    # Populate instrument names by first creating an instrument map between
    # instrument index and name.
    # Then, going over this map in the instrument event for loop.
    inst_infos = {}
    for inst_info in sequence.instrument_infos:
        inst_infos[inst_info.instrument] = inst_info.name

    # Populate instrument events by first gathering notes and other event types
    # in lists then write them sorted to the PrettyMidi object.
    instrument_events = collections.defaultdict(
        lambda: collections.defaultdict(list))
    for seq_note in sequence.notes:
        instrument_events[(seq_note.instrument, seq_note.program,
                           seq_note.is_drum)]['notes'].append(
            pretty_midi.Note(seq_note.velocity, seq_note.pitch,
                             seq_note.start_time, seq_note.end_time))
    for seq_bend in sequence.pitch_bends:
        if max_event_time and seq_bend.time > max_event_time:
            continue
        instrument_events[(seq_bend.instrument, seq_bend.program,
                           seq_bend.is_drum)]['bends'].append(
            pretty_midi.PitchBend(seq_bend.bend, seq_bend.time))
    for seq_cc in sequence.control_changes:
        if max_event_time and seq_cc.time > max_event_time:
            continue
        instrument_events[(seq_cc.instrument, seq_cc.program,
                           seq_cc.is_drum)]['controls'].append(
            pretty_midi.ControlChange(seq_cc.control_number,
                                      seq_cc.control_value, seq_cc.time))

    for ta in sequence.text_annotations:
        from magenta.music.chords_lib import CHORD_SYMBOL
        if ta.annotation_type == CHORD_SYMBOL and ta.text != constants.NO_CHORD:
            pm.lyrics.append(pretty_midi.Lyric(ta.text, ta.time))
    # timing_track.append(mido.MetaMessage(
    #     'end_of_track', time=timing_track[-1].time + 1))

    for (instr_id, prog_id, is_drum) in sorted(instrument_events.keys()):
        # For instr_id 0 append to the instrument created above.
        if instr_id > 0:
            instrument = pretty_midi.Instrument(prog_id, is_drum)
            pm.instruments.append(instrument)
        else:
            instrument.is_drum = is_drum
        # propagate instrument name to the midi file
        instrument.program = prog_id
        if instr_id in inst_infos:
            instrument.name = inst_infos[instr_id]
        instrument.notes = instrument_events[(instr_id, prog_id,
                                              is_drum)]['notes']
        instrument.pitch_bends = instrument_events[(instr_id, prog_id,
                                                    is_drum)]['bends']
        instrument.control_changes = instrument_events[(instr_id, prog_id,
                                                        is_drum)]['controls']

    return pm
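# Example (hedged sketch): build a tiny NoteSequence and convert it. The proto
# import path has moved between Magenta versions; `note_seq.protobuf` is assumed
# here, and the surrounding module is assumed to provide the `constants` and
# key-offset globals that the function above refers to.
from note_seq.protobuf import music_pb2

seq = music_pb2.NoteSequence()
seq.ticks_per_quarter = 220
seq.tempos.add(qpm=120, time=0)
seq.notes.add(pitch=60, velocity=80, start_time=0.0, end_time=0.5)
pm_out = note_sequence_to_pretty_midi(seq)
pm_out.write('sequence.mid')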
def apply_sustain_control_changes(midi):
    all_CCs = []
    for instr in midi.instruments:
        all_CCs += instr.control_changes
    all_pedals = [cc for cc in all_CCs if cc.number == 64]
    pedals_sorted = sorted(all_pedals, key=lambda x: x.time)
    # Add an extra pedal off at the end, just in case
    pedals_sorted += [pm.ControlChange(64, 0, midi.get_end_time())]

    # Create a pedal_ON array such that pedal_ON[i] > 0 iff pedal is on at tick i.
    # If pedal_ON[i] > 0, its value is the time at which pedal becomes off again.
    pedal_ON = np.zeros(midi._PrettyMIDI__tick_to_time.shape, dtype=float)
    # -1 if pedal is currently off, otherwise tick time of first time it is on.
    ON_idx = -1
    for cc in pedals_sorted:
        if cc.value > 64:
            if ON_idx < 0:
                ON_idx = midi.time_to_tick(cc.time)
            else:
                # Pedal is already ON
                pass
        else:
            if ON_idx > 0:
                pedal_ON[ON_idx:midi.time_to_tick(cc.time)] = cc.time
                ON_idx = -1
            else:
                # Pedal is already OFF
                pass

    # Copy to keep time signatures and tempo changes, but remove notes and CCs
    new_midi = copy.deepcopy(midi)
    new_midi.instruments = []

    # Store the notes per pitch, to trim them afterwards.
    all_notes = np.empty([128], dtype=object)
    for i in range(128):
        all_notes[i] = []

    for instr in midi.instruments:
        # First, extend all the notes until the pedal is off
        for note in instr.notes:
            start_tick = midi.time_to_tick(note.start)
            end_tick = midi.time_to_tick(note.end)
            if np.any(pedal_ON[start_tick:end_tick] > 0):
                # Pedal is on while note is on
                end_pedal = np.max(pedal_ON[start_tick:end_tick])
                note.end = max(note.end, end_pedal)
            else:
                # Pedal is not on while note is on, no modifications needed
                pass
            all_notes[note.pitch] += [note]

    new_instr = pm.Instrument(
        program=pm.instrument_name_to_program('Acoustic Grand Piano'),
        name="Piano")
    # Then, trim notes so they don't overlap
    for note_list in all_notes:
        if note_list != []:
            note_list = sorted(note_list, key=lambda x: x.start)
            for note_1, note_2 in zip(note_list[:-1], note_list[1:]):
                note_1.end = min(note_1.end, note_2.start)
                new_instr.notes += [note_1]
            new_instr.notes += [note_list[-1]]
    new_midi.instruments.append(new_instr)

    return new_midi
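# Example (hedged sketch): fold the sustain pedal into note durations and save
# the result. The function above relies on module-level imports of numpy (np),
# copy, and pretty_midi under the alias `pm`; 'performance.mid' is a placeholder.
import copy
import numpy as np
import pretty_midi as pm

midi = pm.PrettyMIDI('performance.mid')
sustained = apply_sustain_control_changes(midi)
sustained.write('performance_sustained.mid')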
def save_note_pedal_to_CC(midi_obj, bool_pedal=False, disklavier=False):
    # input = pretty_midi object with pedal info embedded in each note
    # (e.g. note.pedal_at_start etc.)
    assert len(midi_obj.instruments) == 1
    instrument = midi_obj.instruments[0]
    notes = instrument.notes
    notes.sort(key=lambda note: note.start)
    num_notes = len(notes)
    eps = 0.03  # hyper-parameter

    def to_8(value):
        # if value == True:
        #     return 127
        # else:
        #     return 0
        return int(min(max(value, 0), 127))

    primary_pedal = []
    secondary_pedal = []
    for i in range(num_notes):
        note = notes[i]
        next_note = notes[min(i + 1, num_notes - 1)]
        # print(note.start, note.end, note.pitch, note.pedal_refresh, note.pedal_cut)
        pedal1 = pretty_midi.ControlChange(number=64, value=to_8(note.pedal_at_start),
                                           time=note.start - eps)
        pedal2 = pretty_midi.ControlChange(number=64, value=to_8(note.pedal_at_end),
                                           time=note.end - eps)
        soft_pedal = pretty_midi.ControlChange(number=67, value=to_8(note.soft_pedal),
                                               time=note.start - eps)
        instrument.control_changes.append(pedal1)
        instrument.control_changes.append(pedal2)
        instrument.control_changes.append(soft_pedal)
        primary_pedal.append(pedal1)
        primary_pedal.append(pedal2)

        # if note.pedal_refresh:
        refresh_time = note.start + note.pedal_refresh_time
        # (note.end - note.start) * note.pedal_refresh_time
        pedal3 = pretty_midi.ControlChange(number=64, value=to_8(note.pedal_refresh),
                                           time=refresh_time)
        if pedal3.time < pedal2.time:
            secondary_pedal.append(pedal3)
            instrument.control_changes.append(pedal3)

        # if note.pedal_cut:
        #     cut_time = note.start - (note.end - note.start) * note.pedal_cut_time
        cut_time = note.end + note.pedal_cut_time
        pedal4 = pretty_midi.ControlChange(number=64, value=to_8(note.pedal_cut),
                                           time=cut_time)
        instrument.control_changes.append(pedal4)
        secondary_pedal.append(pedal4)

    primary_pedal.sort(key=lambda x: x.time)

    last_note_end = notes[-1].end
    # end pedal 3 seconds after the last note
    last_pedal = pretty_midi.ControlChange(number=64, value=0, time=last_note_end + 3)
    instrument.control_changes.append(last_pedal)

    return midi_obj
def sequence_proto_to_pretty_midi(sequence,
                                  drop_events_n_seconds_after_last_note=None):
    """Convert tensorflow.magenta.NoteSequence proto to a PrettyMIDI.

    Time is stored in the NoteSequence in absolute values (seconds) as opposed
    to relative values (MIDI ticks). When the NoteSequence is translated back
    to PrettyMIDI the absolute time is retained. The tempo map is also recreated.

    Args:
      sequence: A tensorflow.magenta.NoteSequence proto.
      drop_events_n_seconds_after_last_note: Events (e.g., time signature changes)
          that occur this many seconds after the last note will be dropped. If
          None, then no events will be dropped.

    Returns:
      A pretty_midi.PrettyMIDI object or None if sequence could not be decoded.
    """
    ticks_per_quarter = (sequence.ticks_per_quarter if sequence.ticks_per_quarter
                         else constants.STANDARD_PPQ)

    max_event_time = None
    if drop_events_n_seconds_after_last_note is not None:
        max_event_time = (max([n.end_time for n in sequence.notes] or [0]) +
                          drop_events_n_seconds_after_last_note)

    # Try to find a tempo at time zero. The list is not guaranteed to be in order.
    initial_seq_tempo = None
    for seq_tempo in sequence.tempos:
        if seq_tempo.time == 0:
            initial_seq_tempo = seq_tempo
            break

    kwargs = {}
    kwargs['initial_tempo'] = (initial_seq_tempo.qpm if initial_seq_tempo
                               else constants.DEFAULT_QUARTERS_PER_MINUTE)
    pm = pretty_midi.PrettyMIDI(resolution=ticks_per_quarter, **kwargs)

    # Create an empty instrument to contain time and key signatures.
    instrument = pretty_midi.Instrument(0)
    pm.instruments.append(instrument)

    # Populate time signatures.
    for seq_ts in sequence.time_signatures:
        if max_event_time and seq_ts.time > max_event_time:
            continue
        time_signature = pretty_midi.containers.TimeSignature(
            seq_ts.numerator, seq_ts.denominator, seq_ts.time)
        pm.time_signature_changes.append(time_signature)

    # Populate key signatures.
    for seq_key in sequence.key_signatures:
        if max_event_time and seq_key.time > max_event_time:
            continue
        key_number = seq_key.key
        if seq_key.mode == seq_key.MINOR:
            key_number += _PRETTY_MIDI_MAJOR_TO_MINOR_OFFSET
        key_signature = pretty_midi.containers.KeySignature(
            key_number, seq_key.time)
        pm.key_signature_changes.append(key_signature)

    # Populate tempos.
    # TODO(douglaseck): Update this code if pretty_midi adds the ability to
    # write tempo.
    for seq_tempo in sequence.tempos:
        # Skip if this tempo was added in the PrettyMIDI constructor.
        if seq_tempo == initial_seq_tempo:
            continue
        if max_event_time and seq_tempo.time > max_event_time:
            continue
        tick_scale = 60.0 / (pm.resolution * seq_tempo.qpm)
        tick = pm.time_to_tick(seq_tempo.time)
        # pylint: disable=protected-access
        pm._tick_scales.append((tick, tick_scale))
        pm._update_tick_to_time(0)
        # pylint: enable=protected-access

    # Populate instrument events by first gathering notes and other event types
    # in lists then write them sorted to the PrettyMidi object.
    instrument_events = defaultdict(lambda: defaultdict(list))
    for seq_note in sequence.notes:
        instrument_events[(seq_note.instrument, seq_note.program,
                           seq_note.is_drum)]['notes'].append(
            pretty_midi.Note(seq_note.velocity, seq_note.pitch,
                             seq_note.start_time, seq_note.end_time))
    for seq_bend in sequence.pitch_bends:
        if max_event_time and seq_bend.time > max_event_time:
            continue
        instrument_events[(seq_bend.instrument, seq_bend.program,
                           seq_bend.is_drum)]['bends'].append(
            pretty_midi.PitchBend(seq_bend.bend, seq_bend.time))
    for seq_cc in sequence.control_changes:
        if max_event_time and seq_cc.time > max_event_time:
            continue
        instrument_events[(seq_cc.instrument, seq_cc.program,
                           seq_cc.is_drum)]['controls'].append(
            pretty_midi.ControlChange(seq_cc.control_number,
                                      seq_cc.control_value, seq_cc.time))
    for (instr_id, prog_id, is_drum) in sorted(instrument_events.keys()):
        # For instr_id 0 append to the instrument created above.
        if instr_id > 0:
            instrument = pretty_midi.Instrument(prog_id, is_drum)
            pm.instruments.append(instrument)
        instrument.program = prog_id
        instrument.notes = instrument_events[(instr_id, prog_id,
                                              is_drum)]['notes']
        instrument.pitch_bends = instrument_events[(instr_id, prog_id,
                                                    is_drum)]['bends']
        instrument.control_changes = instrument_events[(instr_id, prog_id,
                                                        is_drum)]['controls']

    return pm
def test_df_to_midi():
    df = pd.DataFrame({
        "onset": 0,
        "track": [0, 0, 1],
        "pitch": [10, 20, 30],
        "dur": 1000
    })

    # Test basic writing
    fileio.df_to_midi(df, "test.mid")
    assert fileio.midi_to_df("test.mid").equals(
        df), "Writing df to MIDI and reading changes df."

    # Test that writing should overwrite existing notes
    df.pitch += 10
    fileio.df_to_midi(df, "test2.mid", existing_midi_path="test.mid")
    assert fileio.midi_to_df("test2.mid").equals(
        df), "Writing df to MIDI with existing MIDI does not overwrite notes."

    # Test that writing skips non-overwritten notes
    fileio.df_to_midi(df, "test2.mid", existing_midi_path="test.mid",
                      excerpt_start=1000)
    expected = pd.DataFrame({
        "onset": [0, 0, 0, 1000, 1000, 1000],
        "track": [0, 0, 1, 0, 0, 1],
        "pitch": [10, 20, 30, 20, 30, 40],
        "dur": 1000,
    })
    assert fileio.midi_to_df("test2.mid").equals(
        expected), "Writing to MIDI doesn't copy notes before excerpt_start"

    # Test that writing skips non-overwritten notes past end
    fileio.df_to_midi(df, "test.mid", existing_midi_path="test2.mid",
                      excerpt_length=1000)
    expected = pd.DataFrame({
        "onset": [0, 0, 0, 1000, 1000, 1000],
        "track": [0, 0, 1, 0, 0, 1],
        "pitch": [20, 30, 40, 20, 30, 40],
        "dur": 1000,
    })
    assert fileio.midi_to_df("test.mid").equals(
        expected), "Writing to MIDI doesn't copy notes after excerpt_length"

    df.track = 2
    fileio.df_to_midi(df, "test.mid", existing_midi_path="test2.mid",
                      excerpt_length=1000)
    expected = pd.DataFrame({
        "onset": [0, 0, 0, 1000, 1000, 1000],
        "track": [2, 2, 2, 0, 0, 1],
        "pitch": [20, 30, 40, 20, 30, 40],
        "dur": 1000,
    })
    assert fileio.midi_to_df("test.mid").equals(
        expected), "Writing to MIDI with extra track breaks"

    # Check all non-note events
    midi_obj = pretty_midi.PrettyMIDI("test.mid")
    midi_obj.instruments[0].name = "test"
    midi_obj.instruments[0].program = 100
    midi_obj.instruments[0].is_drum = True
    midi_obj.instruments[0].pitch_bends.append(pretty_midi.PitchBend(10, 0))
    midi_obj.instruments[0].control_changes.append(
        pretty_midi.ControlChange(10, 10, 0))
    midi_obj.lyrics.append(pretty_midi.Lyric("test", 0))
    midi_obj.time_signature_changes.append(pretty_midi.TimeSignature(2, 4, 1))
    midi_obj.key_signature_changes.append(pretty_midi.KeySignature(5, 1))
    midi_obj.write("test.mid")

    fileio.df_to_midi(expected, "test2.mid", existing_midi_path="test.mid")
    assert fileio.midi_to_df("test2.mid").equals(expected)

    # Check non-note events and data here
    new_midi = pretty_midi.PrettyMIDI("test2.mid")

    for instrument, new_instrument in zip(midi_obj.instruments,
                                          new_midi.instruments):
        assert instrument.name == new_instrument.name
        assert instrument.program == new_instrument.program
        assert instrument.is_drum == new_instrument.is_drum
        for pb, new_pb in zip(instrument.pitch_bends,
                              new_instrument.pitch_bends):
            assert pb.pitch == new_pb.pitch
            assert pb.time == new_pb.time
        for cc, new_cc in zip(instrument.control_changes,
                              new_instrument.control_changes):
            assert cc.number == new_cc.number
            assert cc.value == new_cc.value
            assert cc.time == new_cc.time

    for ks, new_ks in zip(midi_obj.key_signature_changes,
                          new_midi.key_signature_changes):
        assert ks.key_number == new_ks.key_number
        assert ks.time == new_ks.time

    for lyric, new_lyric in zip(midi_obj.lyrics, new_midi.lyrics):
        assert lyric.text == new_lyric.text
        assert lyric.time == new_lyric.time

    for ts, new_ts in zip(midi_obj.time_signature_changes,
                          new_midi.time_signature_changes):
        assert ts.numerator == new_ts.numerator
        assert ts.denominator == new_ts.denominator
        assert ts.time == new_ts.time

    for filename in ["test.mid", "test2.mid"]:
        try:
            os.remove(filename)
        except Exception:
            pass
def save_midi_notes_as_piano_midi(midi_notes, midi_pedals, output_name,
                                  bool_pedal=False, disklavier=False):
    """ Generate midi file by using received midi notes and midi pedals

    Args:
        midi_notes (1-D list) : list of pretty_midi.Note() of shape (num_notes, )
        midi_pedals (1-D list): list of pretty_midi pedal value of shape (num_pedals, )
        output_name (string) : output midi file name
        bool_pedal (boolean) : check whether the method needs to handle meaningless pedal values
        disklavier (boolean) : unused

    Returns:
        -

    Example:
        (in data_class.py -> make_score_midi())
        >>> midi_notes, midi_pedals = xml_utils.xml_notes_to_midi(self.xml_notes)
        >>> xml_utils.save_midi_notes_as_piano_midi(midi_notes, [], midi_file_name, bool_pedal=True)
    """
    piano_midi = pretty_midi.PrettyMIDI()
    piano_program = pretty_midi.instrument_name_to_program('Acoustic Grand Piano')
    piano = pretty_midi.Instrument(program=piano_program)
    # pedal_threhsold = 60
    # pedal_time_margin = 0.2

    for note in midi_notes:
        piano.notes.append(note)

    piano_midi.instruments.append(piano)

    # piano_midi = midi_utils.save_note_pedal_to_CC(piano_midi)

    if bool_pedal:
        for pedal in midi_pedals:
            if pedal.value < pedal_cleaning.THRESHOLD:
                pedal.value = 0

    last_note_end = midi_notes[-1].end
    # end pedal 3 seconds after the last note
    last_pedal = pretty_midi.ControlChange(number=64, value=0, time=last_note_end + 3)
    midi_pedals.append(last_pedal)

    piano_midi.instruments[0].control_changes = midi_pedals

    # if disklavier:
    #     pedals = piano_midi.instruments[0].control_changes
    #     pedals.sort(key=lambda x: x.time)
    #     previous_off_time = 0
    #
    #     prev_high_time = 0
    #     prev_low_time = 0
    #
    #     pedal_remove_candidate = []
    #     for pedal in pedals:
    #         if pedal.number == 67:
    #             continue
    #         if pedal.time < 0.2:
    #             continue
    #         if pedal.value < pedal_threhsold:
    #             previous_off_time = pedal.time
    #         else:
    #             time_passed = pedal.time - previous_off_time
    #             if time_passed < pedal_time_margin:  # hyperparameter
    #                 # pedal.time = previous_off_time + pedal_time_margin
    #                 pedal.value = 30
    #
    #         if pedal.value > 75:
    #             if pedal.time - prev_high_time < 0.25:
    #                 pedal_remove_candidate.append(pedal)
    #             else:
    #                 prev_high_time = pedal.time
    #         if pedal.value < 55:
    #             if pedal.time - prev_low_time < 0.25:
    #                 pedal_remove_candidate.append(pedal)
    #             else:
    #                 prev_low_time = pedal.time
    #
    #     for pedal in pedal_remove_candidate:
    #         pedals.remove(pedal)

    piano_midi.write(output_name)
def tx2_to_midi(tx2):
    import pretty_midi

    tx2 = tx2.strip().splitlines()
    nsamps = sum([int(x.split('_')[1]) for x in tx2 if x[:2] == 'WT'])

    # Create MIDI instruments
    p1_prog = pretty_midi.instrument_name_to_program('Lead 1 (square)')
    p2_prog = pretty_midi.instrument_name_to_program('Lead 2 (sawtooth)')
    tr_prog = pretty_midi.instrument_name_to_program('Synth Bass 1')
    no_prog = pretty_midi.instrument_name_to_program('Breath Noise')
    p1 = pretty_midi.Instrument(program=p1_prog, name='p1', is_drum=False)
    p2 = pretty_midi.Instrument(program=p2_prog, name='p2', is_drum=False)
    tr = pretty_midi.Instrument(program=tr_prog, name='tr', is_drum=False)
    no = pretty_midi.Instrument(program=no_prog, name='no', is_drum=True)

    name_to_ins = {'P1': p1, 'P2': p2, 'TR': tr, 'NO': no}
    name_to_pitch = {'P1': None, 'P2': None, 'TR': None, 'NO': None}
    name_to_start = {'P1': None, 'P2': None, 'TR': None, 'NO': None}
    name_to_max_velocity = {'P1': 15, 'P2': 15, 'TR': 1, 'NO': 15}

    samp = 0
    for event in tx2:
        if event[:2] == 'WT':
            samp += int(event[3:])
        else:
            tokens = event.split('_')
            name = tokens[0]
            ins = name_to_ins[tokens[0]]

            old_pitch = name_to_pitch[name]
            if tokens[1] == 'NOTEON':
                if old_pitch is not None:
                    ins.notes.append(pretty_midi.Note(
                        velocity=name_to_max_velocity[name],
                        pitch=old_pitch,
                        start=name_to_start[name] / 44100.,
                        end=samp / 44100.))
                name_to_pitch[name] = int(tokens[2])
                name_to_start[name] = samp
            elif tokens[1] == 'NOTEOFF':
                if old_pitch is not None:
                    ins.notes.append(pretty_midi.Note(
                        velocity=name_to_max_velocity[name],
                        pitch=name_to_pitch[name],
                        start=name_to_start[name] / 44100.,
                        end=samp / 44100.))
                name_to_pitch[name] = None
                name_to_start[name] = None
            elif tokens[1] == 'VELOCITY':
                ins.control_changes.append(pretty_midi.ControlChange(
                    11, int(tokens[2]), samp / 44100.))
            elif tokens[1] == 'TIMBRE':
                ins.control_changes.append(pretty_midi.ControlChange(
                    12, int(tokens[2]), samp / 44100.))
            else:
                raise ValueError()

    # Deactivating this for generated files
    # for name, pitch in name_to_pitch.items():
    #     assert pitch is None

    # Create MIDI and add instruments
    midi = pretty_midi.PrettyMIDI(initial_tempo=120, resolution=22050)
    midi.instruments.extend([p1, p2, tr, no])

    # Create indicator for end of song
    eos = pretty_midi.TimeSignature(1, 1, nsamps / 44100.)
    midi.time_signature_changes.append(eos)

    """
    for ins in midi.instruments:
        print('-' * 80)
        print(ins.name)
        for note in ins.notes[:4]:
            print(note)
        for cc in ins.control_changes[:4]:
            print(cc)
    """

    with tempfile.NamedTemporaryFile('rb') as mf:
        midi.write(mf.name)
        midi = mf.read()

    return midi