def proc_to_midi(melody_events, chord_events, key='C', to_chroma=False,
                 bpm=120, beats_in_measure=4, save_path='./', name='test'):
    bpm = float(bpm)
    if bpm == 0.0:
        bpm = 120
    beats_in_measure = int(beats_in_measure)
    lead_sheet = pretty_midi.PrettyMIDI(initial_tempo=bpm)
    beats_sec = 60.0 / bpm
    chord_track, chord_symbols = wrapping_chord(chord_events, beats_sec, to_chroma=to_chroma)
    melody_track = wrapping_melody(melody_events, beats_sec)
    ts = pretty_midi.TimeSignature(beats_in_measure, 4, 0)
    ks = pretty_midi.KeySignature(get_key_offset(key), 0)
    lead_sheet.time_signature_changes.append(ts)
    lead_sheet.key_signature_changes.append(ks)
    lead_sheet.instruments.append(melody_track)
    lead_sheet.instruments.append(chord_track)
    lead_sheet.lyrics = chord_symbols
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    filename = os.path.join(save_path, name + '.mid')
    lead_sheet.write(filename)
    return filename
def proc_to_midi(melody_events, chord_events, chord_dic, key='C', to_chroma=False,
                 bpm=120, beats_in_measure=4):
    bpm = float(bpm)
    if bpm == 0.0:
        bpm = 120
    beats_in_measure = int(beats_in_measure)
    beats_sec = 60.0 / bpm
    lead_sheet = pretty_midi.PrettyMIDI(initial_tempo=bpm)
    chord_track = wrapping_chord(chord_events, chord_dic, beats_sec)
    melody_track = wrapping_melody(melody_events, beats_sec)
    ts = pretty_midi.TimeSignature(beats_in_measure, 4, 0)
    ks = pretty_midi.KeySignature(get_key_offset(key), 0)
    lead_sheet.time_signature_changes.append(ts)
    lead_sheet.key_signature_changes.append(ks)
    lead_sheet.instruments.append(melody_track)
    lead_sheet.instruments.append(chord_track)
    return lead_sheet
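# A minimal usage sketch (assumed, not part of the original code): unlike the first
# variant, this proc_to_midi returns the PrettyMIDI object instead of writing it, so
# the caller saves the file. `melody`, `chords`, and `chord_dic` are placeholder data.
lead_sheet = proc_to_midi(melody, chords, chord_dic, key='C', bpm=100, beats_in_measure=4)
lead_sheet.write('lead_sheet.mid')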
def decoding_to_midi(encoded_matrix, tempo=100, time_signature="4/4"):
    """
    Decodes a matrix encoded for LSTM back to a PrettyMIDI file
    :param encoded_matrix:
    :param tempo:
    :param time_signature:
    :return midi_data:
    """
    # E2/B5 (presumably MIDI pitch bounds of the guitar range) and beat_length
    # (presumably timesteps per beat) are module-level constants.
    # Trim out beat matrix
    approx_to_zero = np.vectorize(lambda x: 1 if np.random.random() < x else 0)
    # Split encoded_matrix into piano_roll and attack_matrix
    piano_roll = approx_to_zero(encoded_matrix[:B5 - E2 + 1, :])
    attack_matrix = approx_to_zero(encoded_matrix[-6:, :])
    plucks_per_timestep = attack_matrix.sum(axis=0)
    pluck_nonzero = plucks_per_timestep.nonzero()[0]
    midi_data = pretty_midi.PrettyMIDI(initial_tempo=tempo)
    time_signature = time_signature.split('/')
    midi_data.time_signature_changes.append(
        pretty_midi.TimeSignature(int(time_signature[0]), int(time_signature[1]), 0))
    midi_data.instruments.append(pretty_midi.Instrument(24, name="Guitar"))
    timesteps_per_second = tempo / 60 * beat_length
    for timestep in reversed(pluck_nonzero):
        plucks = np.array(attack_matrix[:, timestep].nonzero(), dtype=np.int32).flatten()
        notes_played = np.array(piano_roll[:, timestep].nonzero(), dtype=np.int32).flatten()
        try:
            pitches = notes_played[plucks]
        except IndexError:
            continue
        for pitch in pitches:
            notes_equal_pitch = list(
                filter(lambda note: note.pitch == pitch + E2,
                       midi_data.instruments[0].notes))
            if notes_equal_pitch:
                next_pluck = int(min(notes_equal_pitch, key=lambda note: note.start).start
                                 * timesteps_per_second)
            else:
                next_pluck = 1e20
            note_timesteps = 1
            while (timestep + note_timesteps < piano_roll.shape[1]
                   and timestep + note_timesteps < next_pluck
                   and piano_roll[pitch, timestep + note_timesteps]):
                note_timesteps += 1
            note_length = note_timesteps / timesteps_per_second
            begin = timestep / timesteps_per_second
            midi_data.instruments[0].notes.append(
                pretty_midi.Note(127, pitch + E2, begin, begin + note_length))
    return midi_data
def pianoroll_to_midi(pianoroll, midi_folder, filename, instrument_name, bpm):
    if not os.path.exists(midi_folder):
        os.makedirs(midi_folder)
    midi = pm.PrettyMIDI(initial_tempo=bpm, resolution=200)
    midi.time_signature_changes.append(pm.TimeSignature(4, 4, 0))
    piano_program = pm.instrument_name_to_program(instrument_name)
    piano = pm.Instrument(program=piano_program)
    ind = np.nonzero(pianoroll)
    for i in range(ind[0].shape[0]):
        # Each piano-roll step spans half a beat: 60 / (2 * bpm) seconds
        note = pm.Note(velocity=80, pitch=ind[1][i],
                       start=(60 / (2 * bpm)) * ind[0][i],
                       end=(60 / (2 * bpm)) * ind[0][i] + 0.25)
        piano.notes.append(note)
    midi.instruments.append(piano)
    # print(midi.get_tempo_changes())
    midi.write(os.path.join(midi_folder, filename + '.mid'))
def matrix_to_midi(matrix, file_name='output', output_path='./', tempo=120):
    instr = pretty_midi.Instrument(0, True, name='beat')
    midi = pretty_midi.PrettyMIDI(initial_tempo=tempo)
    song = _matrix_to_notes(matrix, tempo)
    instr.notes = song
    midi.instruments = [instr]
    midi.time_signature_changes.append(pretty_midi.TimeSignature(4, 4, 0.0))
    if output_path[-1] != '/':
        output_path += '/'
    midi.write(output_path + file_name + '.mid')
    print('[Midi Process][Output] Midi file for', file_name, 'to', output_path)
def pianoroll_to_midi_continous(pianoroll, midi_folder, filename, instrument_name, bpm):
    if not os.path.exists(midi_folder):
        os.makedirs(midi_folder)
    midi = pm.PrettyMIDI(initial_tempo=bpm, resolution=200)
    midi.time_signature_changes.append(pm.TimeSignature(4, 4, 0))
    piano_program = pm.instrument_name_to_program(instrument_name)
    piano = pm.Instrument(program=piano_program)
    tracker = []
    start_times = dict()
    for i, note_vector in enumerate(pianoroll):
        notes = list(note_vector.nonzero()[0])
        # print('notes', notes)
        removal_list = []
        for note in tracker:
            if note in notes and i % 8 != 0:
                # Still sounding: treat as held rather than re-attacked
                # print('removing', note, 'from notes')
                notes.remove(note)
            else:
                midi_note = pm.Note(velocity=80, pitch=note,
                                    start=(60 / (2 * bpm)) * start_times[note],
                                    end=(60 / (2 * bpm)) * i)
                piano.notes.append(midi_note)
                # print('removing', note, 'from tracker')
                removal_list.append(note)
        for note in removal_list:
            tracker.remove(note)
        # print('tracker', tracker)
        # print('notes', notes)
        for note in notes:
            tracker.append(note)
            start_times[note] = i
        # print('tracker', tracker)
        # print('-' * 50)
    midi.instruments.append(piano)
    # print(midi.get_tempo_changes())
    midi.write(os.path.join(midi_folder, filename + '.mid'))
def test_get_end_time():
    pm = pretty_midi.PrettyMIDI()
    inst = pretty_midi.Instrument(0)
    pm.instruments.append(inst)
    # When no events, end time should be 0
    assert pm.get_end_time() == 0
    # End time should be sensitive to inst notes, pitch bends, control changes
    inst.notes.append(
        pretty_midi.Note(start=0.5, end=1.7, pitch=30, velocity=100))
    assert np.allclose(pm.get_end_time(), 1.7)
    inst.pitch_bends.append(pretty_midi.PitchBend(pitch=100, time=1.9))
    assert np.allclose(pm.get_end_time(), 1.9)
    inst.control_changes.append(
        pretty_midi.ControlChange(number=0, value=10, time=2.1))
    assert np.allclose(pm.get_end_time(), 2.1)
    # End time should be sensitive to meta events
    pm.time_signature_changes.append(
        pretty_midi.TimeSignature(numerator=4, denominator=4, time=2.3))
    assert np.allclose(pm.get_end_time(), 2.3)
    pm.key_signature_changes.append(
        pretty_midi.KeySignature(key_number=10, time=2.5))
    assert np.allclose(pm.get_end_time(), 2.5)
    pm.lyrics.append(pretty_midi.Lyric(text='hey', time=2.7))
    assert np.allclose(pm.get_end_time(), 2.7)
def test_adjust_times():
    # Simple tests for adjusting note times
    def simple():
        pm = pretty_midi.PrettyMIDI()
        i = pretty_midi.Instrument(0)
        # Create 9 notes, at times [1, 2, 3, 4, 5, 6, 7, 8, 9]
        for n, start in enumerate(range(1, 10)):
            i.notes.append(pretty_midi.Note(100, 100 + n, start, start + .5))
        pm.instruments.append(i)
        return pm
    # Test notes are interpolated as expected
    pm = simple()
    pm.adjust_times([0, 10], [5, 20])
    for note, start in zip(pm.instruments[0].notes,
                           1.5 * np.arange(1, 10) + 5):
        assert note.start == start
    # Test notes are all omitted when adjustment range doesn't cover them
    pm = simple()
    pm.adjust_times([10, 20], [5, 10])
    assert len(pm.instruments[0].notes) == 0
    # Test repeated mapping times
    pm = simple()
    pm.adjust_times([0, 5, 6.5, 10], [5, 10, 10, 17])
    # Original times [1, 2, 3, 4, 7, 8, 9]
    # The notes at times 5 and 6 have their durations squashed to zero
    expected_starts = [6, 7, 8, 9, 11, 13, 15]
    assert np.allclose([n.start for n in pm.instruments[0].notes],
                       expected_starts)
    pm = simple()
    pm.adjust_times([0, 5, 5, 10], [7, 12, 13, 17])
    # Original times [1, 2, 3, 4, 5, 6, 7, 8, 9]
    expected_starts = [8, 9, 10, 11, 12, 13, 14, 15, 16]
    assert np.allclose([n.start for n in pm.instruments[0].notes],
                       expected_starts)
    # Complicated example
    pm = simple()
    # Include pitch bends and control changes to test adjust_events
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(100, 1.))
    # Include events which fall within the omitted region
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(200, 7.))
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(0, 7.1))
    # Include event which falls outside of the track
    pm.instruments[0].pitch_bends.append(pretty_midi.PitchBend(10, 10.))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 0, .5))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 1, 5.5))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 2, 7.5))
    pm.instruments[0].control_changes.append(
        pretty_midi.ControlChange(0, 3, 20.))
    # Include track-level meta events to test adjust_meta
    pm.time_signature_changes.append(pretty_midi.TimeSignature(3, 4, .1))
    pm.time_signature_changes.append(pretty_midi.TimeSignature(4, 4, 5.2))
    pm.time_signature_changes.append(pretty_midi.TimeSignature(6, 4, 6.2))
    pm.time_signature_changes.append(pretty_midi.TimeSignature(5, 4, 15.3))
    pm.key_signature_changes.append(pretty_midi.KeySignature(1, 1.))
    pm.key_signature_changes.append(pretty_midi.KeySignature(2, 6.2))
    pm.key_signature_changes.append(pretty_midi.KeySignature(3, 7.2))
    pm.key_signature_changes.append(pretty_midi.KeySignature(4, 12.3))
    # Add in tempo changes - 100 bpm at 0s
    pm._tick_scales[0] = (0, 60. / (100 * pm.resolution))
    # 110 bpm at 6s
    pm._tick_scales.append((2200, 60. / (110 * pm.resolution)))
    # 120 bpm at 8.1s
    pm._tick_scales.append((3047, 60. / (120 * pm.resolution)))
    # 150 bpm at 8.3s
    pm._tick_scales.append((3135, 60. / (150 * pm.resolution)))
    # 80 bpm at 9.3s
    pm._tick_scales.append((3685, 60. / (80 * pm.resolution)))
    pm._update_tick_to_time(20000)
    # Adjust times, with a collapsing section in original and new times
    pm.adjust_times([2., 3.1, 3.1, 5.1, 7.5, 10], [5., 6., 7., 8.5, 8.5, 11])
    # Original tempo change times: [0, 6, 8.1, 8.3, 9.3]
    # Plus tempo changes at each of new_times which are not collapsed
    # Plus tempo change at 0s by default
    expected_times = [
        0., 5., 6., 8.5,
        8.5 + (6 - 5.1) * (11 - 8.5) / (10 - 5.1),
        8.5 + (8.1 - 5.1) * (11 - 8.5) / (10 - 5.1),
        8.5 + (8.3 - 5.1) * (11 - 8.5) / (10 - 5.1),
        8.5 + (9.3 - 5.1) * (11 - 8.5) / (10 - 5.1)
    ]
    # Tempos scaled by differences in timing, plus 120 bpm at the beginning
    expected_tempi = [
        120.,
        100 * (3.1 - 2) / (6 - 5),
        100 * (5.1 - 3.1) / (8.5 - 6),
        100 * (10 - 5.1) / (11 - 8.5),
        110 * (10 - 5.1) / (11 - 8.5),
        120 * (10 - 5.1) / (11 - 8.5),
        150 * (10 - 5.1) / (11 - 8.5),
        80 * (10 - 5.1) / (11 - 8.5)
    ]
    change_times, tempi = pm.get_tempo_changes()
    # Because tempo change times must occur at discrete ticks, we must raise
    # the relative tolerance when comparing
    assert np.allclose(expected_times, change_times, rtol=.001)
    assert np.allclose(expected_tempi, tempi, rtol=.002)
    # Test that all other events were interpolated as expected
    note_starts = [
        5.0, 5 + 1 / 1.1, 6 + .9 / (2 / 2.5), 6 + 1.9 / (2 / 2.5),
        8.5 + .5, 8.5 + 1.5
    ]
    note_ends = [
        5 + .5 / 1.1, 6 + .4 / (2 / 2.5), 6 + 1.4 / (2 / 2.5), 8.5,
        8.5 + 1., 10 + .5
    ]
    note_pitches = [101, 102, 103, 104, 107, 108]
    for note, s, e, p in zip(pm.instruments[0].notes, note_starts,
                             note_ends, note_pitches):
        assert note.start == s
        assert note.end == e
        assert note.pitch == p
    bend_times = [5., 8.5, 8.5]
    bend_pitches = [100, 200, 0]
    for bend, t, p in zip(pm.instruments[0].pitch_bends, bend_times,
                          bend_pitches):
        assert bend.time == t
        assert bend.pitch == p
    cc_times = [5., 8.5, 8.5]
    cc_values = [0, 1, 2]
    for cc, t, v in zip(pm.instruments[0].control_changes, cc_times,
                        cc_values):
        assert cc.time == t
        assert cc.value == v
    # The first time signature change will be placed at the first interpolated
    # downbeat location - so, start by computing the location of the first
    # downbeat after the start of original_times, then interpolate it
    first_downbeat_after = .1 + 2 * 3 * 60. / 100.
    first_ts_time = 6. + (first_downbeat_after - 3.1) / (2. / 2.5)
    ts_times = [first_ts_time, 8.5, 8.5]
    ts_numerators = [3, 4, 6]
    for ts, t, n in zip(pm.time_signature_changes, ts_times, ts_numerators):
        assert np.isclose(ts.time, t)
        assert ts.numerator == n
    ks_times = [5., 8.5, 8.5]
    ks_keys = [1, 2, 3]
    for ks, t, k in zip(pm.key_signature_changes, ks_times, ks_keys):
        assert ks.time == t
        assert ks.key_number == k
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.set_defaults(func=main)
    parser.add_argument('input_file', type=argparse.FileType('rb'), metavar='FILE')
    parser.add_argument('output_dir', type=str, metavar='OUTPUTDIR')
    parser.add_argument('-i', '--instrument', type=str, default='')
    parser.add_argument('--drums', action='store_true')
    parser.add_argument('-p', '--program', type=int)
    parser.add_argument('--stretch', type=str)
    parser.add_argument('--tempo', type=float)
    parser.add_argument('--time-signature',
                        type=lambda s: tuple(int(x) for x in s.split('/')),
                        default=(4, 4))
    parser.add_argument('--resolution', type=int, default=480)
    parser.add_argument(
        '--range',
        type=lambda r: [None if x == '' else int(x) for x in r.split(':')])
    parser.add_argument('--group-by-name', action='store_true')
    parser.add_argument('--time-unit', type=float)
    args = parser.parse_args()

    if args.program is None:
        if args.instrument:
            args.program = pretty_midi.instrument_name_to_program(args.instrument)
        else:
            args.program = 0

    tempo = 60.
    if args.stretch:
        # Calculate the time stretch ratio
        if ':' in args.stretch:
            a, b = args.stretch.split(':')
            args.stretch = float(a) / float(b)
            tempo = float(b)
        else:
            args.stretch = float(args.stretch)
        tempo = tempo / args.stretch
    if args.tempo:
        tempo = args.tempo

    data = pickle.load(args.input_file)
    fill_length = len(str(len(data) - 1))
    if args.range:
        data = data[slice(*args.range)]

    if args.group_by_name:
        grouped = collections.defaultdict(list)
        for (name, start, end), notes in data:
            start, end = start * args.time_unit, end * args.time_unit
            grouped[name].extend(
                _shift_and_clip(note, start, end)
                for note in notes
                if note.start + start < end and note.end > 0.)
        data = list(grouped.items())

    for i, segment in enumerate(data):
        midi = pretty_midi.PrettyMIDI(initial_tempo=tempo, resolution=args.resolution)
        if args.range and args.range[0]:
            i += args.range[0]
        index = str(i).zfill(fill_length)

        if isinstance(segment, list):
            notes = segment
            fname = f'{index}.mid'
        elif isinstance(segment, tuple) and len(segment) == 2:
            segment_id, notes = segment
            if not isinstance(segment_id, str):
                segment_id = '_'.join(str(x) for x in segment_id)
            fname = f'{index}_{segment_id}.mid'
        else:
            raise RuntimeError(f'Cannot parse segment: {segment}')

        if args.stretch is not None:
            for note in notes:
                note.start *= args.stretch
                note.end *= args.stretch

        # Remove notes with length below the MIDI resolution
        notes = [
            note for note in notes
            if midi.time_to_tick(note.start) < midi.time_to_tick(note.end)
        ]

        # Some notes might be overlapping, we need to split them between multiple tracks.
        # TODO: This calls for a more efficient implementation.
        tracks = [[]]
        for note in notes:
            for track in tracks:
                # Find the first track without an overlapping note.
                if not any(note2.pitch == note.pitch and note2.start < note.end
                           and note2.end > note.start for note2 in track):
                    track.append(note)
                    break
            # Always keep the last track empty (we delete it afterwards)
            if tracks[-1]:
                tracks.append([])
        del tracks[-1]

        midi.time_signature_changes[:] = [
            pretty_midi.TimeSignature(*args.time_signature, 0.)
        ]
        for track in tracks:
            instrument = pretty_midi.Instrument(name=args.instrument,
                                                program=args.program,
                                                is_drum=args.drums)
            instrument.notes[:] = track
            midi.instruments.append(instrument)
        midi.write(os.path.join(args.output_dir, fname))
def tx1_to_midi(tx1):
    import pretty_midi

    tx1 = tx1.strip().splitlines()
    nsamps = sum([int(x.split('_')[1]) for x in tx1 if x[:2] == 'WT'])

    # Create MIDI instruments
    p1_prog = pretty_midi.instrument_name_to_program('Lead 1 (square)')
    p2_prog = pretty_midi.instrument_name_to_program('Lead 2 (sawtooth)')
    tr_prog = pretty_midi.instrument_name_to_program('Synth Bass 1')
    no_prog = pretty_midi.instrument_name_to_program('Breath Noise')
    p1 = pretty_midi.Instrument(program=p1_prog, name='p1', is_drum=False)
    p2 = pretty_midi.Instrument(program=p2_prog, name='p2', is_drum=False)
    tr = pretty_midi.Instrument(program=tr_prog, name='tr', is_drum=False)
    no = pretty_midi.Instrument(program=no_prog, name='no', is_drum=True)

    name_to_ins = {'P1': p1, 'P2': p2, 'TR': tr, 'NO': no}
    name_to_pitch = {'P1': None, 'P2': None, 'TR': None, 'NO': None}
    name_to_start = {'P1': None, 'P2': None, 'TR': None, 'NO': None}
    name_to_max_velocity = {'P1': 15, 'P2': 15, 'TR': 1, 'NO': 15}

    samp = 0
    for event in tx1:
        if event == '<eos>':
            continue
        if event[:2] == 'WT':
            samp += int(event[3:])
        else:
            tokens = event.split('_')
            name = tokens[0]
            ins = name_to_ins[tokens[0]]
            old_pitch = name_to_pitch[name]
            if tokens[1] == 'NOTEON':
                if old_pitch is not None:
                    ins.notes.append(
                        pretty_midi.Note(velocity=name_to_max_velocity[name],
                                         pitch=old_pitch,
                                         start=name_to_start[name] / 44100.,
                                         end=samp / 44100.))
                name_to_pitch[name] = int(tokens[2])
                name_to_start[name] = samp
            else:
                if old_pitch is not None:
                    ins.notes.append(
                        pretty_midi.Note(velocity=name_to_max_velocity[name],
                                         pitch=name_to_pitch[name],
                                         start=name_to_start[name] / 44100.,
                                         end=samp / 44100.))
                name_to_pitch[name] = None
                name_to_start[name] = None

    # Deactivating this for generated files
    #for name, pitch in name_to_pitch.items():
    #    assert pitch is None

    # Create MIDI and add instruments
    midi = pretty_midi.PrettyMIDI(initial_tempo=120, resolution=22050)
    midi.instruments.extend([p1, p2, tr, no])

    # Create indicator for end of song
    eos = pretty_midi.TimeSignature(1, 1, nsamps / 44100.)
    midi.time_signature_changes.append(eos)

    # with tempfile.NamedTemporaryFile('rb') as mf:
    #     midi.write(mf.name)
    #     midi = mf.read()

    return midi
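# A minimal usage sketch (assumed, not from the original code) of the TX1 event stream
# consumed by tx1_to_midi above: "<channel>_NOTEON_<pitch>" / "<channel>_NOTEOFF" events
# separated by "WT_<n>" wait tokens, with n counted in audio samples at 44.1 kHz.
tx1_events = '\n'.join(['P1_NOTEON_60', 'WT_22050', 'P1_NOTEOFF', 'WT_22050'])
midi = tx1_to_midi(tx1_events)  # one half-second note on the P1 (square) channel
midi.write('tx1_example.mid')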
# Read the csv file
score = Score(meta_path=meta_path, notes_path=notes_path, chords_path=chords_path)
prev_measure = 0
prev_chord = ""
# Get the number of quarter notes per minute
tempo, unit = float(score.notes[0]["Tempo"].split("_")[0]), int(score.notes[0]["Tempo"].split("_")[-1])
tempo *= (4 / unit)
piano_c_chord = pretty_midi.PrettyMIDI(initial_tempo=tempo)
# Get seconds per quarter note
sec_per_quarter = 60 / tempo
# Create an Instrument instance for a piano instrument
piano_program = pretty_midi.instrument_name_to_program('Acoustic Grand Piano')
piano = pretty_midi.Instrument(program=piano_program)
t_sig = pretty_midi.TimeSignature(int(score.meta["Time"].split("_")[0]),
                                  int(score.meta["Time"].split("_")[1]), 0.0)
A, B = int(score.meta["Time"].split("_")[0]), int(score.meta["Time"].split("_")[1])
# A is the number of quarter notes in a measure
if B == 2:
    A *= 2
for note in score.notes:
    # Note name -> Note number
    note_number = int(note["Midi"])
    # dur(sec) = #quarter_notes * sec_per_quarter_note
    #          = (duration_in_division_units / division_units_per_quarter_note) * sec_per_quarter_note
    dur = float(note["Duration"]) / float(score.meta['Division']) * sec_per_quarter
    # onset(sec) = (#quarter_notes + #parsed_measures * #quarter_notes_per_measure) * sec_per_quarter_note
    onset = (float(note["Onset"]) / float(score.meta["Division"]) + float(note["Measure"]) * A) * sec_per_quarter
    # If this note is not a rest, then add it in. A grace note will have a 0 duration, so it will be ignored.
    if note_number > 0:
        # Assumed completion of the truncated snippet: append the note to the piano track.
        piano.notes.append(pretty_midi.Note(velocity=100, pitch=note_number,
                                            start=onset, end=onset + dur))
def tx1_to_midi(tx1, out_fp):
    import pretty_midi

    tx1 = tx1.strip().splitlines()
    nsamps = sum([int(x.split('_')[1]) for x in tx1 if x[:2] == 'WT'])

    GOOD_INSTRUMENTS = {
        'Distortion Guitar': 30,
        'Acoustic Grand Piano': 0,  # AKA Drums...don't ask me why
        'Overdriven Guitar': 29,
        'Electric Bass (finger)': 33,
        'Electric Bass (pick)': 34,
        'Electric Guitar (clean)': 27,
        'Acoustic Guitar (steel)': 25,
    }
    CHANNELS_TO_NEW_NAMES = {
        30: 'DG',
        0: 'AGP',
        29: 'OG',
        33: 'EBF',
        34: 'EBP',
        27: 'EGC',
        25: 'AGS'
    }

    # Create MIDI instruments
    name_to_ins = {}
    name_to_pitch = {}
    name_to_start = {}
    for instrument_name in GOOD_INSTRUMENTS:
        program = GOOD_INSTRUMENTS[instrument_name]
        noun = CHANNELS_TO_NEW_NAMES[program]
        instr = pretty_midi.Instrument(program=program, name=noun,
                                       is_drum=(program == 0))
        name_to_ins[noun] = instr
        name_to_pitch[noun] = None
        name_to_start[noun] = None

    samp = 0
    for event in tx1:
        if event[:2] == 'WT':
            samp += int(event[3:])
        else:
            tokens = event.split('_')
            name = tokens[0]
            ins = name_to_ins[tokens[0]]
            old_pitch = name_to_pitch[name]
            if tokens[1] == 'NOTEON':
                if old_pitch is not None:
                    ins.notes.append(
                        pretty_midi.Note(velocity=127,
                                         pitch=old_pitch,
                                         start=name_to_start[name] / 44100.,
                                         end=samp / 44100.))
                name_to_pitch[name] = int(tokens[2])
                name_to_start[name] = samp
            else:
                if old_pitch is not None:
                    ins.notes.append(
                        pretty_midi.Note(velocity=127,
                                         pitch=name_to_pitch[name],
                                         start=name_to_start[name] / 44100.,
                                         end=samp / 44100.))
                name_to_pitch[name] = None
                name_to_start[name] = None

    # Deactivating this for generated files
    #for name, pitch in name_to_pitch.items():
    #    assert pitch is None

    # Create MIDI and add instruments
    midi = pretty_midi.PrettyMIDI(initial_tempo=120, resolution=22050)
    midi.instruments.extend(name_to_ins.values())

    # Create indicator for end of song
    eos = pretty_midi.TimeSignature(1, 1, nsamps / 44100.)
    midi.time_signature_changes.append(eos)

    midi.write(out_fp)
    with tempfile.NamedTemporaryFile('rb') as mf:
        midi.write(mf.name)
        midi = mf.read()
    return midi
def test_df_to_midi():
    df = pd.DataFrame({
        "onset": 0,
        "track": [0, 0, 1],
        "pitch": [10, 20, 30],
        "dur": 1000
    })

    # Test basic writing
    fileio.df_to_midi(df, "test.mid")
    assert fileio.midi_to_df("test.mid").equals(
        df), "Writing df to MIDI and reading changes df."

    # Test that writing should overwrite existing notes
    df.pitch += 10
    fileio.df_to_midi(df, "test2.mid", existing_midi_path="test.mid")
    assert fileio.midi_to_df("test2.mid").equals(
        df), "Writing df to MIDI with existing MIDI does not overwrite notes."

    # Test that writing skips non-overwritten notes
    fileio.df_to_midi(df, "test2.mid", existing_midi_path="test.mid",
                      excerpt_start=1000)
    expected = pd.DataFrame({
        "onset": [0, 0, 0, 1000, 1000, 1000],
        "track": [0, 0, 1, 0, 0, 1],
        "pitch": [10, 20, 30, 20, 30, 40],
        "dur": 1000,
    })
    assert fileio.midi_to_df("test2.mid").equals(
        expected), "Writing to MIDI doesn't copy notes before excerpt_start"

    # Test that writing skips non-overwritten notes past end
    fileio.df_to_midi(df, "test.mid", existing_midi_path="test2.mid",
                      excerpt_length=1000)
    expected = pd.DataFrame({
        "onset": [0, 0, 0, 1000, 1000, 1000],
        "track": [0, 0, 1, 0, 0, 1],
        "pitch": [20, 30, 40, 20, 30, 40],
        "dur": 1000,
    })
    assert fileio.midi_to_df("test.mid").equals(
        expected), "Writing to MIDI doesn't copy notes after excerpt_length"

    df.track = 2
    fileio.df_to_midi(df, "test.mid", existing_midi_path="test2.mid",
                      excerpt_length=1000)
    expected = pd.DataFrame({
        "onset": [0, 0, 0, 1000, 1000, 1000],
        "track": [2, 2, 2, 0, 0, 1],
        "pitch": [20, 30, 40, 20, 30, 40],
        "dur": 1000,
    })
    assert fileio.midi_to_df("test.mid").equals(
        expected), "Writing to MIDI with extra track breaks"

    # Check all non-note events
    midi_obj = pretty_midi.PrettyMIDI("test.mid")
    midi_obj.instruments[0].name = "test"
    midi_obj.instruments[0].program = 100
    midi_obj.instruments[0].is_drum = True
    midi_obj.instruments[0].pitch_bends.append(pretty_midi.PitchBend(10, 0))
    midi_obj.instruments[0].control_changes.append(
        pretty_midi.ControlChange(10, 10, 0))
    midi_obj.lyrics.append(pretty_midi.Lyric("test", 0))
    midi_obj.time_signature_changes.append(pretty_midi.TimeSignature(2, 4, 1))
    midi_obj.key_signature_changes.append(pretty_midi.KeySignature(5, 1))
    midi_obj.write("test.mid")

    fileio.df_to_midi(expected, "test2.mid", existing_midi_path="test.mid")
    assert fileio.midi_to_df("test2.mid").equals(expected)

    # Check non-note events and data here
    new_midi = pretty_midi.PrettyMIDI("test2.mid")
    for instrument, new_instrument in zip(midi_obj.instruments,
                                          new_midi.instruments):
        assert instrument.name == new_instrument.name
        assert instrument.program == new_instrument.program
        assert instrument.is_drum == new_instrument.is_drum
        for pb, new_pb in zip(instrument.pitch_bends,
                              new_instrument.pitch_bends):
            assert pb.pitch == new_pb.pitch
            assert pb.time == new_pb.time
        for cc, new_cc in zip(instrument.control_changes,
                              new_instrument.control_changes):
            assert cc.number == new_cc.number
            assert cc.value == new_cc.value
            assert cc.time == new_cc.time

    for ks, new_ks in zip(midi_obj.key_signature_changes,
                          new_midi.key_signature_changes):
        assert ks.key_number == new_ks.key_number
        assert ks.time == new_ks.time

    for lyric, new_lyric in zip(midi_obj.lyrics, new_midi.lyrics):
        assert lyric.text == new_lyric.text
        assert lyric.time == new_lyric.time

    for ts, new_ts in zip(midi_obj.time_signature_changes,
                          new_midi.time_signature_changes):
        assert ts.numerator == new_ts.numerator
        assert ts.denominator == new_ts.denominator
        assert ts.time == new_ts.time

    for filename in ["test.mid", "test2.mid"]:
        try:
            os.remove(filename)
        except Exception:
            pass
def main(args):
    if args.program is None:
        if args.instrument:
            args.program = pretty_midi.instrument_name_to_program(args.instrument)
        else:
            args.program = 0

    tempo = 60.
    if args.stretch:
        # Calculate the time stretch ratio
        if ':' in args.stretch:
            a, b = args.stretch.split(':')
            args.stretch = float(a) / float(b)
            tempo = float(b)
        else:
            args.stretch = float(args.stretch)
        tempo = tempo / args.stretch
    if args.tempo:
        tempo = args.tempo

    data = pickle.load(args.input_file)
    fill_length = len(str(len(data) - 1))
    if args.range:
        data = data[slice(*args.range)]

    if args.group_by_name:
        grouped = collections.defaultdict(list)
        for (name, start, end), notes in data:
            start, end = start * args.time_unit, end * args.time_unit
            grouped[name].extend(
                _shift_and_clip(note, start, end)
                for note in notes
                if note.start + start < end and note.end > 0.)
        data = list(grouped.items())

    for i, segment in enumerate(data):
        midi = pretty_midi.PrettyMIDI(initial_tempo=tempo, resolution=args.resolution)
        if args.range and args.range[0]:
            i += args.range[0]
        index = str(i).zfill(fill_length)

        if isinstance(segment, list):
            notes = segment
            fname = f'{index}.mid'
        elif isinstance(segment, tuple) and len(segment) == 2:
            segment_id, notes = segment
            if not isinstance(segment_id, str):
                segment_id = '_'.join(str(x) for x in segment_id)
            fname = f'{index}_{segment_id}.mid'
        else:
            raise RuntimeError(f'Cannot parse segment: {segment}')

        if args.stretch is not None:
            for note in notes:
                note.start *= args.stretch
                note.end *= args.stretch

        # Remove notes with length below the MIDI resolution
        notes = [
            note for note in notes
            if midi.time_to_tick(note.start) < midi.time_to_tick(note.end)
        ]

        # Some notes might be overlapping, we need to split them between multiple tracks.
        # TODO: This calls for a more efficient implementation.
        tracks = [[]]
        for note in notes:
            for track in tracks:
                # Find the first track without an overlapping note.
                if not any(note2.pitch == note.pitch and note2.start < note.end
                           and note2.end > note.start for note2 in track):
                    track.append(note)
                    break
            # Always keep the last track empty (we delete it afterwards)
            if tracks[-1]:
                tracks.append([])
        del tracks[-1]

        midi.time_signature_changes[:] = [
            pretty_midi.TimeSignature(*args.time_signature, 0.)
        ]
        for track in tracks:
            instrument = pretty_midi.Instrument(name=args.instrument,
                                                program=args.program,
                                                is_drum=args.drums)
            instrument.notes[:] = track
            midi.instruments.append(instrument)
        midi.write(os.path.join(args.output_dir, fname))
def emit_nesmdb_midi_examples(midi_fp,
                              output_dir,
                              min_num_instruments=1,
                              filter_mid_len_below_seconds=5.,
                              filter_mid_len_above_seconds=600.,
                              filter_mid_bad_times=True,
                              filter_ins_max_below=21,
                              filter_ins_min_above=108,
                              filter_ins_duplicate=True,
                              output_include_drums=True,
                              output_max_num=16,
                              output_max_num_seconds=180.):
    midi_name = os.path.split(midi_fp)[1].split('.')[0]

    if min_num_instruments <= 0:
        raise ValueError()

    # Ignore unusually large MIDI files (only ~25 of these in the dataset)
    if os.path.getsize(midi_fp) > (512 * 1024):  # 512K
        return

    try:
        midi = pretty_midi.PrettyMIDI(midi_fp)
    except:
        return

    # Filter MIDIs with extreme length
    midi_len = midi.get_end_time()
    if midi_len < filter_mid_len_below_seconds or midi_len > filter_mid_len_above_seconds:
        return

    # Filter out negative times and quantize to audio samples
    for ins in midi.instruments:
        for n in ins.notes:
            if filter_mid_bad_times:
                if n.start < 0 or n.end < 0 or n.end < n.start:
                    return
            n.start = round(n.start * 44100.) / 44100.
            n.end = round(n.end * 44100.) / 44100.

    instruments = midi.instruments

    # Filter out drum instruments
    drums = [i for i in instruments if i.is_drum]
    instruments = [i for i in instruments if not i.is_drum]

    # Filter out instruments with bizarre ranges
    instruments_normal_range = []
    for ins in instruments:
        pitches = [n.pitch for n in ins.notes]
        min_pitch = min(pitches)
        max_pitch = max(pitches)
        if max_pitch >= filter_ins_max_below and min_pitch <= filter_ins_min_above:
            instruments_normal_range.append(ins)
    instruments = instruments_normal_range
    if len(instruments) < min_num_instruments:
        return

    # Sort notes for polyphonic filtering and proper saving
    for ins in instruments:
        ins.notes = sorted(ins.notes, key=lambda x: x.start)
    if output_include_drums:
        for ins in drums:
            ins.notes = sorted(ins.notes, key=lambda x: x.start)

    # Filter out polyphonic instruments
    instruments = [i for i in instruments if instrument_is_monophonic(i)]
    if len(instruments) < min_num_instruments:
        return

    # Filter out duplicate instruments
    if filter_ins_duplicate:
        uniques = set()
        instruments_unique = []
        for ins in instruments:
            pitches = ','.join(
                ['{}:{:.1f}'.format(str(n.pitch), n.start) for n in ins.notes])
            if pitches not in uniques:
                instruments_unique.append(ins)
                uniques.add(pitches)
        instruments = instruments_unique
        if len(instruments) < min_num_instruments:
            return

    # TODO: Find instruments that have a substantial fraction of the number of total notes
    """
    min_notes_frac = num_instruments_to_min_notes_frac(num_instruments)
    total_num_notes = sum([len(i.notes) for i in instruments])
    instruments = [i for i in instruments if (len(i.notes) / float(total_num_notes)) >= min_notes_frac]
    num_instruments = len(instruments)
    if num_instruments < min_num_instruments:
        return
    """
    # TODO: ensure tempo and other metadata is alright
    # TODO: ensure number of notes is alright

    # Create assignments of MIDI instruments to NES instruments
    num_instruments = len(instruments)
    # -----Luis----- We assign instrument with most notes -> p1, 2nd most -> p2, 3rd most -> tr
    num_notes = [len(i.notes) for i in instruments]
    perm = list(np.argsort(num_notes)[::-1])
    if num_instruments == 1:
        instrument_perms = [(0, -1, -1)]  # , (-1, 0, -1), (-1, -1, 0)]
    elif num_instruments == 2:
        instrument_perms = [tuple(perm + [-1])]
        # [(-1, 0, 1), (-1, 1, 0), (0, -1, 1), (0, 1, -1), (1, -1, 0), (1, 0, -1)]
    else:
        instrument_perms = [tuple(perm[:3])]
    # -----Luis end-----

    if len(instrument_perms) > output_max_num:
        instrument_perms = random.sample(instrument_perms, output_max_num)

    num_drums = len(drums) if output_include_drums else 0
    instrument_perms_plus_drums = []
    for perm in instrument_perms:
        # Select the drum instrument with the most notes
        selection = -1 if num_drums == 0 else max(
            range(len(drums)), key=lambda x: len(drums[x].notes))
        instrument_perms_plus_drums.append(perm + (selection, ))
    instrument_perms = instrument_perms_plus_drums

    # Emit midi files
    for i, perm in enumerate(instrument_perms):
        # Create MIDI instruments
        p1_prog = pretty_midi.instrument_name_to_program('Lead 1 (square)')
        p2_prog = pretty_midi.instrument_name_to_program('Lead 2 (sawtooth)')
        tr_prog = pretty_midi.instrument_name_to_program('Synth Bass 1')
        no_prog = pretty_midi.instrument_name_to_program('Breath Noise')
        p1 = pretty_midi.Instrument(program=p1_prog, name='p1', is_drum=False)
        p2 = pretty_midi.Instrument(program=p2_prog, name='p2', is_drum=False)
        tr = pretty_midi.Instrument(program=tr_prog, name='tr', is_drum=False)
        no = pretty_midi.Instrument(program=no_prog, name='no', is_drum=True)

        # Filter out invalid notes
        perm_mid_ins_notes = []
        for mid_ins_id, nes_ins_name in zip(perm, ['p1', 'p2', 'tr', 'no']):
            if mid_ins_id < 0:
                perm_mid_ins_notes.append(None)
            else:
                if nes_ins_name == 'no':
                    mid_ins = drums[mid_ins_id]
                    mid_ins_notes_valid = mid_ins.notes
                else:
                    mid_ins = instruments[mid_ins_id]
                    mid_ins_notes_valid = [
                        n for n in mid_ins.notes
                        if n.pitch >= nes_ins_name_to_min_pitch[nes_ins_name]
                        and n.pitch <= nes_ins_name_to_max_pitch[nes_ins_name]
                    ]
                perm_mid_ins_notes.append(mid_ins_notes_valid)
        assert len(perm_mid_ins_notes) == 4

        # Calculate length of this ensemble
        start = None
        end = None
        for notes in perm_mid_ins_notes:
            if notes is None or len(notes) == 0:
                continue
            ins_start = min([n.start for n in notes])
            ins_end = max([n.end for n in notes])
            if start is None or ins_start < start:
                start = ins_start
            if end is None or ins_end > end:
                end = ins_end
        if start is None or end is None:
            continue

        # Clip if needed
        if (end - start) > output_max_num_seconds:
            end = start + output_max_num_seconds

        # Create notes
        for mid_ins_notes, nes_ins_name, nes_ins in zip(
                perm_mid_ins_notes, ['p1', 'p2', 'tr', 'no'], [p1, p2, tr, no]):
            if mid_ins_notes is None:
                continue
            if nes_ins_name == 'no':
                random_noise_mapping = [random.randint(1, 16) for _ in range(128)]
            last_nend = -1
            for ni, n in enumerate(mid_ins_notes):
                nvelocity = n.velocity
                npitch = n.pitch
                nstart = n.start
                nend = n.end
                # Drums are not necessarily monophonic so we need to filter
                if nes_ins_name == 'no' and nstart < last_nend:
                    continue
                last_nend = nend
                assert nstart >= start
                if nend > end:
                    continue
                assert nend <= end
                nvelocity = 1 if nes_ins_name == 'tr' else int(
                    round(1. + (14. * nvelocity / 127.)))
                assert nvelocity > 0
                if nes_ins_name == 'no':
                    npitch = random_noise_mapping[npitch]
                nstart = nstart - start
                nend = nend - start
                nes_ins.notes.append(
                    pretty_midi.Note(nvelocity, npitch, nstart, nend))

        # Add instruments to MIDI file
        midi = pretty_midi.PrettyMIDI(initial_tempo=120, resolution=22050)
        midi.instruments.extend([p1, p2, tr, no])

        # Create indicator for end of song
        eos = pretty_midi.TimeSignature(1, 1, end - start)
        midi.time_signature_changes.append(eos)

        # Save MIDI file
        out_fp = '{}_{}.mid'.format(midi_name, str(i).zfill(3))
        out_fp = os.path.join(output_dir, out_fp)
        midi.write(out_fp)
def test_get_downbeats():
    pm = pretty_midi.PrettyMIDI()
    # Add a note to force get_end_time() to be non-zero
    i = pretty_midi.Instrument(0)
    i.notes.append(pretty_midi.Note(100, 100, 0.3, 20.4))
    pm.instruments.append(i)
    # pretty_midi assumes 120 bpm, 4/4 unless otherwise specified
    assert np.allclose(pm.get_downbeats(),
                       np.arange(0, pm.get_end_time(), 4 * 60. / 120.))
    # Testing starting from a different beat time
    assert np.allclose(pm.get_downbeats(.2),
                       np.arange(0, pm.get_end_time(), 4 * 60. / 120.) + .2)
    # Testing a tempo change
    change_bpm = 93.
    change_time = 8.4
    pm._tick_scales.append(
        (pm.time_to_tick(change_time), 60. / (change_bpm * pm.resolution)))
    pm._update_tick_to_time(pm.time_to_tick(pm.get_end_time()))
    # Track at 120 bpm up to the tempo change time
    expected_beats = np.arange(0, change_time, 4 * 60. / 120.)
    # BPM switches (4.5 - 4.4)/(60./120.) of the way through
    expected_beats = np.append(
        expected_beats,
        change_time + (10. - change_time) / (4 * 60. / 120.) * 4 * 60. / change_bpm)
    # From there, use the new bpm
    expected_beats = np.append(
        expected_beats,
        np.arange(expected_beats[-1] + 4 * 60. / change_bpm,
                  pm.get_end_time(), 4 * 60. / change_bpm))
    assert np.allclose(pm.get_downbeats(), expected_beats)
    # When requesting a start_time after the tempo change, make sure we just
    # track as normal
    assert np.allclose(
        pm.get_downbeats(change_time + .1),
        np.arange(change_time + .1, pm.get_end_time(), 4 * 60. / change_bpm))
    # Add a time signature change, which forces beat tracking to restart
    pm.time_signature_changes.append(pretty_midi.TimeSignature(3, 4, 2.1))
    # Track at 120 bpm up to time signature change
    expected_beats = np.arange(0, 2.1, 4 * 60. / 120.)
    # Now track, restarting from time signature change time
    expected_beats = np.append(expected_beats,
                               np.arange(2.1, change_time, 3 * 60. / 120.))
    # BPM switches (4.6 - 4.4)/(60./120.) of the way through
    expected_beats = np.append(
        expected_beats,
        change_time + (9.6 - change_time) / (3 * 60. / 120.) * 3 * 60. / change_bpm)
    # From there, use the new bpm
    expected_beats = np.append(
        expected_beats,
        np.arange(expected_beats[-1] + 3 * 60. / change_bpm,
                  pm.get_end_time(), 3 * 60. / change_bpm))
    assert np.allclose(pm.get_downbeats(), expected_beats)
    # When there are two time signature changes, make sure both get included
    pm.time_signature_changes.append(pretty_midi.TimeSignature(5, 4, 1.9))
    expected_beats[expected_beats == 2.] = 1.9
    assert np.allclose(pm.get_downbeats(), expected_beats)
    # Request a start time after the time signature change
    expected_beats = np.arange(2.2, change_time, 3 * 60. / 120.)
    expected_beats = np.append(
        expected_beats,
        change_time + (9.7 - change_time) / (3 * 60. / 120.) * 3 * 60. / change_bpm)
    expected_beats = np.append(
        expected_beats,
        np.arange(expected_beats[-1] + 3 * 60. / change_bpm,
                  pm.get_end_time(), 3 * 60. / change_bpm))
    assert np.allclose(pm.get_downbeats(2.2), expected_beats)
    # Test for compound meters
    pm = pretty_midi.PrettyMIDI()
    # Add a note to force get_end_time() to be non-zero
    i = pretty_midi.Instrument(0)
    i.notes.append(pretty_midi.Note(100, 100, 0.3, 20.4))
    pm.instruments.append(i)
    # Simple test, assume 6/8 time for the entire piece
    pm.time_signature_changes.append(pretty_midi.TimeSignature(6, 8, 0))
    assert np.allclose(pm.get_downbeats(),
                       np.arange(0, pm.get_end_time(), 3 * 60. / 120.))
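# A minimal sketch (assumed, not from the test suite above) of how get_downbeats()
# reacts to a time signature: downbeats are spaced numerator beats apart, so at the
# default 120 bpm a 3/4 measure lasts 1.5 seconds.
import pretty_midi
pm = pretty_midi.PrettyMIDI()  # defaults to 120 bpm
inst = pretty_midi.Instrument(0)
inst.notes.append(pretty_midi.Note(velocity=100, pitch=60, start=0.0, end=8.0))
pm.instruments.append(inst)
pm.time_signature_changes.append(pretty_midi.TimeSignature(3, 4, 0))
print(pm.get_downbeats())  # expected: [0., 1.5, 3., 4.5, 6., 7.5]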
if incomplete_start:
    pitch_matrix = pitch_matrix[1:, :]
# normalized
pitch_matrix = (pitch_matrix.transpose() / pitch_matrix.transpose().sum(axis=0)).transpose()
# HMM chord generator
chords = generate_chords(pitch_matrix)
# transform to pychord's Chord
chords = [SYMBOL_CHORD_DICT.get(c) for c in chords]
# generate a midi combining melody and chord
midi = pretty_midi.PrettyMIDI(initial_tempo=bpm)
ts = pretty_midi.TimeSignature(time_sig, 4, 0)
# TODO: C key default now, support key trans in future
ks = pretty_midi.KeySignature(0, 0)
midi.time_signature_changes.append(ts)
midi.key_signature_changes.append(ks)
# melody track
melody_track = pretty_midi.Instrument(program=0)
for i in range(len(df_notes)):
    note = df_notes.iloc[i]
    pitch = MELODY_INIT_NOTE + YMLNOTE_VAL_DICT.get(note['key']) + 12 * note['octave']
    _note = pretty_midi.Note(velocity=MELODY_VELOCITY, pitch=int(pitch),
while line:
    if len(line.strip()) != 0 and not skip:
        midis.append(matches[line.strip()])
    skip = not skip
    line = lyr.readline()
song_midi = pretty_midi.PrettyMIDI(midis[0])
insts = song_midi.instruments
for i, mid in enumerate(midis[1:]):
    midi = pretty_midi.PrettyMIDI(mid)
    for j, inst in enumerate(midi.instruments):
        new_notes = inst.notes
        for n in new_notes:
            n.start += (i + 1) * bit_len
            n.end += (i + 1) * bit_len
        insts[j].notes += new_notes
file = pretty_midi.PrettyMIDI(initial_tempo=120, resolution=22050)
file.instruments.extend(insts)
endtime = file.get_end_time()
print(endtime)
eos = pretty_midi.TimeSignature(1, 1, endtime)
file.time_signature_changes = [eos]
out_fp = '{}.mid'.format(f[:-4])
out_fp = os.path.join("./music/data/songs", out_fp)
file.write(out_fp)
def rolls_to_midi(pianoroll, programs, save_folder, filename, bpm,
                  velocity_roll=None, held_notes_roll=None):
    # bpm is in quarter notes, so scale accordingly
    bpm = bpm * (SMALLEST_NOTE / 4)
    pianoroll = np.pad(np.copy(pianoroll),
                       ((0, 0), (low_crop, num_notes - high_crop)),
                       mode='constant', constant_values=0)
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    midi = pm.PrettyMIDI(initial_tempo=bpm, resolution=1000)
    midi.time_signature_changes.append(pm.TimeSignature(4, 4, 0))
    for voice, program in enumerate(programs):
        current_instrument = pm.Instrument(program=program)
        current_pianoroll = pianoroll[voice::len(programs), :]
        if velocity_roll is not None:
            current_velocity_roll = np.copy(velocity_roll[voice::len(programs)])
            # During training, the velocities were scaled to be in the range [0, 1];
            # scale them back to actual velocity numbers
            current_velocity_roll[np.where(
                current_velocity_roll < velocity_threshold_such_that_it_is_a_played_note)] = 0
            current_velocity_roll[np.where(
                current_velocity_roll >= velocity_threshold_such_that_it_is_a_played_note)] -= 0.5
            current_velocity_roll /= (1.0 - velocity_threshold_such_that_it_is_a_played_note)
            current_velocity_roll *= MAX_VELOCITY
        if held_notes_roll is not None:
            current_held_notes_roll = np.copy(held_notes_roll[voice::len(programs)])
        tracker = []
        start_times = dict()
        velocities = dict()
        for i, note_vector in enumerate(current_pianoroll):
            notes = list(note_vector.nonzero()[0])
            # Notes that were just played and need to be removed from the tracker
            removal_list = []
            for note in tracker:
                # Determine whether this note is still held or not
                hold_this_note = True
                if held_notes_roll is not None:
                    hold_this_note = current_held_notes_roll[i] > 0.5
                    # It may happen that a note seems to be held but has switched to
                    # another channel; in that case, play the note anyway
                    if note not in notes:
                        hold_this_note = False
                else:
                    hold_this_note = note in notes and i % SMALLEST_NOTE != 0
                if hold_this_note:
                    # Held note, don't play a new note
                    notes.remove(note)
                else:
                    if velocity_roll is not None:
                        velocity = velocities[note]
                        if velocity > MAX_VELOCITY:
                            velocity = int(MAX_VELOCITY)
                    else:
                        velocity = 80
                    midi_note = pm.Note(velocity=velocity, pitch=note,
                                        start=(60 / bpm) * start_times[note],
                                        end=(60 / bpm) * i)
                    current_instrument.notes.append(midi_note)
                    removal_list.append(note)
            for note in removal_list:
                tracker.remove(note)
            for note in notes:
                tracker.append(note)
                start_times[note] = i
                if velocity_roll is not None:
                    velocities[note] = int(current_velocity_roll[i])
        midi.instruments.append(current_instrument)
    midi.write(os.path.join(save_folder, filename + '.mid'))
def fami_to_mid(input_txt_file, output_mid_file):
    """
    Returns the duration of the track
    """
    f = open(input_txt_file, 'r')
    d = f.read()
    f.close()
    d = d.replace('\t', '')
    channels = d.split('Channel Type')
    channels_list = []
    instances_list = []
    for i, channel in enumerate(channels):
        patterns = channel.split('Pattern Name')
        patterns_list = []
        instances = {}
        for j, pattern in enumerate(patterns):
            pattern_list = []
            notes = pattern.split('\n')
            for note in notes:
                if note.startswith('Note ') or note.startswith('PatternInstance '):
                    if note.startswith('Note '):
                        params_raw = note.split('Note ')[1] + '\n'
                    else:
                        params_raw = note.split('PatternInstance ')[1] + '\n'
                    # Split the parameter string on spaces, respecting quoted values
                    open_value = False
                    temp = ''
                    params = []
                    for c in params_raw:
                        if c == '"':
                            if open_value:
                                open_value = False
                            else:
                                open_value = True
                        if c in [' ', '\n'] and not open_value:
                            params.append(temp)
                            temp = ''
                        else:
                            temp += c
                    if note.startswith('Note '):
                        notes_values = []
                        for param in params:
                            key, value = param.split('=')
                            value = value[1:-1]
                            notes_values.append((key, value))
                        pattern_list.append((pattern_name, notes_values))
                    else:
                        instance = {}
                        for param in params:
                            key, value = param.split('=')
                            value = value[1:-1]
                            instance[key] = value
                        instances[instance['Pattern']] = instance
                else:
                    if note.startswith('="Pattern '):
                        pattern_name = note[2:-1]
            patterns_list.append(pattern_list)
        channels_list.append(patterns_list)
        instances_list.append(instances)

    values_table = {
        'C': 0, 'C#': 1, 'D': 2, 'D#': 3, 'E': 4, 'F': 5,
        'F#': 6, 'G': 7, 'G#': 8, 'A': 9, 'A#': 10, 'B': 11
    }

    notes_times = []
    for c in channels_list:
        for p in c:
            for name, n in p:
                time = None
                for k, v in n:
                    if k == 'Time':
                        time = v
                    if k == 'Value':
                        notes_times.append(time)

    # Average Volumes
    notes_volumes = []
    current_volume = 11
    was_stop = True
    num_events = 0
    volume = 0
    for c in channels_list:
        for p in c:
            for name, n in p:
                # create dict from list of key values
                d = {}
                for k, v in n:
                    d[k] = v
                # end of note
                if 'Value' in d:
                    # TWO AGGREGATION METHODS:
                    prev_volume = None if was_stop else int(volume)
                    # prev_volume = None if was_stop else int(volume / num_events)
                    notes_volumes.append(prev_volume)
                    volume = 0
                    num_events = 0
                    if not d['Value'] == 'Stop':
                        was_stop = False
                # means it's not a Stop or other: aggregate
                if 'Volume' in d:
                    v = int(d['Volume'])
                    # TWO AGGREGATION METHODS:
                    volume = max(v, volume)
                    # volume += v
                    current_volume = v
                    num_events += 1
                else:
                    if 'Value' in d:
                        # if it's a true note, but without volume change or indication
                        if not d['Value'] == 'Stop':
                            # TWO AGGREGATION METHODS:
                            volume = max(current_volume, volume)
                            # volume += current_volume
                            num_events += 1
                            was_stop = False
                        else:
                            was_stop = True

    # Write midi
    # Constants:
    channel_index_to_prog = {1: 80, 2: 81, 3: 38, 4: 121}
    prog_to_instrument_name = {80: 'p1', 81: 'p2', 38: 'tr', 121: 'no'}
    # Midi
    midi = pretty_midi.PrettyMIDI(initial_tempo=120, resolution=22050)
    current_note = 0
    max_end = 0
    for channel_index, c in enumerate(channels_list):
        if channel_index not in channel_index_to_prog:
            continue
        # Instrument
        prog = channel_index_to_prog[channel_index]
        instrument_name = prog_to_instrument_name[prog]
        instrument_notes = []
        instrument = pretty_midi.Instrument(program=prog, name=instrument_name,
                                            is_drum=(prog >= 112))
        insts = instances_list[channel_index]
        for i, p in enumerate(c):
            for note_index, (name, n) in enumerate(p):
                inst = insts[name]
                inst_time = int(inst['Time']) * 256
                time = 0
                value = None
                # create dict from list of key values
                d = {}
                for k, v in n:
                    d[k] = v
                if 'Time' in d:
                    v = d['Time']
                    time = int(v) + inst_time
                    try:
                        time_end = int(notes_times[current_note + 1]) + inst_time
                        length = time_end - time
                        if length < 0:
                            length = time_end - (time - 256)
                    except:
                        length = 1
                    try:
                        volume = notes_volumes[current_note + 1]
                        if volume is not None:
                            last_volume = volume
                    except:
                        # TODO pb with the last event
                        # print('Should only be the last character')
                        volume = last_volume
                if 'Value' in d:
                    current_note += 1
                    v = d['Value']
                    if v == 'Stop':
                        value = None
                    else:
                        value = values_table[v[0:-1]] + 12 * (int(v[-1]) + 1)
                # TODO never used
                # elif k == 'FinePitch':
                #     pitch = -(int(v) / 128) * 100 * 12
                if value is not None:
                    time = float(time) / 256 * 4
                    length = float(length) / 256 * 4
                    note = pretty_midi.Note(velocity=volume, start=time,
                                            end=time + length, pitch=value)
                    instrument_notes.append(note)
                    if time + length > max_end:
                        max_end = time + length
        instrument.notes = instrument_notes
        midi.instruments.append(instrument)
    ts = pretty_midi.TimeSignature(4, 4, 0)
    eos = pretty_midi.TimeSignature(1, 1, max_end)
    midi.time_signature_changes.extend([ts, eos])
    midi.write(output_mid_file)
    return max_end
def exprsco_to_midi(exprsco):
    import pretty_midi

    rate, nsamps, exprsco = exprsco

    # Create MIDI instruments
    p1_prog = pretty_midi.instrument_name_to_program('Lead 1 (square)')
    p2_prog = pretty_midi.instrument_name_to_program('Lead 2 (sawtooth)')
    tr_prog = pretty_midi.instrument_name_to_program('Synth Bass 1')
    no_prog = pretty_midi.instrument_name_to_program('Breath Noise')
    p1 = pretty_midi.Instrument(program=p1_prog, name='p1', is_drum=False)
    p2 = pretty_midi.Instrument(program=p2_prog, name='p2', is_drum=False)
    tr = pretty_midi.Instrument(program=tr_prog, name='tr', is_drum=False)
    no = pretty_midi.Instrument(program=no_prog, name='no', is_drum=True)

    # Iterate through score to extract channel notes
    notes = {}
    ccs = {}
    for i, ch in enumerate(np.split(exprsco, 4, axis=1)):
        ch = ch[:, 0, :]

        # MIDI doesn't allow velocity 0 messages so set tr velocity to 1
        if i == 2:
            ch[:, 1] = 1
            last_velocity = 1
        else:
            last_velocity = 0

        last_note = 0
        last_timbre = 0
        note_starts = []
        note_ends = []
        ch_ccs = []
        for s, (note, velocity, timbre) in enumerate(ch):
            if note != last_note:
                if note == 0:
                    note_ends.append(s)
                else:
                    if last_note == 0:
                        note_starts.append((s, note, velocity))
                    else:
                        note_ends.append(s)
                        note_starts.append((s, note, velocity))
            else:
                if velocity != last_velocity:
                    ch_ccs.append((s, 11, velocity))
            if timbre != last_timbre:
                ch_ccs.append((s, 12, timbre))
            last_note = note
            last_velocity = velocity
            last_timbre = timbre
        if last_note != 0:
            note_ends.append(s + 1)
        assert len(note_starts) == len(note_ends)
        notes[i] = zip(note_starts, note_ends)
        ccs[i] = ch_ccs

    # Add notes to MIDI instruments
    for i, ins in enumerate([p1, p2, tr, no]):
        for (start_samp, note, velocity), end_samp in notes[i]:
            assert end_samp > start_samp
            start_t, end_t = start_samp / 44100., end_samp / 44100.
            note = pretty_midi.Note(velocity=velocity, pitch=note,
                                    start=start_t, end=end_t)
            ins.notes.append(note)
        for samp, cc_num, arg in ccs[i]:
            cc = pretty_midi.ControlChange(cc_num, arg, samp / 44100.)
            ins.control_changes.append(cc)

    # Add instruments to MIDI file
    midi = pretty_midi.PrettyMIDI(initial_tempo=120, resolution=22050)
    midi.instruments.extend([p1, p2, tr, no])

    # Create indicator for end of song
    eos = pretty_midi.TimeSignature(1, 1, nsamps / 44100.)
    midi.time_signature_changes.append(eos)

    # Write/read MIDI file
    mf = tempfile.NamedTemporaryFile('rb')
    midi.write(mf.name)
    midi = mf.read()
    mf.close()

    return midi
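# Several of the converters above (tx1_to_midi, exprsco_to_midi, fami_to_mid) encode the
# song length as a final 1/1 TimeSignature event rather than as a note. A minimal sketch
# (assumed, not part of the original code) of recovering that length from a written file;
# the file path is hypothetical.
import pretty_midi
song = pretty_midi.PrettyMIDI('example_nes_song.mid')
eos_marker = song.time_signature_changes[-1]   # the 1/1 end-of-song indicator
nsamps = int(round(eos_marker.time * 44100.))  # back to audio samples at 44.1 kHz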
def main():
    """User interface."""
    parser = argparse.ArgumentParser(
        description='Preprocess (quantize, simplify, merge ..) and augment '
                    'complex MIDI files for machine learning purposes and '
                    'dataset generation of multipart MIDI scores.')
    parser.add_argument('files', metavar='path', nargs='+',
                        help='path of input files (.mid). accepts * as wildcard')
    parser.add_argument('--target_folder', metavar='path',
                        help='folder path where generated results are stored',
                        default=common.DEFAULT_TARGET_FOLDER)
    parser.add_argument('--interval_low', metavar='0-127', type=int,
                        help='lower end of transpose interval',
                        choices=range(0, 127), default=INTERVAL_LOW)
    parser.add_argument('--interval_high', metavar='0-127',
                        help='higher end of transpose interval', type=int,
                        choices=range(0, 127), default=INTERVAL_HIGH)
    parser.add_argument('--time_signature', metavar='4/4', type=str,
                        help='converts score to given time signature')
    parser.add_argument('--valid', metavar='3/4', nargs='*', type=str,
                        help='keep these time signatures, remove others')
    parser.add_argument('--instrument', metavar='name',
                        help='converts parts to given instrument',
                        default=DEFAULT_INSTRUMENT)
    parser.add_argument('--voice_num', metavar='1-32', type=int,
                        help='converts to this number of parts',
                        choices=range(1, 32), default=VOICE_NUM)
    parser.add_argument('--bpm', metavar='1-320', type=int,
                        help='global tempo of score',
                        choices=range(1, 320), default=DEFAULT_BPM)
    parser.add_argument('--voice_distribution', metavar='0.0-1.0', nargs='+',
                        type=common.restricted_float,
                        help='defines maximum size of alternative options '
                             'per voice (0.0 - 1.0)',
                        default=VOICE_DISTRIBUTION)
    parser.add_argument('--part_ratio', metavar='0.0-1.0',
                        type=common.restricted_float,
                        help='all notes / part notes ratio threshold '
                             'to remove too sparse parts',
                        default=SCORE_PART_RATIO)
    args = parser.parse_args()

    file_paths = common.get_files(args.files)
    default_bpm = args.bpm
    default_instrument = args.instrument
    interval_high = args.interval_high
    interval_low = args.interval_low
    score_part_ratio = args.part_ratio
    target_folder_path = args.target_folder
    voice_distribution = args.voice_distribution
    voice_num = args.voice_num

    if args.time_signature:
        default_time_signature = [int(i) for i in args.time_signature.split('/')]
    else:
        default_time_signature = DEFAULT_TIME_SIGNATURE

    if args.valid:
        valid_time_signatures = []
        for signature in args.valid:
            if '/' in signature:
                valid_time_signatures.append(
                    [int(i) for i in signature.split('/')])
            else:
                common.print_error('Error: Invalid time signature!')
    else:
        valid_time_signatures = VALID_TIME_SIGNATURES

    # Do some health checks before we start
    if interval_high - interval_low < 12:
        common.print_error('Error: Interval range is smaller than an octave!')

    test = 1.0 - np.sum(voice_distribution)
    if test > 0.001 or test < 0:
        common.print_error('Error: voice distribution sum is not 1.0!')

    if len(voice_distribution) != voice_num:
        common.print_error('Error: length of voice distribution is not '
                           'equal to the number of voices!')

    common.check_target_folder(target_folder_path)

    for file_path in file_paths:
        if common.is_invalid_file(file_path):
            continue

        # Import MIDI file
        print('➜ Import file at "{}" ..'.format(file_path))

        # Read MIDI file and clean up
        score = midi.PrettyMIDI(file_path)
        score.remove_invalid_notes()
        print('Loaded "{}".'.format(file_path))

        if get_end_time(score, default_bpm, default_time_signature) == 0.0:
            print_warning('Original score is too short! Stop here.', file_path)
            continue

        # Remove invalid time signatures
        temp_score = filter_time_signatures(score, valid_time_signatures,
                                            default_bpm, default_time_signature)

        # Remove sparse instruments
        remove_sparse_parts(temp_score, score_part_ratio)
        if len(temp_score.instruments) < voice_num:
            print_warning('Too few voices given! Stop here.', file_path)
            continue

        # Identify ambitus group for every instrument
        groups = identify_ambitus_groups(temp_score, voice_num, voice_distribution)

        # Transpose within an interval
        transpose(temp_score, interval_low, interval_high)

        # Check which parts we can combine
        combination_options = []
        for group_index in range(0, voice_num):
            options = np.argwhere(groups == group_index).flatten()
            combination_options.append(options)
            print('Parts {} in group {} (size = {}).'.format(
                options, group_index, len(options)))

        # Build a tree to traverse to find all combinations
        tree = create_combination_tree(combination_options, 0)
        combinations = traverse_combination_tree(tree, single_combination=[])
        print('Found {} possible combinations.'.format(len(combinations)))

        # Prepare a new score with empty parts for every voice
        new_score = midi.PrettyMIDI(initial_tempo=default_bpm)
        temp_end_time = get_end_time(temp_score, default_bpm, default_time_signature)
        if temp_end_time < 1.0:
            print_warning('Score is very short, maybe due to time signature '
                          'filtering. Skip this!', file_path)
            continue

        new_score.time_signature_changes = [
            midi.TimeSignature(numerator=default_time_signature[0],
                               denominator=default_time_signature[1],
                               time=0.0)
        ]
        for i in range(0, voice_num):
            program = midi.instrument_name_to_program(default_instrument)
            new_instrument = midi.Instrument(program=program)
            new_score.instruments.append(new_instrument)

        # Add parts in all possible combinations
        for combination_index, combination in enumerate(combinations):
            offset = combination_index * temp_end_time
            for instrument_index, temp_instrument_index in enumerate(
                    reversed(combination)):
                for note in temp_score.instruments[temp_instrument_index].notes:
                    new_score.instruments[instrument_index].notes.append(
                        copy_note(note, offset))
            print('Generated combination #{0:03d}: {1}'.format(
                combination_index + 1, combination))

        # Done!
        new_end_time = get_end_time(new_score, default_bpm, default_time_signature)
        print('Generated score with duration {0} seconds. '
              'Data augmentation of {1:.0%}!'.format(
                  round(new_end_time), (new_end_time / temp_end_time) - 1))

        # Write result to MIDI file
        new_file_path = common.make_file_path(file_path, target_folder_path,
                                              suffix='processed')
        new_score.write(new_file_path)
        print('Saved MIDI file at "{}".'.format(new_file_path))
        print('')

    if len(warnings) > 0:
        print('Warnings given:')
        for warning in warnings:
            print('* "{}" in "{}".'.format(warning[0], warning[1]))
        print('')

    print('Done!')