def _new_song_file():
    """Create a fresh single-track MidiFile with the standard header
    (960 ticks/beat, 4/4 time signature, 600000 us/beat tempo).

    Returns:
        (mid, track): the new file and its only track.
    """
    mid = mido.MidiFile()
    mid.ticks_per_beat = 960
    track = mido.MidiTrack()
    mid.tracks.append(track)
    track.append(mido.MetaMessage("time_signature", numerator=4, denominator=4))
    track.append(mido.MetaMessage("set_tempo", tempo=600000))
    return mid, track


def convert(arr, name):
    """Split the event array `arr` into one or more MIDI files.

    Each element is converted with npy2msg(). An element whose first
    field equals 1 terminates the current song, which is saved as
    'songs/<name><i>.mid'. Whatever remains after the last terminal
    marker is flushed to a final file.

    (Refactor: the verbatim-duplicated file/track setup is now the
    _new_song_file helper; behavior is unchanged.)
    """
    mid, track = _new_song_file()
    i = 0          # running index appended to each output file name
    first = 1      # flag: the next ordinary event starts a new file
    for el in arr:
        if el[0] == 1:
            # Terminal event: append it, save the song, arm `first`.
            track.append(npy2msg(el))
            mid.save('songs/' + name + str(i) + '.mid')
            i += 1
            first = 1
        elif first == 1:
            # First event of a new song: start from a fresh file.
            first = 0
            mid, track = _new_song_file()
            track.append(npy2msg(el))
        else:
            track.append(npy2msg(el))
    # Flush the trailing (possibly already-saved) file, as before.
    mid.save('songs/' + name + str(i) + '.mid')
def notes_to_mid(notes):
    """Build a MidiFile from (melody, length) pairs.

    A pair with melody == 0 is a rest of `length` ticks; otherwise
    pitch (melody + 1) is struck at velocity 100 and released after
    `length` ticks (release encoded as note_on with velocity 0).
    """
    midi = mido.MidiFile()
    midi.ticks_per_beat = 100

    # Conductor / meta track.
    meta_track = mido.MidiTrack()
    midi.tracks.append(meta_track)
    meta_track.append(mido.MetaMessage("track_name", name="generated"))
    meta_track.append(mido.MetaMessage("track_name", name="Conductor Track"))
    meta_track.append(mido.MetaMessage("marker", text="Setup"))
    meta_track.append(mido.MetaMessage("key_signature", key="C"))
    meta_track.append(mido.MetaMessage("set_tempo", tempo=1000000))

    # Note track: reset controllers, volume, pan, bank select, program.
    main_track = mido.MidiTrack()
    midi.tracks.append(main_track)
    main_track.append(mido.MetaMessage("track_name", name="main"))
    for control, value in ((121, 0), (7, 100), (10, 64), (0, 0), (32, 0)):
        main_track.append(
            mido.Message("control_change", channel=0, control=control, value=value))
    main_track.append(mido.Message("program_change", channel=0, program=0))

    for melody, length in notes:
        if melody == 0:
            # Rest: silent note_on that only advances time.
            main_track.append(
                mido.Message("note_on", channel=0, note=0, velocity=0, time=length))
        else:
            main_track.append(
                mido.Message("note_on", channel=0, note=melody + 1, velocity=100, time=0))
            main_track.append(
                mido.Message("note_on", channel=0, note=melody + 1, velocity=0, time=length))
    return midi
def generate_from_sequence(chords_sequence):
    """Render a sequence of encoded chords to 'new_song.mid'.

    Each `chord` is a vector of counts; decode_one_note(i) maps index i
    back to a MIDI pitch (-1 appears to mean "note off" — TODO confirm
    against decode_one_note). Time is counted in multiples of
    2 * constants.MIN_DURATION seconds, converted to ticks once up
    front at a fixed 120 BPM.

    NOTE(review): track1 only carries a default set_tempo; all note
    events go to track2.
    """
    midi = mido.MidiFile()
    midi_events = []
    track2 = mido.MidiTrack()
    track1 = mido.MidiTrack()
    midi.tracks.append(track1)
    midi.tracks.append(track2)
    track1.append(mido.MetaMessage('set_tempo'))
    time = 0
    tempo = 120
    # Ticks corresponding to one minimal time slot.
    val_min_duration = int(
        mido.second2tick((2 * constants.MIN_DURATION), midi.ticks_per_beat,
                         mido.bpm2tempo(tempo)))
    chords_before = []  # pitches sounding at the previous step
    for chord in chords_sequence:
        current_chord = []
        note_off = False
        # Walk the chord vector from the top; chord[i] is a repeat count.
        for i in range(len(chord) - 1, -1, -1):
            for j in range(0, chord[i]):
                note = decode_one_note(i)
                if note >= constants.MIN_PITCH:
                    current_chord.append(note)
                    if note not in chords_before:
                        # Newly sounding pitch: emit note_on after the
                        # accumulated silent slots.
                        midi_events.append(
                            mido.Message("note_on", note=note, velocity=64,
                                         time=time * val_min_duration))
                        time = 0
                    else:
                        # Pitch continues; consume it so leftovers can be
                        # turned off below.
                        chords_before.remove(note)
                elif note == -1:
                    # Note-off marker: release the most recent held pitch.
                    if not note_off:
                        time += 1
                    midi_events.append(
                        mido.Message("note_off", note=chords_before.pop(),
                                     velocity=64, time=time * val_min_duration))
                    time = 0
                    note_off = True
        if not note_off:
            # Nothing released this step: the slot still takes time.
            time += 1
        chords_before = current_chord
    for midi_event in midi_events:
        track2.append(midi_event)
    midi.save("new_song.mid")
def convert_numpy_to_midi(notes_np, output_name="exported_midi_from_numpy", upscale=1, tempo=500000):
    """Convert a boolean piano-roll array into a MIDI file.

    Args:
        notes_np: iterable of time steps, each a 129-wide on/off vector.
        output_name: path the MidiFile is saved to.
        upscale: each time step is repeated this many times.
        tempo: microseconds per beat written to the meta track.

    Note: events are emitted one step late by design — each state change
    flushes the PREVIOUS pending (note, velocity) pair with the elapsed
    delta; the very first flushed message is the (0, 0) placeholder.
    """
    # Stretch the roll in time by repeating every step `upscale` times.
    subset = np.array([v for v in notes_np for _ in range(upscale)])
    mid_new = mido.MidiFile(ticks_per_beat=384)
    track_meta = mido.MidiTrack()
    track_notes = mido.MidiTrack()
    track_meta.append(mido.MetaMessage("set_tempo", tempo=tempo))
    track_meta.append(
        mido.MetaMessage("time_signature", clocks_per_click=24, denominator=4,
                         numerator=4, time=0, notated_32nd_notes_per_beat=8))
    track_meta.append(mido.MetaMessage("end_of_track", time=1))

    is_on_list = [False for _ in range(129)]  # currently-sounding notes
    time = 0
    # Pending (note, velocity) pair; flushed on the NEXT state change.
    to_append = (0, 0)
    for v in subset:
        # BUG FIX: removed `is_on_list_old = is_on_list  #get a copy` —
        # it aliased (did not copy) the list and was never read.
        for note, is_on in enumerate(v):
            cond1 = is_on and not is_on_list[note]      # note just turned on
            cond2 = not is_on and is_on_list[note]      # note just turned off
            if cond1 or cond2:
                # Flush the previous pending note with the elapsed time.
                note_prev, velo_prev = to_append
                track_notes.append(
                    mido.Message('note_on', note=note_prev,
                                 velocity=velo_prev, time=time))
                time = 0
            if cond1:
                is_on_list[note] = True
                to_append = (note, 64)
            elif cond2:
                is_on_list[note] = False
                to_append = (note, 0)
            # A note that stays on (or off) is ignored.
        time += 1

    mid_new.tracks.append(track_meta)
    mid_new.tracks.append(track_notes)
    mid_new.save(output_name)
def write_song(song, filename):
    """Save the song on disk.

    Args:
        song (Song): a song object containing the tracks and melody.
        filename (str): where to save the song (no file extension).
    """
    midi_data = mido.MidiFile(ticks_per_beat=song.ticks_per_beat)

    # Track 0 carries the tempo map only.
    tempo_track = mido.MidiTrack()
    midi_data.tracks.append(tempo_track)
    tempo_track.extend(song.tempo_map)

    for channel, track in enumerate(song.tracks):
        out_track = mido.MidiTrack()
        midi_data.tracks.append(out_track)
        # Every track is played with the standard piano.
        out_track.append(mido.Message('program_change', program=0, time=0))

        # Collect on/off pairs stamped with ABSOLUTE tick times.
        # WARNING: note.note must be a plain int (NOT np.int64).
        events = []
        for note in track.notes:
            events.append(mido.Message('note_on', note=note.note, velocity=64,
                                       channel=channel, time=note.tick))
            events.append(mido.Message('note_off', note=note.note, velocity=64,
                                       channel=channel,
                                       time=note.tick + note.duration))
        # Stable sort keeps on-before-off for simultaneous events.
        events.sort(key=lambda msg: msg.time)

        # Re-express absolute ticks as deltas from the previous event.
        previous = 0
        for msg in events:
            msg.time -= previous
            previous += msg.time
            out_track.append(msg)

    midi_data.save(filename + '.mid')
def piano_roll_to_midi(piece):
    """Convert a melody piano-roll to a MidiFile (50 BPM, program 52).

    Args:
        piece: array of shape (T, 3); column 2 is the melody pitch,
            NaN meaning silence. A NaN row is appended so the final
            note is always closed.

    Returns:
        mido.MidiFile with a melody track and a tempo meta track.
    """
    piece = np.concatenate([piece, [[np.nan, np.nan, np.nan]]], axis=0)
    bpm = 50
    microseconds_per_beat = 60 * 1000000 / bpm
    mid = mido.MidiFile()
    tracks = {'melody': mido.MidiTrack()}
    past_pitches = {'melody': np.nan}
    delta_time = {'melody': 0}
    metatrack = mido.MidiTrack()
    metatrack.append(
        mido.MetaMessage('set_tempo', tempo=int(microseconds_per_beat), time=0))
    mid.tracks.append(tracks['melody'])
    mid.tracks.append(metatrack)
    tracks['melody'].append(mido.Message('program_change', program=52, time=0))
    for i in range(len(piece)):
        pitches = {'melody': piece[i, 2]}
        # BUG FIX: past_pitches may already be None from the previous
        # step (NaN in the middle of the piece); np.isnan(None) raises
        # TypeError, so guard both checks with an explicit None test.
        if past_pitches['melody'] is not None and np.isnan(past_pitches['melody']):
            past_pitches['melody'] = None
        if pitches['melody'] is not None and np.isnan(pitches['melody']):
            pitches['melody'] = None
        if pitches['melody'] != past_pitches['melody']:
            if past_pitches['melody']:
                # Release the previous pitch after the elapsed ticks.
                tracks['melody'].append(
                    mido.Message('note_off', note=int(past_pitches['melody']),
                                 velocity=64, time=delta_time['melody']))
                delta_time['melody'] = 0
            if pitches['melody']:
                tracks['melody'].append(
                    mido.Message('note_on', note=int(pitches['melody']),
                                 velocity=64, time=delta_time['melody']))
            past_pitches['melody'] = pitches['melody']
        # Each roll step is 120 ticks.
        delta_time['melody'] += 120
    return mid
def chordadd(chordbass):
    """Overlay a two-note chord (root + major third) on 'test.mid'.

    Each pitch in `chordbass` is dropped two octaves (-24) and written
    to a new track as root then third, one beat each; the result is
    saved to 'a1.mid'.

    NOTE(review): track1-3 (and trackmain) are appended to the file but
    never receive messages — kept for structural compatibility.
    """
    mid = mido.MidiFile("test.mid")
    trackmain = mido.MidiTrack()
    track1 = mido.MidiTrack()
    track2 = mido.MidiTrack()
    track3 = mido.MidiTrack()
    track4 = mido.MidiTrack()
    mid.tracks.append(trackmain)
    mid.tracks.append(track1)
    mid.tracks.append(track2)
    mid.tracks.append(track3)
    mid.tracks.append(track4)
    # FIX: a single int() suffices (was `y = int(...); y = int(y)`);
    # the debug print is kept so observable output is unchanged.
    y = int(mid.ticks_per_beat)
    print(y)
    for data in chordbass:
        data = data - 24  # shift two octaves down
        # Root for one beat, then the major third for one beat.
        track4.append(mido.Message('note_on', note=data, velocity=70, time=0))
        track4.append(mido.Message('note_off', note=data, velocity=70, time=y))
        track4.append(mido.Message('note_on', note=data + 4, velocity=70, time=0))
        track4.append(mido.Message('note_off', note=data + 4, velocity=70, time=y))
    mid.save("a1.mid")
def write_midi(filename, notes, tpb):
    """Write `notes` to `filename` as a single-track MIDI file.

    Args:
        filename: output path.
        notes: iterable of (pitch, onset_seconds, offset_seconds).
        tpb: ticks per beat, copied from the source file to avoid
            rounding errors.
    """
    mid = mido.MidiFile(ticks_per_beat=tpb)
    track = mido.MidiTrack()
    mid.tracks.append(track)
    tempo = mido.bpm2tempo(120)
    track.append(mido.MetaMessage('set_tempo', tempo=tempo))
    track.append(mido.MetaMessage('time_signature'))
    track.append(mido.Message('program_change', program=0))

    # Flatten notes into (pitch, seconds, type) events sorted by time.
    events = [(n[0], n[1], 'note_on') for n in notes]
    events.extend([(n[0], n[2], 'note_off') for n in notes])
    events = sorted(events, key=lambda n: n[1])

    # FIX: removed the unused `time` accumulator; only the per-event
    # delta (t1 - t0) was ever used.
    t0 = 0
    for pitch, t1, eventtype in events:
        dt = mido.second2tick(t1 - t0, tpb, tempo)  # delta in ticks
        track.append(
            mido.Message(eventtype, note=pitch, velocity=64, time=round(dt)))
        t0 = t1
    mid.save(filename)
def convert_hex_file_with_key(infile, outfile):
    """Parse hex-encoded message lines from `infile` into a MIDI file.

    Each line is 'HEX[,key,seconds]'; when three fields are present the
    third is stored as the message time in milliseconds. Malformed
    lines are counted, printed, and skipped.
    """
    midi_file = mido.MidiFile()
    track = mido.MidiTrack()
    midi_file.tracks.append(track)

    line_num = 0
    errors = 0
    for line in infile:
        line_num += 1
        try:
            fields = line.rstrip("\n").split(",")
            msg = mido.Message.from_hex(fields[0])
            if len(fields) == 3:
                msg.time = int(float(fields[2]) * 1000)
            track.append(msg)
        except Exception as e:
            # We don't want to stop when our ML algo makes nonsense.
            errors += 1
            print(e)
            continue
    # G-d save the queen
    midi_file.save(outfile)
    print("Done!\nOut of {} messages, there were {} errors".format(
        line_num, errors))
def test():
    """Load 'AUD_DW0146.mid', keep a slice of its note messages, and
    save the result as 'part_melody.mid'.

    FIX: modernized Python-2 `print`/`xrange` (the rest of the file
    already uses print()) and removed the unused outfile/track/
    notes_data locals.
    """
    aaa = mido.MidiFile('AUD_DW0146.mid')
    print('buya')
    mid_dict = midifile_to_dict(aaa)
    track_data = np.array(mid_dict['tracks'][0])
    # Indices (within track 0) of messages whose dict mentions 'note'.
    notes_inds = np.flatnonzero(
        np.array([
            'note' in mid_dict['tracks'][0][idx]
            for idx in range(len(track_data))
        ]))
    # inds in the levenshtein mat that are similar
    notes_inds_to_keep = np.array(range(10, 50, 1))
    orig_notes_inds_to_keep = set(notes_inds[notes_inds_to_keep])
    # Walk backwards so pops don't shift the indices still to visit.
    for idx in range(len(track_data) - 1, -1, -1):
        msg = aaa.tracks[0][idx]
        if 'note' in msg.type and idx not in orig_notes_inds_to_keep:
            aaa.tracks[0].pop(idx)
    aaa.save('part_melody.mid')
def reconversion(list_track_one, filepath):
    """Reconvert encoded note rows back into a MIDI file.

    Args:
        list_track_one: rows of (duration_ticks, pitch, velocity).
        filepath: where the resulting .mid file is saved.
    """
    mid = mido.MidiFile()
    track = mido.MidiTrack()
    mid.tracks.append(track)
    print("\n----Making Midi File----\n")
    print("Ticks per beat:{}\n".format(mid.ticks_per_beat))
    track.append(mido.MetaMessage('set_tempo', tempo=500000, time=0))
    track.append(mido.MetaMessage('track_name', name='Piano', time=0))
    track.append(mido.Message('program_change', program=0, time=0))
    for row in list_track_one:
        track.append(
            mido.Message('note_on', note=row[1], velocity=row[2], time=50))
        # BUG FIX: the release previously passed row[1] (the pitch) as
        # its velocity; use the stored velocity row[2].
        track.append(
            mido.Message('note_off', note=row[1], velocity=row[2], time=row[0]))
    mid.save(filepath)
def change_tempo(filename, data_path, target_path):
    """Copy a MIDI file while forcing every set_tempo to 500000 us/beat.

    Reads data_path+filename, rewrites tempo events, optionally
    quantizes and/or offsets message times, and saves the copy to
    target_path+filename.

    NOTE(review): relies on module-level globals `discretize_time`,
    `discritezition`, `offset_time`, `offset` and helper `myround` —
    confirm they are defined wherever this runs.
    """
    mid = mido.MidiFile(data_path + filename)
    new_mid = mido.MidiFile()
    new_mid.ticks_per_beat = mid.ticks_per_beat
    for track in mid.tracks:
        new_track = mido.MidiTrack()
        for msg in track:
            new_msg = msg.copy()
            if new_msg.type == 'set_tempo':
                new_msg.tempo = 500000
            # if msg.type == 'note_on' or msg.type == 'note_off':
            if discretize_time:
                print(msg.time)
                # Snap to a grid of ticks_per_beat / (discritezition / 4).
                new_msg.time = myround(msg.time,
                                       base=mid.ticks_per_beat /
                                       (discritezition / 4))
                # msg.time = myround(msg.time, base=mid.ticks_per_beat/(discritezition/4) )
            if offset_time:
                # print('first:', time)
                # NOTE(review): the printed value divides by (offset/4)
                # but the applied shift divides by offset — possibly
                # inconsistent; confirm which is intended.
                print((mid.ticks_per_beat / (offset / 4)))
                new_msg.time = int(msg.time + mid.ticks_per_beat / (offset))
                # print('second:', new_time)
                # print('diff:',time )
                # msg.time = time
            new_track.append(new_msg)
        new_mid.tracks.append(new_track)
    new_mid.save(target_path + filename)
def convert(onehotarray_sequence, filename):
    """Render 128-wide one-hot pitch arrays as a MIDI file.

    Consecutive arrays are diffed: a bit turning on emits note_on, a
    bit turning off emits note_off. Each step advances time by 96
    ticks (960 ticks/beat * 0.1 s per sample); any notes still sounding
    at the end are switched off together.
    """
    midi_file = mido.MidiFile()
    track = mido.MidiTrack()
    midi_file.tracks.append(track)

    pitch_axis = np.arange(0, 128, 1)
    previous = np.zeros(128)  # nothing sounding before the first step
    delta = 0                 # ticks accumulated since the last message
    for current in onehotarray_sequence:
        # Pitches whose state flipped between the two steps (xor).
        for pitch in pitch_axis[np.logical_xor(previous, current)]:
            if current[pitch] == 0:
                track.append(
                    mido.Message('note_off', note=pitch, velocity=127, time=delta))
                delta = 0
            if current[pitch] == 1:
                track.append(
                    mido.Message('note_on', note=pitch, velocity=127, time=delta))
                delta = 0
        # One array sample = 96 ticks.
        delta = delta + 96
        previous = current

    # Close anything still active, all at the same tick.
    for pitch in pitch_axis[previous == 1]:
        track.append(
            mido.Message('note_off', note=pitch, velocity=127, time=delta))
        delta = 0
    midi_file.save(filename)
def play_song_or_save(all_mid, save, programs):
    """Assemble a single-track song from `all_mid` and write it to
    'new_song.mid'; when `save` is falsy, also play it with pygame.

    Args:
        all_mid: source MIDI data consumed by the get_* helpers.
        save: truthy = save only; falsy = save and play.
        programs: instrument program numbers selecting the channels.
    """
    list1 = get_songs_msgs(all_mid)
    ticksperbeat = get_ticksperbeat(all_mid)
    channels_dict = get_channels_dict(list1)

    mid = mido.MidiFile()
    track = mido.MidiTrack()
    mid.tracks.append(track)
    mid.ticks_per_beat = ticksperbeat
    # NOTE(review): MidiTrack has no ticks_per_beat in mido; kept as an
    # ad-hoc attribute in case downstream helpers read it — confirm.
    track.ticks_per_beat = ticksperbeat

    channels = [channels_dict[program] for program in programs]
    channels_lists_dict = get_lists_for_all_channels(channels, list1)
    put_together_song(track, channels_lists_dict, list1, channels, programs,
                      channels_dict)

    # FIX: both branches previously performed the identical save (the
    # original even carried a "need to change this" note) — save once
    # and only play when not saving.
    mid.save('new_song.mid')  # TODO: make the output path a parameter
    if not save:
        play_with_pygame('new_song.mid')
def __init__(self, midi_file=r"utils\midi\output.mid", virtual_port=MIDI_PORT,
             instrument=0, output_msg_type=OUTPUT_MSG_TYPE):
    """Set up an in-memory MIDI file, open the output port, and record
    the start time.

    Args:
        midi_file: path used when the accumulated track is saved.
            (FIX: raw string — the old literal relied on "\\m"/"\\o" not
            being escape sequences, which warns on modern Python; the
            value is byte-identical.)
        virtual_port: name of the mido output port to open.
        instrument: currently unused (pygame playback is disabled).
        output_msg_type: format used for outgoing messages.
    """
    # In-memory MIDI container with a single track.
    self.midi_file = midi_file
    self.midi = mido.MidiFile()
    self.track = mido.MidiTrack()
    self.midi.tracks.append(self.track)
    self.track.append(mido.Message('program_change', program=12, time=0))
    self.midi_msgs = []

    # Output port.
    self.virtual_port = virtual_port
    self.outport = mido.open_output(self.virtual_port)
    # self.inport = mido.open_input(self.virtual_port)

    # Timing reference used to compute message deltas.
    self.start_time = time.time()
    self.prev_time = self.start_time

    # pygame playback is disabled:
    # pygame.midi.init()
    # self.player = pygame.midi.Output(0)
    # self.player.set_instrument(instrument)

    # Output settings.
    self.output_msg_type = output_msg_type
def add_drum(mid1, output_midi, tempo, mean_beat, beat_num, type, velocity):
    """Append a simple alternating drum pattern track to `mid1`.

    Odd beats get a low hit (note 24, velocity - 30), even beats a
    higher hit (note 14, full velocity); each beat slot is 480 ticks,
    advanced by a silent spacer note. Returns the modified file and
    the filename derived from `output_midi` and `type`.
    """
    mid = mid1
    drum_track = mido.MidiTrack()
    mid.tracks.append(drum_track)
    us_per_beat = mido.bpm2tempo(tempo)
    drum_track.append(
        mido.MetaMessage('set_tempo', tempo=round(us_per_beat * slow), time=0))

    gap = 480  # ticks per beat slot
    for beat in range(0, beat_num):
        # Silent spacer note: its note_off advances time by one slot.
        drum_track.append(mido.Message('note_on', note=60, velocity=0, time=0))
        drum_track.append(mido.Message('note_off', note=60, velocity=0, time=gap))
        # Instrument used by this track.
        drum_track.append(mido.Message('program_change', program=type, time=0))
        if beat % 2 == 1:
            drum_track.append(
                mido.Message('note_on', note=24, velocity=velocity - 30, time=0))
            drum_track.append(
                mido.Message('note_off', note=24, velocity=velocity - 30, time=0))
        else:
            drum_track.append(
                mido.Message('note_on', note=14, velocity=velocity, time=0))
            drum_track.append(
                mido.Message('note_off', note=14, velocity=velocity, time=0))

    output_midi = getFilename(output_midi, type)
    # mid.save(output_midi)
    return mid, output_midi
def shift_tones(orig_midi, semitones):
    """Return a copy of the file at `orig_midi` transposed by
    `semitones`.

    Notes that would leave the valid 0-127 MIDI range keep their
    original pitch but are silenced (velocity 0). Non-note messages
    are copied through unchanged.
    """
    source = mido.MidiFile(orig_midi)
    shifted = mido.MidiFile()
    shifted.ticks_per_beat = source.ticks_per_beat

    logger.debug("Midi Information: {}".format(orig_midi))
    logger.debug("\tTicks per beat: {}".format(shifted.ticks_per_beat))
    logger.debug("\tTotal tracks: {}".format(len(source.tracks)))
    logger.debug("\tMidi Type: {}".format(source.type))

    for track in source.tracks:
        new_track = mido.MidiTrack()
        for msg in track:
            if msg.type not in ['note_on', 'note_off']:
                new_track.append(msg.copy())
                continue
            candidate = msg.note + semitones
            if 0 <= candidate <= 127:
                new_track.append(msg.copy(note=candidate))
            else:
                # Out of range: undo the shift and mute the note.
                new_track.append(
                    msg.copy(note=candidate - semitones, velocity=0))
        shifted.tracks.append(new_track)

    # head, tail = os.path.split(args.file)
    # shifted.save(os.path.join(head, 'minus-2-semitones-{}'.format(tail)))
    display_notes(source, shifted, semitones)
    return shifted
def make_midi(input_wav, notes, tempo, mean_beat, instrument, velocity):
    """Build a MidiFile from (note_name, duration) pairs.

    'r' entries become rests (silent notes); other names go through
    librosa.note_to_midi. Durations are scaled so `mean_beat` maps to
    480 ticks. Returns (midifile, output_filename).
    """
    song = mido.MidiFile()
    melody = mido.MidiTrack()
    song.tracks.append(melody)
    us_per_beat = mido.bpm2tempo(tempo)
    melody.append(
        mido.MetaMessage('set_tempo', tempo=round(us_per_beat * slow), time=0))
    melody.append(mido.Message('program_change', program=instrument, time=0))

    for note in notes:
        gap = int(round((note[1] * 480) / mean_beat))
        if note[0] == 'r':
            # Rest: a silent note that only advances time.
            melody.append(mido.Message('note_on', note=60, velocity=0, time=0))
            melody.append(mido.Message('note_off', note=60, velocity=0, time=gap))
        else:
            pitch = librosa.note_to_midi(note[0])
            melody.append(
                mido.Message('note_on', note=pitch, velocity=velocity, time=0))
            melody.append(
                mido.Message('note_off', note=pitch, velocity=velocity, time=gap))

    output_midi = getFilename(input_wav, instrument)
    # song.save(output_midi)
    return song, output_midi
def createMIDI(self, jfs):
    """Render the parsed score structures `jfs` into a temporary MIDI
    file.

    Walks lines -> bars -> notes and plays each via self.play_note.
    Returns the open NamedTemporaryFile rewound to the start.
    """
    mid = mido.MidiFile()
    track = mido.MidiTrack()
    mid.tracks.append(track)
    temp = tempfile.NamedTemporaryFile()
    for jf in jfs:
        for line in jf["lines"]:
            for bar in line:
                for note in bar:
                    # Base duration is 2/length; the dotted term is
                    # multiplied by 0, so dots currently add nothing —
                    # preserved as-is (TODO confirm intent).
                    if note["type"] == "single note":
                        self.play_note(
                            note["name"],
                            2 / int(note["length"]) +
                            (1 / int(note["length"])) * note["dotted"] * 0,
                            track,
                            base_num=note["pitch"])
                    elif note["type"] == "tuple note":
                        for single in note["group"]:
                            self.play_note(
                                single["name"],
                                2 / int(single["length"]) +
                                (1 / int(single["length"])) * single["dotted"] * 0,
                                track,
                                base_num=single["pitch"])
                    elif note["type"] == "rest":
                        # Rests are played at zero velocity.
                        self.play_note(
                            [0],
                            2 / int(note["length"]) +
                            (1 / int(note["length"])) * note["dotted"] * 0,
                            track,
                            velocity=0.0)
    # FIX: use the public save(file=...) API rather than the private
    # MidiFile._save method.
    mid.save(file=temp)
    temp.seek(0)
    return temp
def multi_pianoroll_to_midi(file_name, bpm, pianoroll_dic):
    """Write a multi-track piano-roll dictionary to `file_name`.

    Args:
        file_name: output .mid path.
        bpm: song speed; converted to microseconds per beat.
        pianoroll_dic: maps channel number -> {'name', 'program',
            'note'} where each note is (start_beat, pitch, velocity,
            length_beats).
    """
    # 1. Setup.
    mid = mido.MidiFile()
    tracks = {}
    midi_tempo = round(60000000 / bpm)  # microseconds per beat
    first_track = True

    # 2. One track per dictionary key.
    for key in pianoroll_dic:
        # 2.1. Track name / instrument; tempo goes on the first track only.
        tracks[key] = mido.MidiTrack()
        mid.tracks.append(tracks[key])
        if first_track:
            tracks[key].append(
                mido.MetaMessage('set_tempo', tempo=midi_tempo, time=0))
            first_track = False
        tracks[key].append(
            mido.MetaMessage('track_name', name=pianoroll_dic[key]['name'],
                             time=0))
        tracks[key].append(
            mido.Message('program_change',
                         program=pianoroll_dic[key]['program'],
                         time=0, channel=key))

        # 2.2. Expand each note into on/off events sorted by beat.
        note_list = []
        for note_it in pianoroll_dic[key]['note']:
            note_list.append(['on', note_it[0], note_it[1], note_it[2]])
            note_list.append(
                ['off', note_it[0] + note_it[3], note_it[1], note_it[2]])
        note_list.sort(key=lambda item: item[1])

        # 2.3. Emit the events with delta times (480 ticks per beat).
        current_note_time = 0
        for kind, beat, pitch, velo in note_list:
            delta = round(480 * (beat - current_note_time))
            if kind == 'on':
                tracks[key].append(
                    mido.Message('note_on', note=pitch, velocity=velo,
                                 time=delta, channel=key))
            elif kind == 'off':
                tracks[key].append(
                    mido.Message('note_off', note=pitch, velocity=velo,
                                 time=delta, channel=key))
            current_note_time = beat

    # 3. Save the file.
    mid.save(file_name)
def create_midi_track(rhythm, beat_num, chord_prog, pattern): accom = mido.MidiTrack() for t in range(beat_num): sounds = chord_prog.current(t).sounds prev = 0.0 for j, at in enumerate(pattern.iterkeys()): for i, a in enumerate(pattern[at]['notes']): offset = 0 if i == 0: offset = int(48 * 4 * rhythm.simple * (at - prev)) accom.append( mido.Message('note_on', note=sounds[a % len(sounds)] + 12 * 5, time=offset, velocity=32)) for i, a in enumerate(pattern[at]['notes']): offset = 0 if i == 0: offset = int(48 * 4 * rhythm.simple * pattern[at]['length']) accom.append( mido.Message('note_off', note=sounds[a % len(sounds)] + 12 * 5, time=offset)) prev = at + pattern[at]['length'] return accom
def merge_tracks(tracks):
    """Merge several tracks into one, preserving chronological order.

    Message times are converted to absolute ticks, the combined list is
    stably sorted on absolute time, and deltas are then recomputed.
    The track index is kept alongside each message (unused for now, but
    handy if per-track channels are wanted later).
    """
    stamped = []
    for tracknum, track in enumerate(tracks):
        absolute = 0  # absolute time used to order messages
        for msg in track:
            absolute += msg.time
            stamped.append((msg, absolute, tracknum))
    # Sort the triples by their absolute time (second component).
    stamped.sort(key=lambda triple: triple[1])

    # Unpack and recompute each message's delta time.
    merged = []
    previous = 0
    for msg, abs_time, _ in stamped:
        # msg.copy is the recommended way to change a message's time.
        merged.append(msg.copy(time=abs_time - previous))
        previous = abs_time
    return mido.MidiTrack(merged)
def save_transcription(path, pitches, intervals, velocities,
                       ticks_per_beat=480, tempo=500000):
    """Write a transcription to `path` as a single-track MIDI file.

    Args:
        path: output filename.
        pitches, velocities: per-note arrays (anything with .tolist()).
        intervals: per-note (onset_seconds, offset_seconds) pairs.
        ticks_per_beat, tempo: timing constants for the seconds->ticks
            conversion (generalized from the previously hard-coded
            480 / 500000; the defaults preserve old behavior).
    """
    midi = mido.MidiFile(ticks_per_beat=ticks_per_beat)
    track = mido.MidiTrack()
    midi.tracks.append(track)

    pitches = pitches.tolist()
    intervals = intervals.tolist()
    velocities = velocities.tolist()

    # Build note_on/note_off messages stamped with ABSOLUTE seconds.
    messages = []
    for pitch, interval, velocity in zip(pitches, intervals, velocities):
        messages.append(
            mido.Message("note_on", note=pitch, time=interval[0],
                         velocity=velocity))
        messages.append(
            mido.Message("note_off", note=pitch, time=interval[1],
                         velocity=velocity))
    messages.sort(key=operator.attrgetter("time"))

    # Convert absolute seconds to per-message delta ticks.
    time = 0
    for message in messages:
        tick = int(mido.second2tick(message.time - time, ticks_per_beat, tempo))
        track.append(message.copy(time=tick))
        time = message.time
    midi.save(path)
def concat_midi(head, tail):
    """Concatenate two MidiFiles track-by-track and return the result.

    The longest track of `head` (in cumulative ticks) defines where
    `tail` begins; the first message of each tail track is delayed so
    all tail tracks start together. Only note_on / note_off /
    set_tempo messages are kept.
    """
    # Length of `head` = the largest cumulative time over its tracks.
    time = 0
    for i, track in enumerate(head.tracks):
        track_time = 0
        for message in track:
            if hasattr(message, 'time'):
                track_time += message.time
        time = max(track_time, time)

    with mido.MidiFile(ticks_per_beat=48, charset='utf-8') as midi:
        for i in range(max(len(head.tracks), len(tail.tracks))):
            track = mido.MidiTrack()
            elapsed = 0  # ticks already consumed by the head portion
            if i < len(head.tracks):
                for message in head.tracks[i]:
                    if message.type in ['note_on', 'note_off', 'set_tempo']:
                        track.append(message)
                        elapsed += message.time
            if i < len(tail.tracks):
                for j, message in enumerate(tail.tracks[i]):
                    if message.type in ['note_on', 'note_off', 'set_tempo']:
                        if j == 0:
                            # BUG FIX: copy with the adjusted time
                            # instead of mutating the caller's message
                            # object in place (`message.time += ...`).
                            message = message.copy(
                                time=message.time + (time - elapsed))
                        track.append(message)
            midi.tracks.append(track)
    return midi
def to_midi_from_matrix(self, matrix):
    """Convert a binary piano-roll `matrix` (time x self.span) into a
    mido.MidiFile and return it (one roll row per tick, 16 ticks/beat,
    120 BPM).
    """
    # Previous state of every row; the roll starts from silence.
    shifted = [[0 for _ in range(self.span)]] + matrix[:-1]

    pattern = mido.MidiFile()
    pattern.ticks_per_beat = 16
    track = mido.MidiTrack()
    pattern.tracks.append(track)
    track.append(
        mido.MetaMessage('set_tempo', tempo=mido.bpm2tempo(120), time=0))

    last_event_tick = 0
    final_tick = len(matrix) - 1
    for tick, (row, prev_row) in enumerate(zip(matrix, shifted)):
        for pitch, (now, before) in enumerate(zip(row, prev_row)):
            note = pitch + self.lowerbound
            if before == 1 and now == 0:
                # Note released on this tick.
                self.add_note_off_event(track, tick - last_event_tick, note)
                last_event_tick = tick
            elif before == 0 and now == 1:
                # Note struck on this tick.
                self.add_note_on_event(track, tick - last_event_tick, note)
                last_event_tick = tick
            if tick == final_tick and now == 1:
                # Close anything still sounding on the final row.
                self.add_note_off_event(track, tick - last_event_tick, note)
                last_event_tick = tick
    track.append(mido.MetaMessage('end_of_track', time=last_event_tick + 1))
    return pattern
def measure_to_messages(self, measure: np.ndarray, start_time: int,
                        active_notes: list) -> (mido.MidiTrack, int, list):
    """
    Takes a numpy `measure` and `start_time` (the time between the last
    message in the midifile and the start of this measure) and converts
    it to a miditrack.

    Returns the converted miditrack, the time between the last message
    and the end of the measure, and any notes that haven't been turned
    off, as a tuple.
    """
    cur_time = start_time
    track = mido.MidiTrack()
    for note in measure:
        # Start every pitch set in this time step; the first message of
        # the step consumes the accumulated delta, the rest are at 0.
        pitches = np.where(note == 1)[0]
        for pitch in pitches:
            track = self.add_message(track, pitch, cur_time)
            cur_time = 0
            active_notes.append({"pitch": pitch, "time": 0})
        # Release notes that have sounded for at least note_duration;
        # mark them None so they can be pruned below.
        for i, active_note in enumerate(active_notes):
            if active_note["time"] >= self.note_duration:
                track = self.add_message(track, active_note["pitch"],
                                         cur_time, msg="note_off")
                active_notes[i] = None
                cur_time = 0
        # Prune in place ([:]) so the caller's list object stays in sync.
        active_notes[:] = [n for n in active_notes if n is not None]
        # Advance by one step and age every still-active note.
        cur_time += self.pp_time
        for i in range(len(active_notes)):
            active_notes[i]["time"] += self.pp_time
    return track, cur_time, active_notes
def to_midi_track(self, use_hanzi: bool = False):
    '''
    Convert this dv segment object into a mido.MidiTrack.
    Lyrics default to the pinyin stored in the dv file; pass
    use_hanzi=True to use the hanzi instead.
    '''
    import mido
    track = mido.MidiTrack()
    elapsed = 0  # absolute end time of the previous note
    for note in self.note:
        lyric = note.hanzi if use_hanzi else note.pinyin
        # The lyric event carries the gap since the previous note ended.
        track.append(
            mido.MetaMessage('lyrics', text=lyric, time=(note.start - elapsed)))
        track.append(
            mido.Message('note_on', note=note.notenum, velocity=64, time=0))
        track.append(
            mido.Message('note_off', note=note.notenum, velocity=64,
                         time=note.length))
        elapsed = note.start + note.length
    track.append(mido.MetaMessage('end_of_track'))
    return track
def merge_tracks(tracks):
    """Flatten `tracks` into one chronologically-ordered MidiTrack.

    Each track is converted to absolute time (tagged with its index via
    to_abstime), the combined stream is stably sorted on that time, and
    the result is converted back to deltas with a repaired
    end_of_track.
    """
    combined = []
    for index, track in enumerate(tracks):
        combined.extend(to_abstime(track, index))
    ordered = sorted(combined, key=lambda item: item[0].time)
    return mido.MidiTrack(fix_end_of_track(to_reltime(ordered)))
def generate():
    """Generate a Markov-chain song from the transition table in
    data.json; write it to new_song.mid and log each message to
    data_gen.txt.

    The context key is the concatenated byte digits of the previous N
    messages (N is a module-level constant).
    """
    with open("data.json", "r") as out:
        pattern = json.load(out)

    MAX = 100
    new_song = mido.MidiFile()
    # Seed: program change + volume/pan controllers as raw byte lists.
    new_notes = [[192, 1], [176, 7, 127], [176, 10, 64]]
    for i in range(N, MAX):
        # Get the last N notes as a string key.
        to_string = ""
        for j in range(i - N, i):
            to_string += "".join([str(x) for x in new_notes[j]])
        # Pick the next note.
        if to_string in pattern:
            new_notes.append(random.choice(pattern[to_string]))
        else:
            # Unknown context: restart from a random known context.
            # BUG FIX: random.choice needs an indexable sequence;
            # dict.keys() is a non-indexable view on Python 3, so
            # materialize the keys first.
            new_notes.append(
                random.choice(pattern[random.choice(list(pattern))]))

    new_track = mido.MidiTrack()
    new_song.tracks.append(new_track)
    # Open the log once instead of re-opening per message.
    with open("data_gen.txt", "a") as out:
        for i in range(MAX):
            msg = mido.Message.from_bytes(new_notes[i])
            new_track.append(msg)
            out.write(str(msg) + "\n")
    new_song.save("new_song.mid")
def transfer_midi(variation, out_path):
    """Write the note sequence `variation` to `out_path` as MIDI.

    Each element is decoded via convert_midi() into (note, time);
    velocity is drawn uniformly from [85, 110). A short silent note is
    appended as a tail.
    """
    ticks_per_beat = 960
    # mido.MetaMessage('set_tempo', tempo=__tempo)
    outfile = mido.MidiFile()
    track = mido.MidiTrack()
    outfile.tracks.append(track)

    for step in variation:
        velocity = random.sample(range(85, 110), 1)[0]
        decoded = convert_midi(step, ticks_per_beat)
        note = decoded[0]
        duration = int(decoded[1])
        track.append(
            mido.Message('note_on', note=note, velocity=velocity,
                         time=duration))
        track.append(
            mido.Message('note_off', note=note, velocity=velocity,
                         time=duration))

    # Trailing silent note so the file doesn't end abruptly.
    track.append(mido.Message('note_on', note=60, velocity=0, time=240))
    track.append(mido.Message('note_off', note=60, velocity=0, time=240))
    outfile.save(out_path)