def writeNoteListToMidiFile(self):
    """Serialize self.noteList into a type-0 MIDI file and register it in the UI lists.

    Each entry of self.noteList is ((status, note, velocity), timestamp_ms);
    status 144 is note-on, 128 is note-off.  Deltas between consecutive events
    are converted from milliseconds to MIDI ticks.  Afterwards the note list
    is cleared and the lick counter advanced.
    """
    with MidiFile(type=0) as mid:
        track = MidiTrack()
        mid.tracks.append(track)
        # Hoisted: the original recomputed bpm2tempo() four times per event.
        tempo = mido.bpm2tempo(self.tempo)
        track.append(MetaMessage('set_tempo', tempo=tempo))
        track.append(MetaMessage('time_signature', numerator=4, denominator=4))
        # MIDI status byte -> mido message type.
        type_for_status = {144: 'note_on', 128: 'note_off'}
        # The first event has no predecessor: emit it at delta time 0.
        if self.noteList[0][0][0] == 144:
            track.append(Message('note_on', note=self.noteList[0][0][1],
                                 velocity=self.noteList[0][0][2], time=0))
        for i in range(1, len(self.noteList)):
            noteEvent = self.noteList[i]
            lastEvent = self.noteList[i - 1]
            msg_type = type_for_status.get(noteEvent[0][0])
            if msg_type is None:
                continue  # not a note on/off event
            # Delta ticks between this event and the previous one.  The two
            # original branches were identical apart from the message type.
            delta = (int(milliSecondsToTicks(noteEvent[1], tempo, mid.ticks_per_beat))
                     - int(milliSecondsToTicks(lastEvent[1], tempo, mid.ticks_per_beat)))
            track.append(Message(msg_type, note=noteEvent[0][1],
                                 velocity=noteEvent[0][2], time=delta))
        lickFileName = 'lick' + str(self.numberOfLicks) + '-' + str(self.tempo)
        mid.save(MIDI_DIR + lickFileName + '.mid')
        self.midiFileDisplayList.addItem(lickFileName)
        self.midiFileList.append((lickFileName, self.tempo))
        self.numberOfLicks += 1
        self.noteList = []
def to_midi(self, bpm=120, low_c=48):
    """Render self.__notes into a single-track mido.MidiFile.

    Each note object supplies .note (pitch offset from `low_c`), .time
    (start, in subdivisions) and .length (duration, in subdivisions).
    Note-on/off events are merged and sorted by time, then written with
    delta times measured in ticks-per-subdivision.
    """
    midi = mido.MidiFile()
    track = mido.MidiTrack()
    midi.tracks.append(track)
    track.append(mido.MetaMessage('set_tempo', tempo=mido.bpm2tempo(bpm)))
    # Tick length of one subdivision of a beat.
    sec_per_sub = 60. / bpm / self.subdivisions_per_beat
    subdiv_delta = int(
        mido.second2tick(sec_per_sub, midi.ticks_per_beat, mido.bpm2tempo(bpm)))
    # (dead local `current_delta = 0` removed — it was never read)
    last_time = 0
    note_on_events = [('note_on', i.note, i.time) for i in self.__notes]
    note_off_events = [('note_off', i.note, i.time + i.length)
                       for i in self.__notes]
    note_events = sorted(note_on_events + note_off_events, key=lambda x: x[2])
    for note_type, note, time in note_events:
        current_time = int((time - last_time) * subdiv_delta)
        track.append(
            mido.Message(note_type, note=note + low_c, velocity=80,
                         time=current_time))
        last_time = time
    return midi
def __init__(self, event_seq, tempo=mido.bpm2tempo(120)):
    """Initialize the player state for an event sequence.

    Parameters:
        event_seq: sequence of events to play back.
        tempo: track tempo in microseconds per beat (default: 120 BPM).
    """
    self.event_seq = event_seq
    self.last_velocity = 0
    self.delta_time = 0
    # NOTE(review): self.tempo is hard-coded to 120 BPM and ignores the
    # `tempo` argument, which is stored separately as track_tempo below.
    # Confirm this asymmetry is intentional.
    self.tempo = mido.bpm2tempo(120)
    self.track_tempo = tempo
    self.reset()
def matrix_to_midi(matrix, subdivisions_per_beat, bpm=120, low_c=60,
                   hold_notes=False):
    """Convert a (pitch x subdivision) activation matrix into a mido.MidiFile.

    A note-on is emitted when a cell becomes active and a note-off when it
    deactivates; with hold_notes=True, contiguous active cells are held as a
    single note instead of being retriggered each subdivision.
    Assumes `matrix` is a 2-D numpy-like array (uses .shape and 2-D indexing)
    — TODO confirm.
    """
    midi = mido.MidiFile()
    track = mido.MidiTrack()
    midi.tracks.append(track)
    track.append(mido.MetaMessage('set_tempo', tempo=mido.bpm2tempo(bpm)))
    # Tick length of one subdivision of a beat.
    sec_per_sub = 60. / bpm / subdivisions_per_beat
    subdiv_delta = int(
        mido.second2tick(sec_per_sub, midi.ticks_per_beat,
                         mido.bpm2tempo(bpm)))
    for subdivision in range(matrix.shape[1]):
        # Delta time owed to the first message emitted in this subdivision;
        # reset to 0 once a message carries it.
        current_delta = subdiv_delta
        try:
            for note in range(matrix.shape[0]):
                # Compare against the previous column to detect edges.
                # NOTE(review): at subdivision 0 the index -1 wraps to the
                # LAST column for numpy arrays, so this except branch likely
                # never fires for numpy input — confirm intended behavior.
                off = matrix[note, subdivision - 1] and not matrix[note, subdivision] if hold_notes \
                    else matrix[note, subdivision - 1]
                on = matrix[note, subdivision] and not matrix[note, subdivision - 1] if hold_notes \
                    else matrix[note, subdivision]
                if off:
                    track.append(
                        mido.Message('note_off', note=note + low_c,
                                     velocity=80, time=current_delta))
                    current_delta = 0
                if on:
                    track.append(
                        mido.Message('note_on', note=note + low_c,
                                     velocity=80, time=current_delta))
                    current_delta = 0
        except IndexError:
            # Fallback for the boundary column: just sound the active notes.
            for note in range(matrix.shape[0]):
                if matrix[note, subdivision]:
                    track.append(
                        mido.Message('note_on', note=note + low_c,
                                     velocity=80, time=0))
    return midi
def notes2mid(notes):
    """Convert a list of notes into a mido.MidiFile at 120 BPM, 480 tpb.

    Each note is assumed to be [onset_seconds, offset_seconds, pitch]
    (mutable; pitch is rounded in place) — TODO confirm against callers.
    Notes with pitch 0 are skipped.
    """
    mid = mido.MidiFile()
    track = mido.MidiTrack()
    mid.tracks.append(track)
    mid.ticks_per_beat = 480
    new_tempo = mido.bpm2tempo(120.0)
    track.append(mido.MetaMessage('set_tempo', tempo=new_tempo))
    track.append(mido.Message('program_change', program=0, time=0))
    # Running total of delta ticks already written to the track.
    cur_total_tick = 0
    for note in notes:
        if note[2] == 0:
            continue  # pitch 0 marks an empty slot
        note[2] = int(round(note[2]))  # pitch must be an int for mido
        # Absolute tick of the note onset.
        ticks_since_previous_onset = int(mido.second2tick(
            note[0], ticks_per_beat=480, tempo=new_tempo))
        # Absolute tick of the note offset; the small epsilon avoids the
        # offset landing exactly on the next onset.
        ticks_current_note = int(mido.second2tick(
            note[1]-0.0001, ticks_per_beat=480, tempo=new_tempo))
        # Delta from the previous written event to this onset / offset.
        note_on_length = ticks_since_previous_onset - cur_total_tick
        note_off_length = ticks_current_note - note_on_length - cur_total_tick
        track.append(mido.Message(
            'note_on', note=note[2], velocity=100, time=note_on_length))
        track.append(mido.Message(
            'note_off', note=note[2], velocity=100, time=note_off_length))
        cur_total_tick = cur_total_tick + note_on_length + note_off_length
    return mid
def midify_tempo(self, signal):
    """Translate a tempo signal dict into a MIDI set_tempo meta message.

    Expects signal['type'] == 'tempo' and a 'bpm' entry; returns '' on a
    mismatched signal type.  The converted tempo is cached on the instance.
    """
    if signal['type'] != 'tempo':
        print('Error when midifying tempo')
        return ''
    scaled_bpm = int(signal['bpm']) * 18
    self.ticks_per_beat = mido.bpm2tempo(scaled_bpm)
    return MetaMessage("set_tempo", tempo=self.ticks_per_beat)
def create_midi(notelist, dir, bpm, ticks):
    """Write `notelist` to a MIDI file at path `dir`.

    Note lengths are converted from beats to ticks in place; rest notes
    accumulate delay that is attached to the next sounding note.
    """
    # Convert each note's beat length into MIDI ticks (mutates the notes).
    for note in notelist:
        note.length = note_tools.cal_beat_to_tick((note.length), ticks)
    midi_file = MidiFile(ticks_per_beat=ticks)
    track = MidiTrack()
    midi_file.tracks.append(track)
    track.append(Message('program_change', program=0, time=0))
    track.append(
        MetaMessage('set_tempo', tempo=mido.bpm2tempo(bpm), time=0))
    pending_delay = 0  # rest time carried into the next note-on
    for item in notelist:
        if type(item) is note_tools.rest_note:
            pending_delay += item.length
        else:
            track.append(
                Message('note_on', note=item.note, velocity=100,
                        time=pending_delay))
            track.append(
                Message('note_off', note=item.note, velocity=64,
                        time=item.length))
            pending_delay = 0
    midi_file.save(dir)
def preprocessOneWav(self, inPath, outPath):
    """Split one wav into chunks, render spectrogram images, and load them.

    Returns (image_batch, chunk_numbers, tempo_in_us_per_beat) where
    image_batch is a float32 array scaled to [0, 1].
    """
    chunks, tempo = self.midiUtils.split_wav(inPath, outPath)
    for source in chunks:
        self.converter.WavToSpec(source, source[:-4] + ".jpg")
    images = []
    chunk_numbers = []
    for filename in os.listdir(outPath):
        if not filename.endswith(".jpg"):
            continue
        img = Image.open(os.path.join(outPath, filename))
        # Crop away the plot borders, then shrink to the model's input size.
        img = img.crop((14, 13, 594, 301))
        scaled = img.resize((49, 145), Image.NEAREST)
        scaled.load()
        images.append(np.asarray(scaled, dtype="float32"))
        # Chunk index is encoded in the filename after the last underscore.
        chunk_numbers.append(int(filename.split("_")[-1].split(".")[0]))
    batch = np.array(images) / 255.0
    return batch, chunk_numbers, mido.bpm2tempo(tempo)
def createMidi(tempo, bahp_points):
    """
    Create a mido.MidiFile object from an initial tempo (int) and a list of
    bahp points (each element is a tuple: (tick, pitch, velocity, duration))
    """
    assertMido('to')
    bahp_points.sort()

    # MIDI setup: single-track (type 0) file.
    midi = mido.MidiFile(type=0, ticks_per_beat=QUARTER_NOTE)
    track = mido.MidiTrack()
    midi.tracks.append(track)

    # Initial tempo.
    track.append(
        mido.MetaMessage('set_tempo', tempo=mido.bpm2tempo(tempo), time=0))

    # Notes: delta time of each note_on is measured from the end of the
    # previous note; note_off's delta carries the duration.
    cursor = 0
    for start, pitch, velocity, length in bahp_points:
        track.append(
            mido.Message('note_on', note=pitch, velocity=velocity,
                         time=(start - cursor)))
        track.append(
            mido.Message('note_off', note=pitch, velocity=0, time=length))
        cursor = start + length
    return midi
def save_musical_file(path: str, out: str = None):
    """Re-render the hits extracted from `path` as a new MIDI file.

    Defaults to overwriting the input.  Each extracted hit (bpm, duration,
    notes) releases the previous chord 95% of the way through its duration
    and strikes the new chord at the remaining 5%, so hits stay separated.
    """
    if out is None:
        out = path
    f = MidiFile()
    ap = f.add_track('Main').append
    last_bpm = tempo2bpm(DEFAULT_TEMPO)
    ap(MetaMessage("set_tempo", tempo=DEFAULT_TEMPO))
    notes_on = []  # notes currently sounding
    i = 0  # number of hits written
    for bpm, duration, notes in musical_extract_midi(path):
        i += 1
        if notes_on:
            # Release the previous chord after 95% of the duration.
            ap(Message(type='note_off', note=notes_on[0],
                       time=int(duration * f.ticks_per_beat * 0.95)))
            for n in notes_on[1:]:
                ap(Message(type='note_off', note=n, time=0))
        notes_on = notes
        # Strike the new chord at the remaining 5% of the duration.
        # NOTE(review): notes_on[0] receives a second note_on in the loop
        # below (it iterates ALL of notes_on, unlike the note_off loops
        # which skip index 0) — confirm this duplicate is intentional.
        ap(Message(type='note_on', note=notes_on[0],
                   time=int(duration * f.ticks_per_beat * 0.05)))
        for n in notes_on:
            ap(Message(type='note_on', note=n, time=0))
        if bpm != last_bpm:
            last_bpm = bpm
            ap(MetaMessage("set_tempo", tempo=bpm2tempo(bpm)))
    if notes_on:
        # Last note; just make it 1 beat long
        ap(Message(type='note_off', note=notes_on[0], time=f.ticks_per_beat))
        for n in notes_on[1:]:
            ap(Message(type='note_off', note=n, time=0))
    f.save(out)
    print(f"{i} hits wrote to {out}")
def play(self, path, basebpm=120, transpose=0) -> None:
    """
    Play a MIDI file on the carillon, using the first track.

    Parameters
    ----------
    path : str
        Path to the MIDI file to play.
    basebpm : int (optional)
        BPM that corresponds to 120.  Lets the piece be sped up or
        slowed down.
    transpose : int (optional)
        Number of semitones by which to transpose the piece.
    """
    mid = mido.MidiFile(path)
    tempo = mido.bpm2tempo(basebpm)
    for msg in mid.tracks[0]:
        if msg.type == 'set_tempo':
            # Scale the file's tempo by the 120/basebpm speed factor.
            tempo = 120 / basebpm * msg.tempo
        if msg.type not in ('note_on', 'note_off'):
            continue
        # Wait out the event's delta time in real time.
        time.sleep(mido.tick2second(msg.time, mid.ticks_per_beat, tempo))
        if msg.velocity == 0:
            continue  # velocity-0 note_on is a release: nothing to strike
        self.hit(msg.note + transpose)
def set_bpm(bpm=-1):
    """Append a set_tempo meta message to the module-level track.

    A missing or implausibly low BPM (<= 60, including the -1 default)
    is replaced by a random value in [70, 160].
    """
    if bpm <= 60:
        bpm = random.randint(70, 160)
    print('BPM set to ' + str(bpm))
    track.append(
        MetaMessage('set_tempo', tempo=mido.bpm2tempo(bpm), time=0))
def change_midi_file_tempo(input_file, output_file, ratio=1.0):
    """ Change the tempo in a midi file """
    infile = MidiFile(input_file)
    with MidiFile() as outfile:
        outfile.type = infile.type
        outfile.ticks_per_beat = infile.ticks_per_beat
        for track in infile.tracks:
            copied_track = MidiTrack()
            outfile.tracks.append(copied_track)
            for message in track:
                # Scale every tempo event by `ratio` (in BPM space).
                if message.type == 'set_tempo':
                    message.tempo = bpm2tempo(ratio * tempo2bpm(message.tempo))
                copied_track.append(message)
        outfile.save(output_file)
def make_midi(input_wav, notes, tempo, mean_beat, instrument, velocity):
    """Build a MidiFile from detected notes.

    `notes` is assumed to be (name, duration) pairs where name 'r' marks a
    rest — TODO confirm.  Relies on a module-level `slow` tempo multiplier.
    Returns (midi_file, output_path); saving is left to the caller.
    """
    mid = mido.MidiFile()
    track = mido.MidiTrack()
    mid.tracks.append(track)
    tempo = mido.bpm2tempo(tempo)
    # `slow` is a module-level slowdown factor applied to the tempo.
    track.append(
        mido.MetaMessage('set_tempo', tempo=round(tempo * slow), time=0))
    track.append(mido.Message('program_change', program=instrument, time=0))
    for note in notes:
        # Duration in ticks, normalized by the mean beat length (480 tpb).
        gap = int(round((note[1] * 480) / mean_beat))
        if (note[0] == 'r'):
            # Rest: emit a silent (velocity 0) note to consume the time.
            track.append(mido.Message('note_on', note=60, velocity=0, time=0))
            track.append(
                mido.Message('note_off', note=60, velocity=0, time=gap))
        else:
            note_num = librosa.note_to_midi(note[0])
            track.append(
                mido.Message('note_on', note=note_num, velocity=velocity,
                             time=0))
            track.append(
                mido.Message('note_off', note=note_num, velocity=velocity,
                             time=gap))
    output_midi = getFilename(input_wav, instrument)
    # mid.save(output_midi)
    return mid, output_midi
def from_midi(filename): midi = mido.MidiFile(filename) # convert to absolute time in beats and separate by channel if midi.type == 0: channels = __to_abs_time_per_channel_type_0(midi) if midi.type == 1: channels = __to_abs_time_per_channel_type_1(midi) # for every channel, find instrument instruments = list(map(__find_channel_instrument, channels[:-1])) # parse every tempo change tempo_times = [msg.time for msg in channels[-1] if msg.type == "set_tempo"] tempos = [] # if no tempo changes found, assume constant 120BPM throughout if len(tempo_times) == 0: tempo_times.append(0) channels[-1].insert( 0, mido.MetaMessage("set_tempo", time=0, tempo=mido.bpm2tempo(120))) for i in range(len(tempo_times) - 1): tempos.append( __parse_tempo(channels.copy(), instruments, tempo_times[i], tempo_times[i + 1])) tempos.append(__parse_tempo(channels.copy(), instruments, tempo_times[-1])) # combine all tempo fragmens into one sequence return Melody(tempos)
def make_training_audio(channel, program, note_list, duration, sr):
    """Synthesize training audio for a list of MIDI notes.

    Writes a temporary MIDI file (one note_on/note_off pair per note, each
    `duration` seconds apart at 240 BPM), renders it to wav with musescore,
    and returns the audio loaded at sample rate `sr`.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        midi_path = os.path.join(tmp_dir, 'train.mid')
        wave_path = os.path.join(tmp_dir, 'train.wav')
        mid = MidiFile()
        track = MidiTrack()
        mid.tracks.append(track)
        track.append(MetaMessage('set_tempo', tempo=bpm2tempo(240)))
        track.append(
            Message('program_change', channel=channel - 1,
                    program=program - 1, time=0))
        for ni, note in enumerate(note_list):
            # First note starts immediately; later ones after one duration.
            track.append(
                Message('note_on', note=note,
                        time=int(1000 * duration) if ni > 0 else 0))
            track.append(
                Message('note_off', note=note, time=int(1000 * duration)))
        mid.save(midi_path)
        # Fix: the original open()ed os.devnull and never closed it (leaked
        # file handle); subprocess.DEVNULL does the same without a leak.
        subprocess.call(['musescore', '-o', wave_path, midi_path],
                        stdout=subprocess.DEVNULL,
                        stderr=subprocess.DEVNULL)
        y, _ = librosa.core.load(wave_path, sr,
                                 duration=(len(note_list) + 1) * duration)
        return y
def write_song(self, file_name):
    """Write self.notes to `file_name` as a two-track type-1 MIDI file.

    Notes alternate between the two tracks; the second track plays an
    octave (12 semitones) lower.
    """
    song = MidiFile(type=1)  # 0 type means all messages are on one track
    pitch_offsets = [0, -12]
    n_tracks = 2
    tracks = []
    for _ in range(n_tracks):
        t = MidiTrack()
        tracks.append(t)
        song.tracks.append(t)
        t.append(MetaMessage("set_tempo", tempo=mido.bpm2tempo(self.bpm)))
    for index, note in enumerate(self.notes):
        slot = index % n_tracks
        target = tracks[slot]
        pitch = note.pitch + pitch_offsets[slot]
        # A velocity-0 note_on acts as the note_off; its delta time
        # carries the note duration in ticks.
        target.append(Message("note_on", note=pitch, velocity=127, time=0))
        target.append(Message("note_on", note=pitch, velocity=0,
                              time=note.duration * song.ticks_per_beat))
    song.save(file_name)
def get_delta_time(cue):
    """Return the absolute time (milliseconds) of `cue` given the tempo map.

    `tempos` is a module-level list of {"tempo": bpm, "tick": tick} entries
    (the commented-out line suggests it was once an attribute) — TODO
    confirm where it is defined.  Assumes 480 ticks per beat throughout.
    """
    #tempos = self.tempos
    if len(tempos) > 1:
        # print(str(cue["tick"]))
        tick = cue["tick"]
        time = 0
        last_tempo = 0
        last_tick = 0
        # Walk the tempo map, accumulating the time of each fully-elapsed
        # tempo segment before the cue's tick.
        for tempo in tempos:
            bpm = tempo["tempo"]
            t = tempo["tick"]
            if t != 0:
                if tick >= t:
                    # ms per tick at the previous tempo: 60000 / (bpm * 480)
                    tick_time = 60000 / (last_tempo * 480)
                    tick_count = t - last_tick
                    time = time + (tick_time * tick_count)
                    last_tempo = bpm
                    last_tick = t
                else:
                    break  # remaining tempo events are after the cue
            else:
                last_tempo = bpm  # tempo at tick 0: just record it
        # Add the partial segment from the last tempo event to the cue.
        difference = tick - last_tick
        if difference != 0:
            tick_time = 60000 / (last_tempo * 480)
            time = time + (tick_time * difference)
        return time
    else:
        # print("ELSE get_delta_time tempos: " + str(mido.bpm2tempo(tempos[0]["tempo"])))
        # print("cue tick: " + str(cue["tick"]))
        # Single tempo: ticks * (us per beat / ticks per beat) / 1000 -> ms.
        return cue["tick"] / 480 * mido.bpm2tempo(tempos[0]["tempo"]) / 1000
def to_midi_track(self):
    """Converts this track to MidiTrack from mido"""
    midi_track = MidiTrack()
    midi_track.append(MetaMessage('set_tempo', tempo=bpm2tempo(self.bpm)))
    # Select the instrument for this channel.
    midi_track.append(
        Message('program_change', program=self.instrument.value, time=0,
                channel=self.channel))
    previous_tick = 0
    for event in self.__preprocess_events():
        # Delta time relative to the previous event.
        delta = event.tick - previous_tick
        previous_tick = event.tick
        message_type = 'note_on' if event.type == 'start' else 'note_off'
        midi_track.append(
            Message(message_type, note=event.note, velocity=64, time=delta,
                    channel=self.channel))
    return midi_track
def write_midi(filename, notes, tpb):
    """Write notes to `filename` at 120 BPM.

    `notes` is a sequence of (pitch, on_seconds, off_seconds).  Note on/off
    events are merged, sorted by time, and written with delta times.
    """
    mid = mido.MidiFile(
        ticks_per_beat=tpb
    )  # copy ticks_per_beat from source to avoid rounding errors
    track = mido.MidiTrack()
    mid.tracks.append(track)
    tempo = mido.bpm2tempo(120)
    track.append(mido.MetaMessage('set_tempo', tempo=tempo))
    track.append(mido.MetaMessage('time_signature'))
    track.append(mido.Message('program_change', program=0))
    events = [(n[0], n[1], 'note_on') for n in notes]
    events.extend([(n[0], n[2], 'note_off') for n in notes])
    events = sorted(events, key=lambda n: n[1])
    # Fix: the original rounded each per-event delta independently (letting
    # rounding errors accumulate as drift, contrary to the comment above)
    # and kept an unused `time` accumulator.  Instead, round the ABSOLUTE
    # tick position and emit differences of rounded positions.
    last_tick = 0
    for pitch, t1, eventtype in events:
        abs_tick = round(mido.second2tick(t1, tpb, tempo))
        track.append(
            mido.Message(eventtype, note=pitch, velocity=64,
                         time=abs_tick - last_tick))
        last_tick = abs_tick
    mid.save(filename)
def _join_tracks(left_track, right_track):
    """Merge two hand tracks into one time-sorted list of MidiEvents.

    Ticks are converted to seconds assuming 120 BPM and 480 ticks per beat.
    Fix: the original duplicated the entire conversion loop for each hand,
    differing only in the is_left flag — collapsed into one helper.
    """
    default_tempo = mido.bpm2tempo(120)
    default_ticks_per_beat = 480

    def collect(track, is_left):
        # Convert one track's note events to MidiEvents with absolute times.
        events = []
        for msg in _to_abstime(track):
            is_note_on = (msg.type == 'note_on')
            is_note_off = (msg.type == 'note_off')
            if is_note_on or is_note_off:
                when = mido.tick2second(msg.time, default_ticks_per_beat,
                                        default_tempo)
                events.append(
                    MidiEvent(pitch=msg.note, is_note_on=is_note_on,
                              when=when, is_left=is_left))
        return events

    messages = collect(left_track, True) + collect(right_track, False)
    messages.sort(key=lambda msg: msg.when)
    return messages
def record(midi_file, bpm):
    """Record incoming MIDI messages from a ZMQ feed into `midi_file`.

    Subscribes to INPUT_ZMQ_URL and appends each received message to a new
    track, converting its nanosecond timestamp to ticks at the given BPM.
    Runs until interrupted with Ctrl-C.
    """
    ctx = zmq.Context()
    sock = ctx.socket(zmq.SUB)
    sock.connect(INPUT_ZMQ_URL)
    sock.subscribe(b'')  # receive everything
    track = MidiTrack()
    track.append(
        MetaMessage('track_name',
                    name=f'MIDIate Recorder {str(datetime.now())}', time=0))
    midi_file.tracks.append(track)
    print('Recording...')
    try:
        while True:
            # Wire format: 8-byte little-endian timestamp, then raw MIDI.
            data = sock.recv()
            current_ts, msg = data[:8], data[8:]
            current_ts, = struct.unpack("<Q", current_ts)
            msg = Message.from_bytes(msg)
            # NOTE(review): msg.time is set from the ABSOLUTE timestamp, not
            # a delta from the previous message — confirm downstream
            # consumers expect that.
            msg.time = int(
                second2tick(current_ts / NANOSECONDS_IN_SECONDS,
                            TICKS_PER_BEAT, bpm2tempo(bpm)))
            track.append(msg)
    except KeyboardInterrupt:
        print('Recorder stopped.')
def add_drum(mid1, output_midi, tempo, mean_beat, beat_num, type, velocity):
    """Append a simple alternating drum track to an existing MidiFile.

    Each beat consists of a silent spacer note (consuming one beat of time)
    followed by a drum hit; odd beats play note 24 at reduced velocity,
    even beats note 14 at full velocity.  Relies on the module-level `slow`
    tempo multiplier.  Returns (midi_file, output_path).
    """
    mid = mid1
    track2 = mido.MidiTrack()
    mid.tracks.append(track2)
    tempo = mido.bpm2tempo(tempo)
    track2.append(
        mido.MetaMessage('set_tempo', tempo=round(tempo * slow), time=0))
    gap = 480  # one beat at 480 ticks per beat
    for i in range(0, beat_num):
        # Silent spacer note advancing the clock by one beat.
        track2.append(mido.Message('note_on', note=60, velocity=0, time=0))
        track2.append(mido.Message('note_off', note=60, velocity=0, time=gap))
        # Fix: the two parity branches were duplicated apart from the note
        # number and velocity — select the parameters, then emit once.
        if i % 2 == 1:
            drum_note, drum_velocity = 24, velocity - 30
        else:
            drum_note, drum_velocity = 14, velocity
        track2.append(mido.Message('program_change', program=type,
                                   time=0))  # instrument used by this track
        track2.append(
            mido.Message('note_on', note=drum_note, velocity=drum_velocity,
                         time=0))
        track2.append(
            mido.Message('note_off', note=drum_note, velocity=drum_velocity,
                         time=0))
    output_midi = getFilename(output_midi, type)
    # mid.save(output_midi)
    return mid, output_midi
def read(filename: str, trackId: int = 1, tempo: int = -1,
         numerator: int = -1, denominator: int = -1):
    """Read track `trackId` of a MIDI file into a MidoHelper.

    Meta values (tempo, time signature) default to the file's own unless
    overridden by non-(-1) arguments.  Raises when the file has too few
    tracks.
    """
    mid = MidiFile(filename)
    if len(mid.tracks) <= trackId:
        raise Exception(
            'This midi file {} has only {} tracks rather than {}.'.format(
                filename, len(mid.tracks), trackId))
    for metaMessage in mid.tracks[0]:
        if metaMessage.type == 'set_tempo':
            if tempo == -1:
                # NOTE(review): metaMessage.tempo is already in microseconds
                # per beat; bpm2tempo() expects BPM, so this looks like it
                # should be tempo2bpm() — confirm which unit MidoHelper
                # expects before changing.
                tempo = mido.bpm2tempo(metaMessage.tempo)
        elif metaMessage.type == 'time_signature':
            if denominator == -1:
                denominator = metaMessage.denominator
            if numerator == -1:
                numerator = metaMessage.numerator
    ticksPerBeat = mid.ticks_per_beat
    # Ticks per whole measure-relative unit used for note durations.
    delta = ticksPerBeat * denominator
    track = mid.tracks[trackId]
    notes = []
    for message in track:
        if message.type == 'note_on':
            # Delta time before a note_on is silence: record it as a rest.
            if message.time != 0:
                notes.append(Note.PreciseRest(message.time / delta))
        elif message.type == 'note_off':
            notes.append(Note(message.note, message.time / delta))
    return MidoHelper(tempo, numerator, denominator).addTrack(notes)
def to_midi_from_matrix(self, matrix):
    """ matrix representation into midi file

    `matrix` is a list of per-tick pitch-state rows of width self.span.
    Note on/off events are emitted on rising/falling edges between
    consecutive rows; notes still sounding at the last row are closed.
    """
    # Shifted copy of the matrix: row t-1 aligned with row t (row -1 = all off).
    matrix_prev = [[0 for _ in range(self.span)]] + matrix[:-1]
    pattern = mido.MidiFile()
    pattern.ticks_per_beat = 16
    track = mido.MidiTrack()
    pattern.tracks.append(track)
    track.append(
        mido.MetaMessage('set_tempo', tempo=mido.bpm2tempo(120), time=0))
    last_event_tick = 0  # tick of the most recently emitted event
    for tick, (state, previous_state) in enumerate(zip(matrix, matrix_prev)):
        offNotes, onNotes = [], []  # NOTE(review): populated nowhere — dead?
        for pitch, (n, p) in enumerate(zip(state, previous_state)):
            if p == 1 and n == 0:
                # Falling edge: the note just ended.
                self.add_note_off_event(track, tick - last_event_tick,
                                        pitch + self.lowerbound)
                last_event_tick = tick
            elif p == 0 and n == 1:
                # Rising edge: the note just started.
                self.add_note_on_event(track, tick - last_event_tick,
                                       pitch + self.lowerbound)
                last_event_tick = tick
            if tick == len(matrix) - 1 and n == 1:
                # Close any note still sounding at the final row.
                self.add_note_off_event(track, tick - last_event_tick,
                                        pitch + self.lowerbound)
                last_event_tick = tick
    track.append(mido.MetaMessage('end_of_track', time=last_event_tick + 1))
    return pattern
def get_note_strike_times(self, midi_file):
    """Return the absolute time (seconds) of every note strike in a
    single-track MIDI file, assuming the module default BPM."""
    tempo = mido.bpm2tempo(DEFAULT_BPM)
    assert len(
        midi_file.tracks
    ) == 1, f"MIDI must have 1 track, not: {len(midi_file.tracks)}"
    strike_times = []
    elapsed_ticks = 0
    for message in midi_file.tracks[0]:
        # Non-zero velocity --> note has been struck;
        # zero velocity --> note has been released.
        elapsed_ticks += message.time
        if message.type == 'note_on' and message.velocity > 0:
            strike_times.append(
                mido.tick2second(elapsed_ticks, midi_file.ticks_per_beat,
                                 tempo))
    print(
        f"Found {len(strike_times)} note strikes in MIDI file: {midi_file.filename}"
    )
    return strike_times
def write_MIDI_file(self, video_file_path):
    """Translate self.event_map (per-millisecond sets of active keys) into
    a MIDI file next to the input video.

    Emits note_off for keys that disappear between frames and note_on for
    keys that appear, carrying the elapsed ticks on the first message
    written in each frame.
    """
    # create the file path for the output midi file:
    # (assume video in mp4 format and in resources/input_videos)
    output_file_path = 'resources/output_midi/' + video_file_path[
        23:-4] + '.mid'
    # initialize the MIDI file header:
    mid = MidiFile(ticks_per_beat=500)
    track = MidiTrack()
    mid.tracks.append(track)
    track.append(MetaMessage('set_tempo', tempo=bpm2tempo(120)))
    # (unused locals ticks_per_beat/tempo removed)
    last_tick_with_write = 0
    none_written = True
    # special case for first frame: sound every key already held.
    # Fix: the original called midi_file.note_on(...) on an undefined name
    # (leftover from a different MIDI library) — NameError at runtime.
    for key in self.event_map[0]:
        track.append(Message('note_on', note=key, velocity=64, time=0))
        none_written = False
    # iterate through the event_map:
    for ms_index in range(1, math.ceil(self.max_timestamp)):
        # Time-keeping: ticks elapsed since the last frame that wrote.
        ticks = ms_index - last_tick_with_write
        # NOTE OFF events: keys present last frame but gone now.
        for key in set(self.event_map[ms_index - 1]):
            if key not in self.event_map[ms_index]:
                track.append(
                    Message('note_off', note=key, velocity=64, time=ticks))
                none_written = False
                ticks = 0
        # NOTE ON events: keys newly present this frame.
        for key in set(self.event_map[ms_index]):
            if key not in self.event_map[ms_index - 1]:
                track.append(
                    Message('note_on', note=key, velocity=64, time=ticks))
                none_written = False
                ticks = 0
        # Time-keeping: remember the last frame that produced output.
        if not none_written:
            last_tick_with_write = ms_index
            none_written = True
    mid.save(output_file_path)
    print("={ MIDI file successfully written }=")
def stream():
    """HTTP endpoint: generate music in the requested style and stream it
    back as MP3 (MIDI -> fluidsynth -> lame)."""
    # Determine style: blend one-hot style vectors weighted by the request.
    gen_style = []
    for style, style_id in styles.items():
        strength = request.args.get(style, 0)
        gen_style.append(one_hot(style_id, NUM_STYLES) * float(strength))
    gen_style = np.mean(gen_style, axis=0)
    if np.sum(gen_style) > 0:
        # Normalize
        gen_style /= np.sum(gen_style)
    else:
        gen_style = None  # no style requested: let the model pick
    # Clamp requested length to [1000, 10000].
    seq_len = max(min(int(request.args.get('length', 1000)), 10000), 1000)
    if 'seed' in request.args:
        # TODO: This may not work for multithreading?
        seed = int(request.args['seed'])
        np.random.seed(seed)
        print('Using seed {}'.format(seed))
    uuid = uuid4()
    logger.info('Stream ID: {}'.format(uuid))
    logger.info('Style: {}'.format(gen_style))
    folder = '/tmp'
    mid_fname = os.path.join(folder, '{}.mid'.format(uuid))
    logger.info('Generating MIDI')
    seq = Generation(model, style=gen_style,
                     default_temp=0.9).generate(seq_len=seq_len,
                                                show_progress=False)
    track_builder = TrackBuilder(iter(seq), tempo=mido.bpm2tempo(90))
    track_builder.run()
    midi_file = track_builder.export()
    midi_file.save(mid_fname)
    logger.info('Synthesizing MIDI')
    # Synthsize: render the MIDI to raw audio with fluidsynth.
    fsynth_proc = subprocess.Popen([
        'fluidsynth', '-nl', '-f', 'fluidsynth.cfg', '-T', 'raw', '-g',
        str(gain), '-F', '-', soundfont, mid_fname
    ],
                                   stdout=subprocess.PIPE)
    # Convert to MP3 by piping fluidsynth's output through lame.
    lame_proc = subprocess.Popen(['lame', '-q', '5', '-r', '-'],
                                 stdin=fsynth_proc.stdout,
                                 stdout=subprocess.PIPE)
    logger.info('Streaming data')
    data, err = lame_proc.communicate()
    os.remove(mid_fname)  # temp MIDI no longer needed once encoded
    return Response(data, mimetype='audio/mp3')
def createMidiRhythmScore(midi_filename, onset_frames_index_of_16th_notes,
                          strong_onset_frames_index_of_16th_notes,
                          weak_onset_frames_index_of_16th_notes, bpm,
                          auftakt_16th_notes_number=0):
    """Write a rhythm score as a MIDI file.

    Onsets are given as 16th-note grid indices; strong onsets are rendered
    as F3 at velocity 100, weak onsets as A3 at velocity 50.
    (Original comments translated from Japanese.)
    """
    # Building the MIDI data.
    # Length of one 16th note in ticks.
    ticks_per_16th_note = 120
    ticks_per_beat = ticks_per_16th_note * 4  # quarter note defaults to 480
    # Tick position of each note.
    onset_ticks = np.array(
        onset_frames_index_of_16th_notes) * ticks_per_16th_note
    strong_onset_ticks = np.array(
        strong_onset_frames_index_of_16th_notes) * ticks_per_16th_note
    weak_onset_ticks = np.array(
        weak_onset_frames_index_of_16th_notes) * ticks_per_16th_note
    # Pickup-bar (auftakt) handling (should really be done in mido itself,
    # kept here for convenience):
    #onset_ticks = list(filter(lambda x: x >= ticks_per_16th_note * auftakt_16th_notes_number, onset_ticks))
    #strong_onset_ticks = list(filter(lambda x: x >= ticks_per_16th_note * auftakt_16th_notes_number, strong_onset_ticks))
    #weak_onset_ticks = list(filter(lambda x: x >= ticks_per_16th_note * auftakt_16th_notes_number, weak_onset_ticks))
    # Setup.
    smf = mido.MidiFile(ticks_per_beat=ticks_per_beat)
    track = mido.MidiTrack()
    track.append(mido.MetaMessage('set_tempo', tempo=mido.bpm2tempo(bpm)))
    track.append(mido.Message('program_change', program=1))  # instrument
    # Note entry.
    # Only the first event needs an explicit delta time.
    onset_ticks_diff = np.diff(onset_ticks)
    # Pickup-bar handling:
    #track.append(mido.Message('note_off',time=(ticks_per_16th_note * 12)))
    track.append(
        mido.Message('note_off',
                     time=(ticks_per_16th_note * 16) -
                     (ticks_per_16th_note * auftakt_16th_notes_number)))
    i = 0
    for i in range(len(onset_ticks) - 1):
        delta = onset_ticks[i + 1] - onset_ticks[i]
        if onset_ticks[i] in strong_onset_ticks:
            track.append(
                mido.Message('note_on', velocity=100,
                             note=librosa.note_to_midi('F3')))
            track.append(mido.Message('note_off', time=delta))
            track.append(
                mido.Message('note_off', note=librosa.note_to_midi('F3')))
        elif onset_ticks[i] in weak_onset_ticks:
            track.append(
                mido.Message('note_on', velocity=50,
                             note=librosa.note_to_midi('A3')))
            track.append(mido.Message('note_off', time=delta))
            track.append(
                mido.Message('note_off', note=librosa.note_to_midi('A3')))
    track.append(mido.MetaMessage('end_of_track'))
    smf.tracks.append(track)
    # Write the MIDI file.
    smf.save(midi_filename)
def beats_to_ticks(seconds, bpm):
    """Convert a duration in seconds to MIDI ticks at the given BPM.

    Fixes the FIXME: the previous implementation ignored `seconds` entirely
    and always returned the tick length of exactly one beat
    (it converted 60/bpm — one beat's worth of seconds — instead of the
    argument).
    """
    ticks_per_beat = 480  # precision
    tempo = mido.bpm2tempo(bpm)  # microseconds per beat
    return mido.second2tick(seconds, ticks_per_beat, tempo)
def add_meta_info(self):
    """Append tempo, time-signature, key-signature and program-change
    meta messages for this track."""
    # Parse the "N/D" time signature once.
    signature = Fraction(self.time_signature)
    super().append(
        MetaMessage('time_signature',
                    numerator=signature.numerator,
                    denominator=signature.denominator))
    super().append(
        MetaMessage('set_tempo', tempo=mido.bpm2tempo(self.bpm), time=0))
    super().append(MetaMessage('key_signature', key=self.key))
    for channel, program in self.instruments.items():
        super().append(
            Message('program_change',
                    channel=int(channel),
                    program=program,
                    time=0))
def setTempo(self, tempo):
    """ adjusts the tempo of the file while playing

    Moves self.tempo toward the requested BPM, but by at most
    MAX_CHANGE/2 per call when the requested jump exceeds MAX_CHANGE.
    """
    if abs(tempo - self.tempo) < MAX_CHANGE:
        self.tempo = tempo  # small change: apply directly
    elif tempo - self.tempo > MAX_CHANGE:
        self.tempo = self.tempo + MAX_CHANGE/2  # ramp up gradually
    elif self.tempo - tempo > MAX_CHANGE:
        self.tempo = self.tempo - MAX_CHANGE/2  # ramp down gradually
    print(self.tempo)
    # NOTE(review): `message` is constructed but never appended, sent, or
    # returned — either dead code or a missing dispatch step; confirm.
    message = mido.MetaMessage("set_tempo",
                               tempo=int(mido.bpm2tempo(self.tempo)))
def write_tempo_map(self):
    """Write tempo and a 4/4 time signature onto the tempo-map track.

    Uses the explicit command-line BPM when set; otherwise derives the BPM
    from the wall-clock duration of the recording and the ticks captured.
    """
    bpm = self.tempo_bpm
    if not bpm:
        elapsed = time.time() - self.start_time
        beats = self.total_ticks / TICKS_PER_BEAT
        bpm = int(round(beats * 60 / elapsed))
    print("BPM: %d" % bpm)
    # Write tempo and time signature to tempo map track
    self.tempo_map_track.append(
        mido.MetaMessage('set_tempo', tempo=mido.bpm2tempo(bpm)))
    self.tempo_map_track.append(
        mido.MetaMessage('time_signature', numerator=4, denominator=4))
# Top-level song-generation script (may continue beyond this chunk).
with mido.MidiFile(type=1) as mid:
    # Output name; the first assignment is immediately overridden for testing.
    song_name = "exported_song" + ".mid"
    song_name = "test" + ".mid"
    # Parse "N/D" into [numerator, denominator].
    time_signature = "4/4".split("/")
    time_signature = [int(n) for n in time_signature]
    bpm = 170
    channel = 1
    measures = 30
    repeat_chance = .5
    scale = "major"
    # Scale degrees of C3 major as MIDI note numbers.
    key = Note("C", 3).apply_relation("scale", scale=scale)
    key = [n.number for n in key]
    info_track = mido.MidiTrack()
    info_track.append(
        mido.MetaMessage('set_tempo', tempo=mido.bpm2tempo(bpm)))
    mid.tracks.append(info_track)
    for i in range(1):
        track = mido.MidiTrack()
        track.append(
            mido.MetaMessage('track_name', name="track_name", time=1))
        mid.tracks.append(track)
    # Ticks in one measure (denominator beats of length BEAT).
    measure_length = time_signature[1] * BEAT
    for i in range(measures):
        # going to be used later in more complicated melodies with rests
        progress = 0
        num_melodies = random.choice([1, 2, 4])
        # maybe change depending on time signature,
        # but not working thirds yet
        melody_length = measure_length / num_melodies
def midoFileToNoteStateSeq(midoFile):
    """Convert a mido file into a time-quantized note-state sequence.

    Returns a list of float32 vectors of length PITCH_COUNT, one per
    RESOLUTION_TIME unit, where each entry marks whether that pitch is
    sounding.  Tracks are merged additively; tempo changes from the tempo
    map are honored when converting ticks to microseconds.
    """
    assert(midoFile.type != 2)  # type-2 files have unsynchronized tracks
    tempoMap = midoFileToTempoMap(midoFile)
    seq = []
    for track in midoFile.tracks:
        # Initialize variables for each track
        ticksLapsed = 0
        timeLapsed = 0  # microseconds
        currentTimeUnit = 0
        tempoIndex = 0
        isPercussion = False
        tickResolutionInUs = mido.bpm2tempo(120) / midoFile.ticks_per_beat  # Intial tempo/tick resolution
        currentTrackNoteState = np.zeros(PITCH_COUNT, dtype="float32")  # All notes off
        # Set the first nextTempoEvent
        nextTempoEvent = None
        if tempoMap:
            nextTempoEvent = tempoMap[tempoIndex]
        for event in track:
            # Update ticks
            # Check if there is a tempo event, update tempo and also
            # update delta time making sure to account for tempo events
            ticksLapsed += event.time
            deltaTime = 0
            if nextTempoEvent is not None and ticksLapsed >= nextTempoEvent[0]:
                prevTempoEventTicks = ticksLapsed - event.time
                while nextTempoEvent is not None and ticksLapsed >= nextTempoEvent[0]:
                    deltaTime += (nextTempoEvent[0] - prevTempoEventTicks) * tickResolutionInUs  # Update from ticks between tempo events
                    tickResolutionInUs = nextTempoEvent[1] / midoFile.ticks_per_beat
                    tempoIndex += 1
                    prevTempoEventTicks = nextTempoEvent[0]
                    nextTempoEvent = tempoMap[tempoIndex] if tempoIndex < len(tempoMap) else None
                deltaTime += (ticksLapsed - prevTempoEventTicks) * tickResolutionInUs  # Update from ticks after the last tempo event
            else:
                deltaTime = event.time * tickResolutionInUs  # Update from event delta time
            timeLapsed += deltaTime
            # Ignore meta events
            if event.is_meta:
                continue
            # Update time units
            timeUnitsLapsed = timeLapsed / RESOLUTION_TIME
            previousTimeUnit = currentTimeUnit
            currentTimeUnit = int(round(timeUnitsLapsed))
            # Fill from previous notes state values to seq till currentTimeUnit
            if currentTimeUnit > len(seq) - 1:
                for _ in range(currentTimeUnit - len(seq) + 1):
                    seq.append(np.copy(currentTrackNoteState))
            else:
                # Add notes to seq if modifying previously written notes from other tracks
                if not currentTrackNoteState.all(0):
                    for i in range(previousTimeUnit+1, currentTimeUnit+1):
                        seq[i] = np.add(seq[i], currentTrackNoteState)
                        seq[i].clip(0, 1)
            # Ignore percussion instrument segments
            if event.type == 'program_change':
                if event.program >= 113 and event.program <= 120:
                    isPercussion = True
                else:
                    isPercussion = False
                continue
            if isPercussion:
                continue
            # Determine event
            noteEvent = False
            if event.type == 'note_on':
                noteEvent = True
                noteState = True
            elif event.type == 'note_off':
                noteEvent = True
                noteState = False
            if not noteEvent:
                continue
            if event.note >= PITCH_LOWERBOUND and event.note <= PITCH_UPPERBOUND:
                # Determine all note events with 0 velocity as note_off
                if event.velocity == 0:
                    noteState = False
                # Update current note state
                note = midiNoteToInputIndex(event.note)
                currentTrackNoteState[note] = np.float32(noteState)
                # Create room between note off and note on if necessary so that they're evident in seq
                pass
                # Add the current note state to seq
                if currentTimeUnit == len(seq) - 1:
                    seq[currentTimeUnit] = np.copy(currentTrackNoteState)
                else:
                    seq[currentTimeUnit] = np.add(seq[currentTimeUnit], currentTrackNoteState)
                    seq[currentTimeUnit].clip(0, 1)
    return seq
tolerance = 0.8 notes_o = notes("default", win_s, hop_s, samplerate) print("%8s" % "time","[ start","vel","last ]") # create a midi file mid = MidiFile() track = MidiTrack() mid.tracks.append(track) ticks_per_beat = mid.ticks_per_beat # default: 480 bpm = 120 # default midi tempo tempo = bpm2tempo(bpm) track.append(MetaMessage('set_tempo', tempo=tempo)) track.append(MetaMessage('time_signature', numerator=4, denominator=4)) def frames2tick(frames, samplerate=samplerate): sec = frames / float(samplerate) return int(second2tick(sec, ticks_per_beat, tempo)) last_time = 0 # total number of frames read total_frames = 0 while True: samples, read = s() new_note = notes_o(samples) if (new_note[0] != 0):
def write_midi(pr, quantization, write_path, tempo=80):
    """Write a multi-instrument pianoroll to a MIDI file.

    `pr` maps instrument name -> (time x pitch) matrix (Python 2 code:
    uses dict.iteritems and print statements).  Each matrix column change
    becomes a velocity event; velocity 0 acts as note_off.
    """
    def pr_to_list(pr):
        # List event = (pitch, velocity, time)
        T, N = pr.shape
        t_last = 0
        pr_tm1 = np.zeros(N)
        list_event = []
        for t in range(T):
            pr_t = pr[t]
            # Only pitches whose value changed since the previous step emit events.
            mask = (pr_t != pr_tm1)
            if (mask).any():
                for n in range(N):
                    if mask[n]:
                        pitch = n
                        velocity = int(pr_t[n])
                        # Time is incremented since last event
                        t_event = t - t_last
                        t_last = t
                        list_event.append((pitch, velocity, t_event))
            pr_tm1 = pr_t
        return list_event
    # Tempo
    microseconds_per_beat = mido.bpm2tempo(tempo)
    # Write a pianoroll in a midi file
    mid = MidiFile()
    # ticks_per_beat can be the quantization, this simplify the writing process
    mid.ticks_per_beat = quantization
    # Each instrument is a track
    for instrument_name, matrix in pr.iteritems():
        # A bit shity : if the pr is a binary pr, multiply by 127
        if np.max(matrix) == 1:
            matrix = matrix * 127
        # Add a new track with the instrument name to the midi file
        track = mid.add_track(instrument_name)
        # transform the matrix in a list of (pitch, velocity, time) events
        events = pr_to_list(matrix)
        # Tempo
        track.append(mido.MetaMessage('set_tempo', tempo=microseconds_per_beat))
        # Add the program_change
        try:
            program = program_change_mapping[instrument_name]
        except:
            # Defaul is piano
            print instrument_name + " not in the program_change mapping"
            print "Default value is 1 (piano)"
            print "Check acidano/data_processing/utils/program_change_mapping.py"
            program = 1
        track.append(mido.Message('program_change', program=program))
        # This list is required to shut down
        # notes that are on, intensity modified, then off only 1 time
        # Example :
        # (60,20,0)
        # (60,40,10)
        # (60,0,15)
        notes_on_list = []
        # Write events in the midi file
        for event in events:
            pitch, velocity, time = event
            if velocity == 0:
                # Get the channel
                track.append(mido.Message('note_off', note=pitch, velocity=0, time=time))
                notes_on_list.remove(pitch)
            else:
                # Retrigger: close the sounding note before re-striking it.
                if pitch in notes_on_list:
                    track.append(mido.Message('note_off', note=pitch, velocity=0, time=time))
                    notes_on_list.remove(pitch)
                    time = 0
                track.append(mido.Message('note_on', note=pitch, velocity=velocity, time=time))
                notes_on_list.append(pitch)
    mid.save(write_path)
    return
import math, mido

# Data locations.
TRAIN_MUSIC_FOLDER = "music_all"
DATA_FOLDER = "data"

# Pitch-class names in order starting from A.
NOTES = ["A", "A#", "B", "C", "C#", "D", "D#", "E", "F", "F#", "G", "G#"]

RESOLUTION_TIME = 50 * 1000  # In microseconds represents 1 time unit
FILE_GAP_TIME = 2000  # In milliseconds
OUTPUT_TICKS_PER_BEAT = 500
OUTPUT_TEMPO = mido.bpm2tempo(120)  # microseconds per beat at 120 BPM
# Microseconds represented by one output tick.
OUTPUT_RESOLUTION_TIME = OUTPUT_TEMPO / OUTPUT_TICKS_PER_BEAT

# Pitch range covered by the model.
PITCH_LOWERBOUND = 24  # C2 (Midi note value)
PITCH_OCTAVES = 5  # Upperbound: B6
PITCH_COUNT = len(NOTES) * PITCH_OCTAVES
PITCH_UPPERBOUND = PITCH_LOWERBOUND + PITCH_COUNT - 1

# Network input/output windows.
N_INPUT_TIME = 0.5 * 1000 * 1000  # In microseconds
N_OUTPUT_TIME = 0.5 * 1000 * 1000
N_OUTPUT_UNITS = int(math.ceil(N_OUTPUT_TIME / RESOLUTION_TIME))
N_INPUT_UNITS = int(math.ceil(N_INPUT_TIME / RESOLUTION_TIME))
N_CHANNELS = 1
N_PLAY_THRESHOLD = 0.5  # activation above this counts as "note playing"
N_BATCH_SIZE = 100
N_TRAIN_EPOCHS = 500

# word2vec-style embedding hyperparameters.
W2V_EMBEDDING_SIZE = 50
W2V_BATCH_SIZE = 100
W2V_NUM_SKIPS = 2
W2V_SKIP_WINDOW = 1
W2V_TRAIN_EPOCHS = 100