def test_encode_and_parse(self):
    """Encode a message and then parse it.

    Should return the same message.
    """
    msg1 = Message('note_on')
    msg2 = mido.parse(msg1.bytes())
    self.assertTrue(msg1 == msg2)
def test_encode_and_parse_all(self):
    """Encode and then parse all message types.

    This checks mostly for errors in the parser.
    """
    p = mido.Parser()
    for spec in mido.messages.get_message_specs():
        if spec.type == 'sysex_end':
            # This is considered a part of 'sysex_start'.
            continue
        msg = Message(spec.type)
        p.feed(msg.bytes())
        outmsg = p.get_message()
        self.assertTrue(outmsg is not None)
        self.assertTrue(outmsg.type == spec.type)
def play_note(note, length, track, base_num=0, delay=0, velocity=1.0, channel=0):
    bpm = 125
    meta_time = 60 / bpm * 1000  # milliseconds per beat; one beat is one quarter note
    major_notes = [0, 2, 2, 1, 2, 2, 2, 1]
    base_note = 60  # MIDI number for C4
    track.append(
        Message('note_on',
                note=base_note + base_num * 12 + sum(major_notes[0:note]),
                velocity=round(64 * velocity),
                time=round(delay * meta_time),
                channel=channel))
    track.append(
        Message('note_off',
                note=base_note + base_num * 12 + sum(major_notes[0:note]),
                velocity=round(64 * velocity),
                time=round(meta_time * length),
                channel=channel))
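# Usage sketch (an assumption, not part of the original source): drive play_note()
# with scale degrees 1-8 to render one ascending C major octave, one beat per note.
# The output file name is hypothetical.
from mido import MidiFile, MidiTrack

mid = MidiFile()
track = MidiTrack()
mid.tracks.append(track)
for degree in range(1, 9):       # degree 1 is C4, degree 8 is C5
    play_note(degree, 1, track)
mid.save('c_major_scale.mid')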
def note_to_midi(save_file, notes):
    outfile = MidiFile()
    track = MidiTrack()
    outfile.tracks.append(track)
    track.append(Message('program_change', program=11))
    default_pause = 240
    pause = 0
    elem = 60
    for i, default_elem in enumerate(
            [key.ord_note(default_elem) for default_elem in notes]):
        if i != 0:
            pause = default_pause
        if default_elem != -1:
            elem = default_elem
        else:
            pause += default_pause
        track.append(Message('note_on', note=elem, velocity=100, time=pause))
        pause = default_pause
        track.append(Message('note_off', note=elem, velocity=100, time=pause))
    outfile.save(save_file)
    print("File " + save_file + " created!")
def scale_Cmaj_mid():
    """Build the C major scale MIDI file with two tracks, right and left hand."""
    mid = MidiFile()
    track_r = MidiTrack()
    track_l = MidiTrack()
    gamme_n = [60, 62, 64, 65, 67, 69, 71, 72]
    gamme_rev = [i for i in gamme_n]
    gamme_rev.reverse()
    gamme_n += gamme_rev
    for n in gamme_n:
        track_r.append(Message('note_on', note=n, velocity=100, time=0))
        track_r.append(
            Message('note_off', note=n, velocity=64, time=mid.ticks_per_beat))
    for n in gamme_n:
        track_l.append(Message('note_on', note=n - 12, velocity=100, time=0))
        track_l.append(
            Message('note_off', note=n - 12, velocity=64, time=mid.ticks_per_beat))
    mid.tracks.append(track_r)
    mid.tracks.append(track_l)
    return mid
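# Usage sketch (assumption): the function returns a MidiFile, so saving it is a
# one-liner; the file name is hypothetical.
mid = scale_Cmaj_mid()
mid.save('c_major_scale_two_hands.mid')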
def add_bass(self, note, length, base_num=-1, velocity=0.7, channel=6, delay=0):
    bpm = self.bpm
    meta_time = 60 * 4 * 1000 / bpm
    major_notes = [0, 2, 2, 1, 2, 2, 2, 1]
    base_note = 60
    super().append(
        Message('note_on',
                note=base_note + base_num * 12 + sum(major_notes[0:note]),
                velocity=round(64 * velocity),
                time=round(delay * meta_time),
                channel=channel))
    super().append(
        Message('note_off',
                note=base_note + base_num * 12 + sum(major_notes[0:note]),
                velocity=round(64 * velocity),
                time=round(meta_time * length),
                channel=channel))
def NotoToMidi(self):
    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)
    # `bpm` is assumed to be defined elsewhere (e.g. at module level)
    track.append(MetaMessage("set_tempo", tempo=mido.bpm2tempo(bpm)))
    time = 0
    basetime = 240
    flag = 0
    note_ls = self.note_ls
    for note in range(len(note_ls)):
        # Rest
        if note_ls[note] == "N":
            if flag == 1:
                # Write out the previous note
                track.append(Message("note_on", note=note_num, velocity=127, time=0))
                track.append(Message("note_off", note=note_num, time=notetime))
            track.append(Message("note_off", note=1, time=basetime))
            flag = 0
        # Same as the previous note: extend it
        elif note != 0 and note_ls[note] == note_ls[note - 1]:
            notetime += basetime
        else:
            # Stop the previous note
            if flag == 1:
                track.append(Message("note_on", note=note_num, velocity=127, time=0))
                track.append(Message("note_off", note=note_num, time=notetime))
            # Look up the MIDI note number for the current note
            temp = note_ls[note]
            note_num = self.NoteNumber(temp)
            notetime = basetime
            flag = 1
    mid.save("new_song.mid")
def test_pitchwheel(self):
    """Check if pitchwheel type check and encoding is working."""
    msg = Message('pitchwheel', pitch=mido.messages.MIN_PITCHWHEEL)
    bytes = msg.bytes()
    self.assertTrue(bytes[1] == bytes[2] == 0)
    msg = Message('pitchwheel', pitch=mido.messages.MAX_PITCHWHEEL)
    bytes = msg.bytes()
    self.assertTrue(bytes[1] == bytes[2] == 127)
def _compose(track, notes, deltat=461.09, velocity=48, threshold=1.):
    """From notes to track."""
    LEN, dim = notes.shape
    track.append(MetaMessage('set_tempo', tempo=500000))
    # notes to abs time list
    times = []
    actions = []
    T = 0
    for ind, line in enumerate(notes):
        for note in range(dim):
            if notes[ind, note] >= threshold and \
                    (ind == 0 or notes[ind - 1, note] < threshold):
                times.append(T)
                actions.append(('note_on', note))
        T += int(deltat)
        for note in range(dim):
            if (notes[ind, note] < threshold and notes[ind - 1, note] >= threshold) or \
                    (ind == LEN - 1 and notes[ind, note] >= threshold):
                times.append(T)
                actions.append(('note_off', note))
    for i in range(len(times) - 1, 0, -1):
        times[i] = times[i] - times[i - 1]
    for t, a in zip(times, actions):
        if a[0] == 'note_on':
            track.append(
                Message('note_on', note=a[1], velocity=velocity, time=t))
        else:
            track.append(Message('note_off', note=a[1], velocity=0, time=t))
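# Usage sketch (assumption): feed a tiny binary piano roll (time steps x pitches)
# to _compose() and save the result. The roll contents and the output file name
# are hypothetical.
import numpy as np
from mido import MidiFile, MidiTrack

roll = np.zeros((9, 128))
roll[0:4, 60] = 1.0   # hold middle C for four steps
roll[4:8, 64] = 1.0   # then E for four steps; last row stays silent

mid = MidiFile()
track = MidiTrack()
mid.tracks.append(track)
_compose(track, roll)
mid.save('composed_sketch.mid')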
def tempo_change(self):
    mid = MidiFile(self.midi_file)
    mid_new = MidiFile()
    track = MidiTrack()
    mid_new.tracks.append(track)
    bpm = self.bpm
    tempo_new = mido.bpm2tempo(bpm)
    track.append(MetaMessage('set_tempo', tempo=tempo_new))
    for t in mid.tracks:
        for msg in t:
            if msg.type == 'note_on':
                track.append(
                    Message('note_on',
                            note=msg.note,
                            velocity=msg.velocity,
                            time=msg.time))
            elif msg.type == 'note_off':
                track.append(
                    Message('note_off', note=msg.note, time=msg.time))
    mid_new.save(self.midi_file)
def __init__(self, file_name, config, program):
    self.file_name = file_name
    self.output = MidiFile()
    self.track = MidiTrack()
    self.output.tracks.append(self.track)
    self.input_to_note = dict(
        zip(config['validInputs'], config['midiNotes']))
    self.fps = config['fps']
    self.track.append(Message('program_change', program=program, time=0))
    self.activated_notes = dict([(i, False) for i in config['validInputs']])
    self.beats_passed = 0
def test_real_time1(send_message_mock, mappings_real_time):
    store.update('mappings', mappings_real_time)
    midi = Message(type='note_on', channel=0, note=1, velocity=127)
    send_midi_through_the_stream(midi)
    assert send_message_mock.called is True
    assert send_message_mock.call_count == 1
    cmd = mappings_real_time[0]['o-type']
    expected = call({
        'type': cmd,
        'channel': None,
        'status': None,
        'level': None
    })
    assert send_message_mock.call_args == expected
def test_set_program2(send_message_mock, mappings_set_program):
    store.update('mappings', mappings_set_program)
    store.update('active_bank', 0)
    assert send_message_mock.called is False
    assert send_message_mock.call_count == 0

    # Program change 1
    midi = Message(type='note_on', channel=8, note=99, velocity=127)
    send_midi_through_the_stream(midi)
    assert send_message_mock.called is True
    assert send_message_mock.call_count == 3
    calls = send_message_mock.call_args_list
    assert calls[0][0][0]['type'] == 'note_on'
    assert calls[1][0][0]['type'] == 'program_change'
    assert calls[2][0][0]['type'] == 'note_off'

    # Program change 2
    midi = Message(type='note_on', channel=9, note=111, velocity=127)
    send_midi_through_the_stream(midi)
    assert send_message_mock.call_count == 6
    calls = send_message_mock.call_args_list
    assert calls[3][0][0]['type'] == 'note_off'
    assert calls[4][0][0]['type'] == 'note_on'
    assert calls[5][0][0]['type'] == 'program_change'
def generatePianoMIDI():
    mid = MidiFile(type=0)
    track = MidiTrack()
    mid.tracks.append(track)
    notes = range(20, 120)  # range(40, 90)
    for i in range(0, 20):
        note = random.choice(notes)
        track.append(
            Message('note_on',
                    note=note,
                    velocity=random.randint(50, 127),
                    time=i * 100))
        track.append(
            Message('note_off',
                    note=note,
                    velocity=random.randint(50, 127),
                    time=(i + random.randint(1, 5)) * 100))
    mid.save('random.mid')
def arpChord2(root, duration, track):
    root = root - 12
    duration = int(duration)
    track.append(Message('control_change', channel=0, control=64, value=127))
    track.append(
        Message('note_on', note=root, velocity=rd.randrange(-20, 20) + 40, time=0))
    track.append(Message('note_off', note=root, velocity=127, time=duration))
    track.append(
        Message('note_on', note=root + 7, velocity=rd.randrange(-20, 20) + 40, time=0))
    track.append(
        Message('note_off', note=root + 7, velocity=127, time=duration))
    track.append(
        Message('note_on', note=root + 12, velocity=rd.randrange(-20, 20) + 40, time=0))
    track.append(
        Message('note_off', note=root + 12, velocity=127, time=duration))
    track.append(
        Message('note_on', note=root + 7, velocity=rd.randrange(-20, 20) + 40, time=0))
    track.append(
        Message('note_off', note=root + 7, velocity=127, time=duration))
    track.append(Message('control_change', channel=0, control=64, value=0))
def save_inferences_to_midi(inferences, filename='Contrapunctus_XIV.mid'):
    print('Producing Midi file...')
    outfile = MidiFile()
    temp = bpm2tempo(48)  # or 76?
    # print('ticks_per_beat:', outfile.ticks_per_beat)
    outfile.ticks_per_beat = 2496
    for voice in range(len(inferences)):
        track = MidiTrack()
        outfile.tracks.append(track)
        track.append(MetaMessage('set_tempo', tempo=temp))
        track.append(Message('program_change', program=1))
        for inf in inferences[voice]:
            t = int(second2tick(inf.duration / 10.0, outfile.ticks_per_beat, temp))
            track.append(Message('note_on', velocity=64, note=inf.note,
                                 time=t if inf.note == 0 else 0))
            track.append(Message('note_off', velocity=64, note=inf.note,
                                 time=0 if inf.note == 0 else t))
    outfile.save(filename)
    print('MidiFile saved...')
    return filename
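# Usage sketch (assumption): the function above only needs objects exposing
# .note and .duration, so a namedtuple stands in for the real inference type.
# The values and the output file name are hypothetical.
from collections import namedtuple

Inference = namedtuple('Inference', ['note', 'duration'])
voices = [
    [Inference(note=60, duration=5), Inference(note=64, duration=5)],
    [Inference(note=48, duration=10)],
]
save_inferences_to_midi(voices, filename='inference_sketch.mid')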
def create_track(details, composer):
    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)
    track.append(Message('program_change', program=1, time=0))
    for n in details:
        v = int(n[1])
        if 0 < v < 35:
            v = 35
        elif v > 105:
            v = 105
        track.append(
            Message('note_on', note=int(n[0]), velocity=v, time=int(n[2])))
    now = datetime.now()
    current_time = now.strftime("%H-%M-%S")
    filename = 'ngrams_music/compositions/' + composer + "_" + current_time + '.mid'
    mid.save(filename)
    return mid, filename
def parse_to_midi(s: NoteSequence):
    """Parse NoteSequence to midi."""
    scale = [60, 63, 65, 67, 69, 70, 72, 75, 77, 79]
    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)
    for n in s:
        print(n)
        played_notes = [v for k, v in enumerate(scale) if n[k]]
        if played_notes:
            for note in played_notes:
                track.append(Message('note_on', note=note, time=0))
            track.append(
                Message('note_off', note=played_notes[0], time=int(960 * n.duration)))
            for note in played_notes[1:]:
                track.append(Message('note_off', note=note, time=0))
    print(f'Total length of song: {mid.length}')
    print(f'Total number of NoteEvents: {len(s)}')
    print('Writing song as midi file.')
    mid.save('test_song.mid')
def toMIDI(filename, ch, notes, rms, onset_start_times, onset_end_times, nOnsets):
    print('Transcribing to MIDI in ' + filename)
    delta = 0
    with MidiFile() as outfile:
        track = MidiTrack()
        outfile.tracks.append(track)
        for i in range(nOnsets):
            stime = int((onset_start_times[i] - delta) * 1000)
            message = Message('note_on',
                              note=int(notes[i]),
                              velocity=int(rms[i] * 127),
                              time=stime)
            message.channel = ch
            track.append(message)
            etime = int((onset_end_times[i] - delta) * 1000)
            off_message = Message('note_off',
                                  note=int(notes[i]),
                                  velocity=int(rms[i] * 127),
                                  time=etime)
            off_message.channel = ch
            track.append(off_message)
            delta = onset_end_times[i]
        outfile.print_tracks()
        outfile.save('/media/sf_VMshare/' + filename)
    print('Transcription successful!')
def generate_midi(mkv):
    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)
    track.append(Message('program_change', program=0, time=0))
    counter = 0
    tonic_hit = False
    while counter < constants.minimum_duration or not tonic_hit:
        note = mkv.get_state()[0]
        if note == 11:
            note = -1
        track.append(Message('note_on', note=60 + note, velocity=64, time=0))
        track.append(
            Message('note_off',
                    note=60 + note,
                    velocity=32,
                    time=int(mkv.get_state()[1] * 480 * 4)))
        if counter > constants.minimum_duration and mkv.get_state()[0] == 0:
            tonic_hit = True
        counter += 1
        mkv.next_state()
    mid.save(constants.save_midi_path + 'new_song.mid')
def createMidiFromPianoRoll(piano_roll, lowest_note, directory, mel_test_file,
                            threshold, res_factor=1):
    ticks_per_beat = 1024
    mid = MidiFile(type=1, ticks_per_beat=ticks_per_beat)
    track = MidiTrack()
    mid.tracks.append(track)
    mid_files = []

    for k in range(piano_roll.shape[1]):  # initial starting values
        if piano_roll[0, k] == 1:
            track.append(Message('note_on', note=k + lowest_note, velocity=70,
                                 channel=2, time=0))
    time_off = 1008
    for k in range(piano_roll.shape[1]):  # initial starting values
        if piano_roll[0, k] == 1:
            track.append(Message('note_off', note=k + lowest_note, velocity=30,
                                 channel=2, time=time_off))
            time_off = 0

    for j in range(int(piano_roll.shape[0] / 8)):  # quarter-note bass
        step = 8 * j
        time_on = 16
        for k in range(piano_roll.shape[1]):
            if piano_roll[step, k] == 1:
                track.append(Message('note_on', note=k + lowest_note, velocity=70,
                                     channel=2, time=time_on))
                time_on = 0
        time_off = 1008
        for k in range(piano_roll.shape[1]):
            if piano_roll[step, k] == 1:
                track.append(Message('note_off', note=k + lowest_note, velocity=30,
                                     channel=2, time=time_off))
                time_off = 0

    mid.save('%s%s_th%s.mid' % (directory, mel_test_file, threshold))
    mid_files.append('%s.mid' % (mel_test_file))
    return
def pianorollToMidi(piano_roll, filepath):
    # ensure that resolution is an integer
    ticks_per_time_slice = 1  # hard-coded; arbitrary, but must be an integer >= 1 to avoid distortion
    tempo = 1 / time_per_time_slice
    resolution = 60 * ticks_per_time_slice / (tempo * time_per_time_slice)
    mid = MidiFile(ticks_per_beat=int(resolution))
    track = MidiTrack()
    mid.tracks.append(track)
    track.append(MetaMessage('set_tempo', tempo=int(MICROSECONDS_PER_MINUTE / tempo), time=0))

    current_state = np.zeros(input_dim)
    index_of_last_event = 0
    for slice_index, time_slice in enumerate(
            np.concatenate((piano_roll, np.zeros((1, input_dim))), axis=0)):
        note_changes = time_slice - current_state
        for note_idx, note in enumerate(note_changes):
            if note == 1:
                note_event = Message('note_on',
                                     time=(slice_index - index_of_last_event) * ticks_per_time_slice,
                                     velocity=65,
                                     note=note_idx + lowest_note)
                track.append(note_event)
                index_of_last_event = slice_index
            elif note == -1:
                note_event = Message('note_off',
                                     time=(slice_index - index_of_last_event) * ticks_per_time_slice,
                                     velocity=65,
                                     note=note_idx + lowest_note)
                track.append(note_event)
                index_of_last_event = slice_index
        current_state = time_slice

    eot = MetaMessage('end_of_track', time=1)
    track.append(eot)
    mid.save(filepath)
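# Usage sketch (assumption): the function above relies on module-level names that
# are not shown here; the stand-in values below are hypothetical, as are the piano
# roll and the output path.
import numpy as np

MICROSECONDS_PER_MINUTE = 60_000_000
time_per_time_slice = 0.02   # seconds per piano-roll slice (assumed)
input_dim = 88               # number of pitches in the roll (assumed)
lowest_note = 21             # MIDI note of roll column 0 (assumed: A0)

roll = np.zeros((100, input_dim))
roll[10:50, 39] = 1          # hold middle C (21 + 39 = 60) for 40 slices
pianorollToMidi(roll, 'piano_roll_sketch.mid')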
def send_midi_message(midi):
    # select note
    note = midi_to_note_on_scale(midi)

    # turn on note
    on = Message('note_on', channel=13, note=note, velocity=int(midi))
    out_port.send(on)

    # note length is either dynamic or static
    if args.time:
        ms = args.time
    else:
        ms = 10000 / midi

    # log and sleep
    if args.verbose:
        print("length:" + str(ms) + "ms velocity:" + str(midi) + " note:" +
              str(note) + " thread:" + str(threading.currentThread().getName()))
    time.sleep(ms / 1000.0)

    # turn off note
    off = Message('note_off', channel=13, note=note, velocity=int(midi))
    out_port.send(off)
def brainfrick_to_midi(bf_string):
    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)
    track.append(Message('program_change', program=0, time=0))
    for bf_char in bf_string:
        onote = internal_bf_to_int(bf_char)
        if onote == -1:
            continue
        track.append(
            Message('note_on', note=onote, velocity=70, time=TRANSLATE_TIME_OFF))
        track.append(
            Message('note_off', note=onote, velocity=127, time=TRANSLATE_TIME_ON))
        print(onote)
    mid.save('new_song.mid')
def outLoop(out_port):
    global running, stepping
    while running:
        time.sleep(1 * 100 / conf.C_FB_FACTOR)
        if stepping == 0:
            window.setTimeOut("Open")
        else:
            stepping -= 1
            window.setTimeOut(stepping)
        if conf.C_FB_STYLE == 0 and stepping < 10:
            cc = Message('control_change', channel=0, control=1,
                         value=int(stepping * 12.7))
            out_port.send(cc)
        if conf.C_FB_STYLE == 1:
            cc = Message('control_change', channel=0, control=1,
                         value=int(stepping))
            out_port.send(cc)
def reconstruct_midi(midi_filename, midi_segments, absolute_ticks_last_note,
                     length_in_secs_full_song):
    time_so_far = 0
    for midi_segment in midi_segments:
        # time in seconds to absolute ticks
        absolute_ticks_midi_segment = []
        start_time = midi_segment[0]
        messages = midi_segment[1]
        for message in messages:
            note_on_or_off = message[0]
            pitch = message[1]
            scaled_time = message[-1]
            time = scaled_time + start_time
            absolute_ticks = time * absolute_ticks_last_note / length_in_secs_full_song
            absolute_ticks_midi_segment.append(
                [note_on_or_off, pitch, absolute_ticks])

        # time in absolute ticks to delta time
        delta_time_midi_segment = []
        for message in absolute_ticks_midi_segment:
            note_on_or_off = message[0]
            pitch = message[1]
            time = message[-1]
            delta_time = int(time - time_so_far)
            delta_time_midi_segment.append([note_on_or_off, pitch, delta_time])
            time_so_far = time

        mid = MidiFile()
        track = MidiTrack()
        mid.tracks.append(track)
        for message in delta_time_midi_segment:
            note_on_or_off = message[0]
            pitch = int(message[1])
            delta_ticks = message[-1]
            # debugging/for future use with a dataset other than SMD
            if type(delta_ticks) != int or delta_ticks < 0:
                print("time issue")
            track.append(Message(note_on_or_off, note=pitch, time=delta_ticks))

        # for testing by listening to midi (currently written for windows)
        # str_start_time = str(midi_segment[0])
        # filename_format = "C:/Users/Lilly/audio_and_midi/segments/midi/{0}_start_time_{1}.mid"
        # filename = filename_format.format(midi_filename, str_start_time)
        # mid.save(filename)
    return
def note(self, note, velocity, duration):
    # Automatically turn off the last note that was played
    lastNote = self._lastNote()
    if lastNote:
        self._noteOff(lastNote)

    # Play a new note
    self._track.append(
        Message('note_on',
                note=note,
                velocity=velocity,
                time=int(duration / 2),
                channel=self._channel))
    return self
def null_vel_to_note_off(self):
    if not self.checking_note_offs():
        for track in self.song.tracks:
            for index, message in enumerate(track):
                if message.type == 'note_on' and message.velocity == 0:
                    time = message.time
                    channel = message.channel
                    velocity = message.velocity
                    note = message.note
                    track.remove(message)
                    track.insert(index, Message('note_off',
                                                channel=channel,
                                                note=note,
                                                velocity=velocity,
                                                time=time))
        return self.song
    else:
        return self.song
def next_bar(self, piece: Piece) -> Iterable[Message]:
    barlen = barlength(piece.beatsperbar, piece.bpm)
    stress = 3 if piece.beatsperbar % 3 == 0 else 2
    for i in range(piece.beatsperbar):
        time = i / piece.beatsperbar * barlen
        if i == 0:
            velocity = 90
        elif i % stress == 0:
            velocity = 75
        else:
            velocity = 60
        yield Message(type="note_on", note=60, channel=9, velocity=velocity, time=time)
def km2level(kmer, notes):
    convert = {'A': '00', 'C': '01', 'G': '10', 'T': '11'}
    binVal = ''
    for n in kmer:
        binVal += convert[n]
    nt = int(binVal[0:7], 2)
    vl = int(binVal[7:], 2)
    op = ''
    if notes[nt]:
        notes[nt] = 0
        op = 'note_off'
    else:
        notes[nt] = 1
        op = 'note_on'
    message = Message(op, note=nt, velocity=vl, time=16)
    return message
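# Usage sketch (assumption): each nucleotide contributes two bits, so a 7-mer
# yields 14 bits -- the first 7 become the note and the remaining 7 the velocity.
# `notes` tracks which pitches are currently sounding so repeated k-mers toggle
# note_on/note_off. The sequence below is hypothetical.
notes_state = [0] * 128
seq = 'ACGTACGTACGT'
k = 7
for i in range(len(seq) - k + 1):
    print(km2level(seq[i:i + k], notes_state))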
def add_meta_info(self):
    tempo = mido.bpm2tempo(self.bpm)
    numerator = Fraction(self.time_signature).numerator
    denominator = Fraction(self.time_signature).denominator
    super().append(MetaMessage('set_tempo', tempo=tempo))
    super().append(
        MetaMessage('time_signature',
                    numerator=numerator,
                    denominator=denominator))
    super().append(MetaMessage('key_signature', key=self.key))
    for channel, program in self.instruments.items():
        super().append(
            Message('program_change',
                    channel=int(channel),
                    program=program,
                    time=0))
def __demo():
    """Send a beeping middle C note repetitively to the MIDI port."""
    msg = Message('note_on', note=60)
    set_output_port()
    print("Running on: " + str(midolib.get_input_names()[0]))
    start_note(68, 100, 0)
    time.sleep(0.5)
    while True:
        stop_note(68, 0)
        time.sleep(0.5)
        start_note(68, 100, 0)
        time.sleep(0.5)
def run_queue_out(player):
    while player.running.value:
        if not player.queue_out.empty() and time.monotonic() > player.deadline.value:
            """
            track is an array of pairs: the first element is the note number in
            the chord, the second is the note length (duration) in 1/128ths.
            The sum of durations MUST equal 128.
            """
            player.play_chord_arpeggio(
                np.array([[0, 19], [1, 18], [2, 18], [3, 18], [2, 18], [1, 18],
                          [0, 19]]))
        time.sleep(0.01)
    if player.last_note_number is not None:
        note_off = Message('note_off',
                           note=player.last_note_number,
                           velocity=min_velocity,
                           channel=default_ultrasound_channel).bytes()
        player.midiout.send_message(note_off)
def test_pitchwheel_encode_parse(self):
    """Encode and parse pitchwheel with value=0."""
    a = Message('pitchwheel', pitch=0)
    b = mido.parse(a.bytes())
    self.assertTrue(a == b)
#!/usr/bin/env python
import mido, time, random
from mido import Message
import numpy as np

# LAUGH (IN JOY)

# Define controls
msg_on = Message('note_on', channel=3, note=60, velocity=90, time=0)
msg_off = Message('note_off', channel=3, note=60, velocity=90, time=0)
track = Message('control_change', channel=0, control=12, value=1)
sel = Message('control_change', channel=3, control=12, value=1)
transp = Message('control_change', channel=4, control=12, value=46)   # 26 - 46
rate = Message('control_change', channel=5, control=12, value=65)     # 70 - 65
length = Message('control_change', channel=6, control=12,
                 value=random.randrange(28, 35))                      # 28 - 35
rep = Message('control_change', channel=8, control=12, value=40)      # 35 - 40
pitch = Message('control_change', channel=9, control=12, value=127)   # 100 - 127
decay = Message('control_change', channel=10, control=12, value=86)   # 76 - 86

# Create output
outport = mido.open_output()

# Algorithm: laugh
outport.send(track)
outport.send(sel)
outport.send(msg_on)
outport.send(rate)
outport.send(length)
#!/usr/bin/env python
import mido, time, random, rospy
from mido import Message
import numpy as np

# CHATTER

# Define controls
msg_on = Message('note_on', channel=3, note=60, velocity=90, time=0)
msg_off = Message('note_off', channel=3, note=60, velocity=90, time=0)
track = Message('control_change', channel=0, control=12, value=1)
sel = Message('control_change', channel=3, control=12, value=120)
transp = Message('control_change', channel=4, control=12, value=30)  # from 30 - 60 to 70 - 100
rate = Message('control_change', channel=5, control=12, value=60)    # from 60 - 30 to 30 - 0
length = Message('control_change', channel=6, control=12, value=19)
pitch = Message('control_change', channel=9, control=12, value=75)

# Create output
outport = mido.open_output()

# Algorithm 1
# Send MIDI
transp0 = transp.value
rate0 = rate.value
outport.send(msg_on)
outport.send(track)
outport.send(sel)
import mido, time
from mido import Message
import numpy as np

# YUHUU (IN JOY)

# Define controls
msg_on = Message('note_on', channel=3, note=60, velocity=90, time=0)
msg_off = Message('note_off', channel=3, note=60, velocity=90, time=0)
track = Message('control_change', channel=0, control=12, value=1)
sel = Message('control_change', channel=3, control=12, value=1)
transp = Message('control_change', channel=4, control=12, value=90)   # 75 - 90
rate = Message('control_change', channel=5, control=12, value=95)     # 85 - 95
length = Message('control_change', channel=6, control=12, value=40)   # 30 - 40
rep = Message('control_change', channel=8, control=12, value=38)      # 38
pitch = Message('control_change', channel=9, control=12, value=100)   # 90 - 100
decay = Message('control_change', channel=10, control=12, value=76)   # 86 - 76

# Create output
outport = mido.open_output()

# Algorithm: the more arousal, the more transp and length of the first note (slow it down)
# Send MIDI
outport.send(msg_on)
outport.send(track)
outport.send(sel)
outport.send(transp)
outport.send(rate)
#!/usr/bin/env python
import mido, time
from mido import Message
import numpy as np

# TRACKING (IN SATISFACTION)

# Define controls
msg_on = Message('note_on', channel=3, note=60, velocity=90, time=0)
msg_off = Message('note_off', channel=3, note=60, velocity=90, time=0)
track = Message('control_change', channel=0, control=12, value=4)
sel = Message('control_change', channel=3, control=12, value=4)
transp = Message('control_change', channel=4, control=12, value=60)
rate = Message('control_change', channel=5, control=12, value=94)
length = Message('control_change', channel=6, control=12, value=40)
rep = Message('control_change', channel=8, control=12, value=38)
pitch = Message('control_change', channel=9, control=12, value=101)
decay = Message('control_change', channel=10, control=12, value=76)

# Create output
outport = mido.open_output()

# Algorithm 1
# Send MIDI
outport.send(msg_on)
outport.send(track)
outport.send(sel)
outport.send(rate)
outport.send(length)
#!/usr/bin/env python
import mido, time
from mido import Message
import numpy as np

# DOUBT

# Define controls
msg_on = Message('note_on', channel=3, note=60, velocity=90, time=0)
msg_off = Message('note_off', channel=3, note=60, velocity=90, time=0)
track = Message('control_change', channel=1, control=12, value=1)
sel = Message('control_change', channel=3, control=12, value=1)
transp = Message('control_change', channel=4, control=12, value=70)  # 40 - 70
rate = Message('control_change', channel=4, control=12, value=15)    # 15 - 30

# Create output
outport = mido.open_output()

# Algorithm 1
# Send MIDI
transp0 = transp.value
rate0 = rate.value
outport.send(msg_on)
outport.send(track)
outport.send(sel)
for i in range(20, 65):
    transp.value = i + int(np.around(0.8 * transp0))
#!/usr/bin/env python
import mido, time, random, rospy
from mido import Message
import numpy as np

# CHATTER

# Define controls
msg_on = Message('note_on', channel=3, note=60, velocity=90, time=0)
msg_off = Message('note_off', channel=3, note=60, velocity=90, time=0)
track = Message('control_change', channel=0, control=12, value=1)
sel = Message('control_change', channel=3, control=12, value=120)
transp = Message('control_change', channel=4, control=12, value=random.randrange(65, 102))
rate = Message('control_change', channel=5, control=12, value=random.randrange(20, 50))
length = Message('control_change', channel=6, control=12, value=19)
pitch = Message('control_change', channel=8, control=12, value=75)

# Create output
outport = mido.open_output()

# Algorithm 1
# Send MIDI
outport.send(msg_on)
outport.send(track)
outport.send(sel)
outport.send(length)
outport.send(pitch)
while not rospy.is_shutdown():
    transp.value = random.randrange(50, 100)
def test_sysex(self):
    original = Message('sysex', data=(1, 2, 3, 4, 5))
    parsed = mido.parse(original.bytes())
    self.assertTrue(original == parsed)
import mido, time, random
from mido import Message
import numpy as np

# TERROR (IN AFFECTION)

# Define controls
msg_on = Message('note_on', channel=3, note=60, velocity=90, time=0)
msg_off = Message('note_off', channel=3, note=60, velocity=90, time=0)
track = Message('control_change', channel=0, control=12, value=1)
sel = Message('control_change', channel=3, control=12, value=89)
length = Message('control_change', channel=6, control=12, value=54)
amount = Message('control_change', channel=7, control=12, value=120)
pitch = Message('control_change', channel=9, control=12, value=100)

# Create output
outport = mido.open_output()

# Algorithm 1
# Send MIDI
outport.send(msg_on)
outport.send(track)
outport.send(sel)
outport.send(length)
outport.send(amount)
for i in range(100, 111):
    pitch.value = i
    time.sleep(0.3)
    outport.send(pitch)
for pred in prediction:
    for i in range(0, 4):
        pred[i] = pred[i] * (max_val[i] - min_val[i]) + min_val[i]
        if pred[i] < min_val[i]:
            pred[i] = min_val[i]
        if pred[i] >= max_val[i]:
            pred[i] = max_val[i]

###########################################
###### SAVING TRACK FROM BYTES DATA #######
mid = MidiFile()
track = MidiTrack()
mid.tracks.append(track)

for note in prediction:
    # 144 (0x90) is the note_on status byte for channel 0
    note = np.insert(note, 1, 144)
    bytes = np.round(note).astype(int)
    msg = Message.from_bytes(bytes[1:4])
    msg.time = int(note[4] / 0.00125)  # rescale to MIDI delta ticks; arbitrary value for now
    msg.channel = bytes[0]
    print(msg)
    track.append(msg)

mid.save('new_song.mid')
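# Sketch (assumption): a quick illustration of what Message.from_bytes() does with
# the status byte inserted above -- 0x90 (144) is note_on on channel 0, so the raw
# triple [144, 60, 64] decodes to a note_on for middle C at velocity 64.
demo = Message.from_bytes([144, 60, 64])
print(demo)  # note_on channel=0 note=60 velocity=64 time=0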