Code Example #1
def raw_to_msg(raw):
    # 144 (0x90) is the note_on status byte for channel 0
    if raw[0] == 144:
        msg = Message.from_bytes(raw[:3])
    else:
        msg = Message('sysex', data=[])
    # raw[3] is divided by 1000, i.e. treated as a millisecond timestamp
    msg.time = raw[3] / 1000
    return msg
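For context, a minimal usage sketch of raw_to_msg, assuming raw is a 4-element sequence of status byte, note, velocity, and a millisecond timestamp (that layout is inferred from the code above rather than stated in the excerpt):

from mido import Message

# Hypothetical input: note_on (144), middle C, velocity 100, 250 ms timestamp
raw = [144, 60, 100, 250]
msg = raw_to_msg(raw)
print(msg, msg.time)  # a note_on Message with time = 0.25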
Code Example #2
File: recorder.py  Project: thebne/midiate-utils
def record(midi_file, bpm):
    ctx = zmq.Context()
    sock = ctx.socket(zmq.SUB)
    sock.connect(INPUT_ZMQ_URL)
    sock.subscribe(b'')

    track = MidiTrack()
    track.append(
        MetaMessage('track_name',
                    name=f'MIDIate Recorder {str(datetime.now())}',
                    time=0))
    midi_file.tracks.append(track)

    print('Recording...')
    try:
        while True:
            data = sock.recv()
            # First 8 bytes: little-endian uint64 timestamp in nanoseconds; the rest: raw MIDI bytes
            current_ts, msg = data[:8], data[8:]
            current_ts, = struct.unpack("<Q", current_ts)
            msg = Message.from_bytes(msg)
            msg.time = int(
                second2tick(current_ts / NANOSECONDS_IN_SECONDS,
                            TICKS_PER_BEAT, bpm2tempo(bpm)))
            track.append(msg)
    except KeyboardInterrupt:
        print('Recorder stopped.')
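For reference, a hedged sketch of the sending side this recorder expects: an 8-byte little-endian nanosecond timestamp followed by the raw MIDI bytes, published over ZeroMQ (the bind URL and the timestamp source are assumptions, not taken from the project):

import struct
import time

import zmq
from mido import Message

ctx = zmq.Context()
sock = ctx.socket(zmq.PUB)
sock.bind("tcp://127.0.0.1:5555")  # assumed to match INPUT_ZMQ_URL on the subscriber side

msg = Message('note_on', note=60, velocity=100)
# 8 bytes of little-endian uint64 nanoseconds, then the raw message bytes
sock.send(struct.pack("<Q", time.time_ns()) + bytes(msg.bytes()))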
Code Example #3
File: midi.py  Project: snkhtm/wave-function-collapse
def export_midi(notes, path, ticks_per_beat):
    notes = np.squeeze(notes, axis=0)
    notes = np.squeeze(notes, axis=0)

    midi_file = MidiFile()
    midi_file.ticks_per_beat = ticks_per_beat
    track = MidiTrack()
    track.append(MetaMessage('set_tempo', tempo=TEMPO))
    midi_file.tracks.append(track)
    for note in notes:
        # Each row is [status byte, note, velocity, time in seconds]
        note_bytes = note.astype(int)
        msg = Message.from_bytes(note_bytes[0:3])
        # Convert the time column from seconds to MIDI ticks
        time = int(second2tick(note[3], ticks_per_beat, TEMPO))
        msg.time = time
        track.append(msg)

    print(midi_file)
    midi_file.save(path)
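A hedged usage sketch for export_midi, assuming TEMPO is a module-level constant (e.g. 500000 microseconds per beat, i.e. 120 BPM) and that notes arrives with shape (1, 1, N, 4), one [status, note, velocity, seconds] row per note:

import numpy as np

TEMPO = 500000  # assumed module-level constant: microseconds per beat (120 BPM)

# Two note_on events (status 144 = 0x90), the second half a second after the first
notes = np.array([[[[144, 60, 100, 0.0],
                    [144, 64, 100, 0.5]]]], dtype=float)
export_midi(notes, 'demo.mid', ticks_per_beat=480)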
Code Example #4
def ParseMIDI(filename):

    notes = []
    mid1 = MidiFile(filename)
    for i, track in enumerate(mid1.tracks):
        for msg in track:
            if not msg.is_meta:
                # msg.time already holds the delta time in ticks
                time = msg.time
                try:
                    m = Message.from_bytes(msg.bytes())
                    #print(m.note, m.velocity, time)
                    notes.append(m.note)
                except AttributeError:
                    # Skip messages without a 'note' attribute (e.g. control_change)
                    pass

    return notes
Code Example #5
for pred in prediction:
    # Clamp predicted values into valid ranges before converting them to MIDI bytes
    if pred[0] < 24:
        pred[0] = 24
    elif pred[0] > 102:
        pred[0] = 102
    if pred[1] < 0:
        pred[1] = 0
    elif pred[1] > 127:
        pred[1] = 127
    if pred[2] < 0:
        pred[2] = 0
###########################################

###### SAVING TRACK FROM BYTES DATA #######
mid = MidiFile()
track = MidiTrack()
mid.tracks.append(track)

for note in prediction:
    # 147 = 0x93: a note_on status byte on channel 3
    note = np.insert(note, 0, 147)
    note_bytes = note.astype(int)
    print(note)
    msg = Message.from_bytes(note_bytes[0:3])
    # Rescale to MIDI delta ticks; the divisor is an arbitrary value for now
    time = int(note[3] / 0.001025)
    msg.time = time
    track.append(msg)

mid.save('new_song.mid')
###########################################
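As a side note on the magic numbers used in these examples: a status byte packs the message type in its high nibble and the channel in its low nibble, so 144 (0x90) is note_on on channel 0 and 147 (0x93) is note_on on channel 3. A minimal sketch (note_on_bytes is a hypothetical helper, used only for illustration):

from mido import Message

NOTE_ON = 0x90  # 144

def note_on_bytes(note, velocity, channel=0):
    # Status byte = message-type nibble | channel nibble (0-15)
    return [NOTE_ON | channel, note, velocity]

msg = Message.from_bytes(note_on_bytes(60, 100, channel=3))
print(msg)          # note_on on channel 3, i.e. status byte 147
print(msg.channel)  # 3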
Code Example #6
notes = []

# Generate 100 random notes: note number, velocity, and a time value
for i in range(100):
    x = r.randint(40, 80)     # note number
    y = r.randint(50, 70)     # velocity
    z = 0.5 + r.random() * 2  # time value, rescaled to ticks below

    notes.append([x, y, z])
print(notes)

mid = MidiFile()
track = MidiTrack()
mid.tracks.append(track)

for note in notes:
    # 147 = 0x93: a note_on status byte on channel 3
    note = np.insert(note, 0, 147)
    bytesOfInt = note.astype(int)
    print(note)
    msg = Message.from_bytes(bytesOfInt[0:3])
    # Rescale to MIDI delta ticks; the divisor is an arbitrary value for now
    time = int(note[3] / 0.001025)
    msg.time = time
    track.append(msg)

mid.save('rSong.mid')
Code Example #7
audio = MidiFile()
track = MidiTrack()

# Creating a time counter
t = 0

# Iterating through generated notes
for note in notes:
    # Creating a note array: status 147 (note_on, channel 3), the note number, velocity 67
    note = np.asarray([147, note, 67])

    # Converting the values to plain integers
    note_bytes = note.astype(int)

    # Decoding the three bytes into a mido Message
    step = Message.from_bytes(note_bytes[0:3])

    # Increasing the track counter
    t += 1

    # Using the counter as this message's delta time (in ticks)
    step.time = t

    # Appending to track
    track.append(step)

# Appending track to file
audio.tracks.append(track)

# Outputting generated .midi file
audio.save('generated_sample.mid')
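One detail worth keeping in mind with this pattern: in mido, a message's time attribute is the delta time in ticks since the previous message in the same track, so an ever-increasing t spaces the notes further and further apart. A minimal sketch of evenly spaced notes, assuming a constant step of 120 ticks:

from mido import Message, MidiFile, MidiTrack

mid = MidiFile()
track = MidiTrack()
mid.tracks.append(track)

STEP_TICKS = 120  # assumed constant spacing between notes, in ticks

for note in (60, 62, 64, 65):
    # time is the delta from the previous message, so a constant value gives even spacing
    track.append(Message('note_on', note=note, velocity=67, time=STEP_TICKS))

mid.save('evenly_spaced.mid')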
Code Example #8
from mido import Message
msg = Message('note_on', note=60)
print(msg)
msg = msg.copy(note=100, velocity=127)
print(msg)
msg2 = Message('note_on', note=100, velocity=3, time=6.2)
print(msg2)
print(msg2.bytes())
print(msg2.hex())
msg3 = Message.from_bytes([0x90, 0x42, 0x60])
print(msg3)
print(msg3.dict())
print(msg3.is_meta)
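Alongside from_bytes, mido also provides Message.from_hex for building the same message from a hex string; a short sketch:

from mido import Message

msg4 = Message.from_hex('90 42 60')
print(msg4)          # equivalent to Message.from_bytes([0x90, 0x42, 0x60])
print(msg4.bytes())  # [144, 66, 96]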
Code Example #9
for pred in prediction:
    for i in range(0,4):
        pred[i] = pred[i] * (max_val[i]-min_val[i]) + min_val[i]
        if pred[i] < min_val[i]:
            pred[i] = min_val[i]

        if pred[i] >= max_val[i]:
            pred[i] = max_val[i]
	
###########################################


###### SAVING TRACK FROM BYTES DATA #######
mid = MidiFile()
track = MidiTrack()
mid.tracks.append(track)

for note in prediction:
    # 144 = 0x90: the note_on status byte; the channel is applied separately below
    note = np.insert(note, 1, 144)
    note_bytes = np.round(note).astype(int)
    msg = Message.from_bytes(note_bytes[1:4])
    msg.time = int(note[4] / 0.00125)  # rescale to MIDI delta ticks; arbitrary value for now
    msg.channel = note_bytes[0]
    print(msg)
    track.append(msg)

mid.save('new_song.mid')

Code Example #10
    def generate_music(self, model, length=3000):
        '''
        Generates a MIDI file from the trained model's predictions.
        model - trained model
        length - length of the generated midi sequence
        '''
        # Generating the music
        # Making predictions
        tic = time.time()
        y_pred = []
        x = self._seed
        x = numpy.expand_dims(x, axis=0)

        print('Making Music...')
        for _ in range(length):
            pred = model.predict(x)
            x = numpy.squeeze(x)
            x = numpy.concatenate((x, pred))
            x = x[1:]
            x = numpy.expand_dims(x, axis=0)
            pred = numpy.squeeze(pred)
            y_pred.append(pred)

        print('Compiling Music File...')
        for p in y_pred:

            # Rescaling the value to 0 - 127
            # and clamping it to the valid MIDI data-byte range
            p[0] = int(127 * p[0])
            if p[0] < 0:
                p[0] = 0
            elif p[0] > 127:
                p[0] = 127

            p[1] = int(127 * p[1])
            if p[1] < 0:
                p[1] = 0
            elif p[1] > 127:
                p[1] = 127
            # Rescaling the time back to normal time
            p[2] *= self._max_time
            if p[2] < 0:
                p[2] = 0
        # print(y_pred)

        # rendering midi file
        print('Rendering Midi File...')
        pred_mid_song = MidiFile()
        track = MidiTrack()
        pred_mid_song.tracks.append(track)

        for p in y_pred:
            # Prepend the status byte 147 (0x93: note_on on channel 3)
            p = numpy.insert(p, 0, 147)

            byte = p.astype(int)
            msg = Message.from_bytes(byte[0:3])
            # Rescale the time value to MIDI delta ticks; the divisor is arbitrary for now
            _time = int(p[3] / 0.001025)
            msg.time = _time
            track.append(msg)

        print('Saving midi file')
        pred_mid_song.save('out/beth_gen1.midi')
        toc = time.time()
        print('Time taken for rendering midi file {}'.format(toc - tic))
        print('Done')
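As a quick sanity check on the generated file, a hedged sketch that re-reads it with mido (the path matches the one saved above):

from mido import MidiFile

mid = MidiFile('out/beth_gen1.midi')
print('Duration (seconds):', mid.length)  # total playback time at the stored/default tempo
for msg in mid.tracks[0][:5]:
    print(msg)                            # first few decoded messages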