Code example #1
import os

import mido
from mido import MidiFile

output = mido.open_output()
# mido does not expand "~", so resolve the home directory explicitly
fileName = os.path.expanduser("~/Desktop/mbot.midi")

for msg in MidiFile(fileName).play():
    output.send(msg)
    print(msg)
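
With no argument, mido.open_output() opens the system default port. To target a specific device, list the available port names first; the port name below is only a placeholder:

import mido

# Enumerate the available output ports, then open one by name.
print(mido.get_output_names())
output = mido.open_output('FluidSynth virtual port (0)')  # placeholder name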
Code example #2
import os

from mido import MidiFile


def validate_data(path, quant):
    '''Creates a folder containing valid MIDI files.

    Arguments:
    path -- Original directory containing untouched midis.
    quant -- Level of quantisation'''

    path_prefix, path_suffix = os.path.split(path)

    # Handle case where a trailing / requires two splits.
    if len(path_suffix) == 0:
        path_prefix, path_suffix = os.path.split(path_prefix)

    total_file_count = 0
    processed_count = 0

    base_path_out = os.path.join(path_prefix, path_suffix + '_valid')

    for root, dirs, files in os.walk(path):
        for file in files:
            if file.split('.')[-1] == 'mid' or file.split('.')[-1] == 'MID':
                total_file_count += 1
                print('Processing ' + str(file))
                midi_path = os.path.join(root, file)
                try:
                    midi_file = MidiFile(midi_path)
                except (KeyError, IOError, TypeError, IndexError, EOFError,
                        ValueError):
                    print("Bad MIDI.")
                    continue
                time_sig_msgs = [
                    msg for msg in midi_file.tracks[0]
                    if msg.type == 'time_signature'
                ]

                if len(time_sig_msgs) == 1:
                    time_sig = time_sig_msgs[0]
                    if not (time_sig.numerator == 4
                            and time_sig.denominator == 4):
                        print('\tTime signature not 4/4. Skipping ...')
                        continue
                else:
                    # print time_sig_msgs
                    print('\tNo time signature. Skipping ...')
                    continue

                mid = quantize(MidiFile(midi_path), quant)
                if not mid:
                    print('Invalid MIDI. Skipping...')
                    continue

                if not os.path.exists(base_path_out):
                    os.makedirs(base_path_out)

                out_file = os.path.join(base_path_out, file)

                print('\tSaving', out_file)
                midi_file.save(out_file)  # saves the original file; quantisation above is only a validity check
                processed_count += 1

    print('\nProcessed {} files out of {}'.format(processed_count,
                                                  total_file_count))
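
The quantize helper called above is not part of mido. A minimal sketch of what it is assumed to do (snap every delta time to a tick grid derived from the quantisation level, returning None on failure), not the project's actual implementation:

def quantize(midi_file, quant):
    # Assumed behaviour: snap delta times to a grid of ticks_per_beat / quant ticks.
    grid = midi_file.ticks_per_beat // quant
    if grid == 0:
        return None
    for track in midi_file.tracks:
        for msg in track:
            msg.time = int(round(msg.time / grid)) * grid
    return midi_file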
Code example #3
from mido import MidiFile, MidiTrack, Message
from keras.layers import LSTM, Dense, Activation, Dropout
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.optimizers import RMSprop
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import mido

########### PROCESS MIDI FILE #############
mid = MidiFile('midi/original_metheny.mid')  # alternatively 'midi/allegroconspirito.mid', a Mozart piece

notes = []

time = float(0)
prev = float(0)

for msg in mid:
    # this time is in seconds, not ticks
    time += msg.time
    if not msg.is_meta:
        # only interested in the piano channel
        if msg.channel == 0:
            if msg.type == 'note_on':
                # note in vector form to train on
                note = msg.bytes()
                # only interested in the note and velocity; the message is [type, note, velocity]
                note = note[1:3]
                note.append(time - prev)
                prev = time
                notes.append(note)
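
The snippet stops after collecting the [note, velocity, delta-time] vectors, but the imports hint at the intended next step: scale the vectors and fit an LSTM. A minimal continuation sketch; the window length and layer sizes are assumptions, not the original model:

scaler = MinMaxScaler()
notes_scaled = scaler.fit_transform(np.array(notes))

window = 20  # assumed sequence length
X = np.array([notes_scaled[i:i + window]
              for i in range(len(notes_scaled) - window)])
y = notes_scaled[window:]

model = Sequential()
model.add(LSTM(64, input_shape=(window, 3)))
model.add(Dropout(0.2))
model.add(Dense(3))
model.add(Activation('linear'))
model.compile(loss='mse', optimizer=RMSprop())
model.fit(X, y, batch_size=64, epochs=10)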
Code example #4
from mido import MidiFile
from mido import MidiTrack

with MidiFile() as new_mid:
    new_track = MidiTrack()

    mid = MidiFile('midi/someone-like-you.mid')

    print('mid', mid)
    for i, track in enumerate(mid.tracks):
        print('len(track)', len(track))
        for message in track:
            print('message', message)
            new_track.append(message)

    new_mid.tracks.append(new_track)

    print('ALL TRACKS APPENDED')
    new_mid.save('new_song.mid')
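
Note that appending messages from several tracks into one track this way concatenates them end to end. To interleave multiple tracks while preserving their relative timing, mido ships a merge helper:

import mido
from mido import MidiFile

mid = MidiFile('midi/someone-like-you.mid')
merged = mido.merge_tracks(mid.tracks)  # one track, delta times preserved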
Code example #5
import math
import warnings

import mido
import music21
from mido import MidiFile
from tqdm import tqdm


def midiToText(filename):

    tqdm.write('File: {}'.format(filename))

    # read from input filename
    mid = MidiFile(filename)

    tqdm.write('Length (sec): {}'.format(mid.length))
    tqdm.write('Ticks per beat: {}'.format(
        mid.ticks_per_beat))  # e.g. 96/480 per beat (crotchet)

    # check for multiple tempos (e.g. change in tempo halfway through piece)
    check_multiple_tempos = []

    # instantiate final tempo for piece
    tempo = 120
    """
    What is a channel? vs What is a track?
    e.g.:
    • Track 1 contains the notes played by right hand with a piano voice on channel 0.
    • Track 2 contains the notes played by left hand with the same piano voice on channel 0, too.
    • Track 3 contains the bass voice on channel 1.
    • Track 4 contains a clarinet voice on channel 2.
    • Track 5 contains the drums on channel 9.
    """

    for i, track in enumerate(mid.tracks):

        tqdm.write('Track {}: {}'.format(i, track.name))

        for msg in track:
            if msg.type == 'set_tempo':  # Note: is_meta
                msg_bpm = mido.tempo2bpm(
                    msg.tempo
                )  # convert from microseconds to bpm (e.g. 500000 us to 120 bpm)
                msg_bpm_int = int(msg_bpm)
                if msg_bpm != msg_bpm_int:
                    warnings.warn(
                        'Non-integer bpm: {} (tempo) -> {} (bpm)'.format(
                            msg.tempo, msg_bpm))
                check_multiple_tempos.append(msg_bpm_int)

    if len(check_multiple_tempos) > 1:
        warnings.warn('Multiple tempos: {}'.format(check_multiple_tempos))
        tempo = check_multiple_tempos[0]

    elif len(check_multiple_tempos) == 0:  # does this even happen?
        warnings.warn('No tempo: setting default 120')
        tempo = 120

    else:  # only one tempo
        tempo = check_multiple_tempos[0]

    print('Tempo: {}'.format(tempo))

    # get total time of piece
    # mid.length returns total playback time in seconds
    length_in_ticks = mid.length / 60 * tempo * mid.ticks_per_beat
    print(length_in_ticks)

    # contains arrays of messages (only notes) for each track
    messages_list = []

    for i, track in enumerate(mid.tracks):

        # create new nested list for each track
        messages_list.append([])

        for msg in track:

            if msg.type == 'note_on':
                messages_list[i].append(msg)

            elif msg.type == 'note_off':
                # convert to note_on with velocity=0
                new_msg = mido.Message('note_on',
                                       note=msg.note,
                                       velocity=0,
                                       time=msg.time)
                messages_list[i].append(new_msg)

    # remove empty lists
    messages_list = [track for track in messages_list if len(track) > 0]

    # group elements into similar delta times (e.g. time: [48, 0], [96, 0, 0, 0])

    grouped_messages_list = []
    for x, track in enumerate(messages_list):
        grouped_messages_list.append([])

        count = 0
        temp_count = 0
        while count < len(track):

            # add current msg (should be time ≠ 0)
            new_group = {'group': [track[count]], 'time': track[count].time}

            # add one for current msg added at start
            count += 1

            # freeze current value of count
            temp_count = count

            # add all following msgs that are time = 0
            for i in range(len(track) - temp_count):

                msg = track[temp_count + i]
                if msg.time == 0:
                    new_group['group'].append(msg)
                    count += 1

                # break before next non-zero time
                else:
                    break

            # append temp grouped msgs back to group_messages
            grouped_messages_list[x].append(new_group)
    """
    Generation of text
    
    Set top track (lowest index) to be 'melody'
    With all other tracks to be 'accomp<n>' where <n> will be an integer starting from 0 (accompaniment)
    
    Note: Actual "theoretical" melody may cross over into other tracks (i.e. "accompaniment")
    """

    # instantiate text list
    # [CLS] (classification) used for indicating start of input (using other model standards)
    result_list = ['[CLS]', 'tempo{}'.format(tempo), '[127]']

    # loop through grouped messages and check for delta time differences between tracks

    # to keep track of time passed during the piece
    current_wait_time_elapsed = 0
    time_embed_counter = 126
    time_embed_interval = math.ceil(
        length_in_ticks / 127
    )  # rounding up - should prevent underflow of time i.e. [0] comes before end of piece

    while max(len(track) for track in grouped_messages_list) > 0:

        all_first_groups = []
        for t in grouped_messages_list:

            # if track is empty replace with None
            if len(t) == 0:
                all_first_groups.append(None)

            else:
                # use pop to remove from list
                all_first_groups.append(t.pop(0))

        # all first times - use None for empty tracks (already replaced with None above)
        all_first_times = [
            group['time'] if group is not None else None
            for group in all_first_groups
        ]

        # get min times in all_first_times ignoring None
        min_dt = min(t for t in all_first_times if t is not None)

        # append wait
        if min_dt != 0:
            wait_text = 'wait:{}'.format(min_dt)
            result_list.append(wait_text)

            current_wait_time_elapsed += min_dt

            if time_embed_counter != 0:

                # check for insertion of wait (word) embedding
                if current_wait_time_elapsed > time_embed_interval:

                    time_embed_multiple = current_wait_time_elapsed // time_embed_interval
                    time_pushover = current_wait_time_elapsed - time_embed_interval * time_embed_multiple

                    word_embedding = '[{}]'.format(time_embed_counter)
                    result_list.append(word_embedding)

                    current_wait_time_elapsed = time_pushover
                    time_embed_counter -= time_embed_multiple

                    # time_embed_counter cannot be 0 due to integer rounding

        for i, track_group in enumerate(all_first_groups):

            # check if None (no notes left in that track)
            if track_group is None:
                continue

            if all_first_times[i] == min_dt:
                for msg in track_group['group']:

                    # convert the MIDI note number to a name like 'A4'
                    note = music21.note.Note(msg.note)
                    note_name = note.nameWithOctave

                    track_type = 'melody' if i == 0 else 'accomp{}'.format(i - 1)
                    new_text = '{track_type} v{vel} {note}'.format(
                        track_type=track_type,
                        vel=msg.velocity,
                        note=note_name)

                    result_list.append(new_text)

            elif all_first_times[i] > min_dt:

                time_difference = all_first_times[i] - min_dt

                # prepend filler wait to remaining track
                new_filler_group = {
                    'group': all_first_groups[i]['group'],
                    'time': time_difference
                }  # no need to .copy()
                grouped_messages_list[i].insert(0, new_filler_group)

        # Possible scenario: ONLY if at the start, one track has a rest e.g. time=96

    print('Final time embedding: {}'.format(time_embed_counter))
    result_list.append('[0]')
    result_list.append('[SEP]')

    result_string = ' '.join(result_list)

    return result_string
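
The manual length_in_ticks arithmetic above is equivalent to mido's own converter (referenced in the original as a commented-out alternative), once the bpm is turned back into a microseconds-per-beat tempo:

length_in_ticks = mido.second2tick(mid.length,
                                   ticks_per_beat=mid.ticks_per_beat,
                                   tempo=mido.bpm2tempo(tempo))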
Code example #6
from mido import MidiFile, MidiTrack


def get_max_channel():
    max_channel = -1
    for msg in midi_file:
        # meta and sysex messages carry no channel attribute
        if hasattr(msg, 'channel') and msg.channel > max_channel:
            max_channel = msg.channel
    return max_channel


def copy_note(item, n, velocity, length):
    item.copy_note(note=n, velocity=velocity, time=length)


def copy_file(file):
    mid = MidiFile()
    for i, track in enumerate(file.tracks):
        mid.tracks.append(MidiTrack())
        for msg in track:
            if msg.type == 'note_on' or msg.type == 'note_off' or msg.type == 'program_change':
                mid.tracks[i].append(msg.copy())
    filename = '../generated.mid'
    mid.save(filename)
    return filename


file_name = '../../Example MIDI Files/Mario_something.mid'
midi_file = MidiFile(file_name)

print_messages()
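
print_messages is defined elsewhere in this project; a plausible sketch of such a helper (an assumption, not the original code):

def print_messages():
    # Assumed helper: dump every message in the loaded file, track by track.
    for i, track in enumerate(midi_file.tracks):
        print('Track {}: {}'.format(i, track.name))
        for msg in track:
            print(msg)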
Code example #7
from mido import Message, MidiFile, MidiTrack, MetaMessage

mid = MidiFile(type=0)
track = MidiTrack()
mid.tracks.append(track)

track.append(MetaMessage('key_signature', key='C#'))
track.append(Message('program_change', program=12, time=0))
track.append(Message('note_on', note=64, velocity=64, time=32))
track.append(Message('note_off', note=64, velocity=127, time=32))

mid.save('new_song.mid')
Code example #8
# from Generative Music's Class 2 Code
from mido import MidiFile
file_name = '21_knives_out'
mid = MidiFile('dataset/radiohead/' + file_name + '.mid')

# look at the track names
for i, track in enumerate(mid.tracks):
    print((i, track.name))

# create array of notes
notes = []
messages = []
for message in mid.tracks[7]:
    messages.append(message)

for m in range(len(messages)):
    # print messages[m]
    note = ""
    time = ""
    if messages[m].type == 'note_on':
        message_components = str(messages[m]).split(' ')
        for item in message_components:
            if 'note=' in item:
                # notes.append(item.split('note=')[1])
                note = item.split('note=')[1]
        if m + 1 < len(messages):
            message_components = str(messages[m + 1]).split(' ')
            for item in message_components:
                if 'time=' in item:
                    time = item.split('time=')[1]
    if note != "":
        notes.append(str(note + "_" + time))
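
Parsing the note and time out of the message's string form works, but mido messages expose both as attributes, so the loop above can be written without string splitting. An equivalent sketch:

notes = []
for m in range(len(messages) - 1):
    if messages[m].type == 'note_on':
        # read the fields directly instead of parsing str(msg)
        notes.append('{}_{}'.format(messages[m].note, messages[m + 1].time))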
Code example #9
File: midiMain.py Project: nicksam112/DeepMIDI
def runModel():
    if not args.train:
        if args.conv:
            model = createConvModel()
        else:
            model = createLSTMModel()
        model.load_weights(args.load_weights)

    #start with some random notes to generate off of
    ls = []
    for x in range(lengthSample):
        ls.append(random.randint(0, dataInd.shape[0]))

    #set up midi file
    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)
    track.append(Message('program_change', program=12))

    time = 0
    #run for 10 seconds
    while time < 10.:
        print(str(ls[-5:]) + " " + str(time))

        #predict
        res = model.predict(
            np.expand_dims(np.array(ls[-lengthSample:]), axis=0))[0]
        listAdd = res

        #convert to note
        res = indToNote(np.argmax(np.array(res)))
        time += res[2]
        res[0] = np.rint(res[0])
        res[1] = np.rint(res[1] * 127)
        res[2] = np.rint(res[2] * 880)

        if (res[0] > 0):
            track.append(Message('note_on', note=int(res[1]),
                                 time=int(res[2])))
        else:
            track.append(
                Message('note_off', note=int(res[1]), time=int(res[2])))

        #convert back to a format the model can read
        res[1] = res[1] / 127.
        res[2] = res[2] / 880.

        #append the note to the running list of notes
        ls.append(np.argmax(np.array(listAdd)))

        #occasionally add random notes
        #helps avoid loops
        if time % 2. <= 0.05:

            addInd = random.randint(0, len(data[data.shape[0] - 1]))
            add = data[data.shape[0] - 1][addInd:addInd + 5]
            # for x in range(5):
            # 	add[x] = noteToInd(add[x])
            ls += add
            #ls.append(random.randint(0,dataInd.shape[0]))
    mid.save(args.save_song_path + 'new_song.mid')
Code example #10
File: generator.py Project: owen8877/markov-midi
def showSampleInfo(input: str):
    from mido import MidiFile
    midi = MidiFile('music/sample/{}.mid'.format(input))
    for i, t in enumerate(midi.tracks):
        print('Track: {}, Instrument: {}'.format(i, t.name))
Code example #11
def load_midi_file(files_mid):
    mid = MidiFile(files_mid, clip=False)
    if mid.length >= 30:
        return mid
    else:
        return 0
Code example #12
    def __init__(self, music_dir):

        log(INFO, 'Setting up MIDI reader')

        self.files = []
        self.files_count = 0
        self.play_in_progress = False
        self.play_file_index = -1
        self.play_event_index = -1

        log(INFO, 'Scanning {} directory for MIDI files'.format(music_dir))

        for dirname, dirnames, filenames in sorted(os.walk(music_dir)):

            for filename in sorted(filenames):

                fullname = os.path.join(dirname, filename)
                data = MidiFile(fullname, clip=True)
                tempo = midi.get_midi_file_tempo(data)
                events = midi.get_midi_file_events(data)
                length = int(data.length)

                if data.type == 2:

                    log(
                        WARNING,
                        'Dropping {}, as of unsupported MIDI type 2'.format(
                            filename))

                elif tempo == 0:

                    log(
                        WARNING,
                        'Dropping {}, as no valid tempo was found'.format(
                            filename))

                elif len(events) == 0:

                    log(
                        WARNING, 'Dropping {}, as no events were found'.format(
                            filename))

                else:

                    file_data = {}

                    file_data['name'] = filename
                    file_data['tempo'] = tempo
                    file_data['length'] = length
                    file_data['events'] = events
                    file_data['events_count'] = len(events)

                    log(
                        INFO, 'Registered file #{}: {}'.format(
                            self.files_count, filename))

                    self.files.append(file_data)
                    self.files_count += 1

        log(INFO, 'Found & parsed {} MIDI files'.format(self.files_count))

        return
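
The midi helper module used here belongs to the surrounding project. A rough sketch of what the reader appears to rely on (assumptions, not the project's code):

def get_midi_file_tempo(data):
    # Assumed behaviour: return the first set_tempo value, or 0 if none exists.
    for track in data.tracks:
        for msg in track:
            if msg.type == 'set_tempo':
                return msg.tempo
    return 0


def get_midi_file_events(data):
    # Assumed behaviour: flatten all non-meta messages into a single list.
    return [msg for track in data.tracks for msg in track if not msg.is_meta]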
Code example #13
import time

from mido import MidiFile

midiFile = 'SuperSmashBrosUltimate.mid'

# Other startup stuff
send = "\n"
time.sleep(3)


# Sends code to Arduino
def sendLine(code):
    print(int(code))
    ser.write((code + "\n").encode())
    # ser.write(send.encode())


# Opens and reads Midi file
for msg in MidiFile(midiFile):
    time.sleep(msg.time * 0.8)
    if not msg.is_meta:
        data = str(msg)

        # Filters out other initializing stuff
        if data[0:4] == "note":

            # If drive should turn on
            if data[6:7] == "n":
                if data[16] == "0":
                    code = ("3" + str(hertz[int(data[23:25])]) + "1")
                    sendLine(code)
                else:
                    code = ("2" + str(hertz[int(data[23:25])]) + "1")
                    sendLine(code)
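
hertz and ser are set up earlier in the original script. Under the obvious assumptions, hertz maps MIDI note numbers to drive frequencies and ser is a pyserial port; for example:

import serial

# Equal-tempered frequency for each MIDI note number (A4 = 440 Hz).
hertz = {n: int(round(440 * 2 ** ((n - 69) / 12))) for n in range(128)}

ser = serial.Serial('/dev/ttyACM0', 9600)  # port name is a placeholder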
Code example #14
import random as rd

import pandas as pd
from mido import Message, MidiFile, MidiTrack


def music(Key=0,
          BPM=0,
          Chord_prog=[0],
          beat=[0],
          song_name='new_song.mid',
          arpeggio=False,
          duree=8,
          instrumentbass=0,
          instrumentmain=0,
          probjump=0,
          probautocorr=0,
          probnextchordnote=0):
    print(duree)
    if Key == 0:
        Key = rd.randrange(36, 49)
    if BPM == 0:
        BPM = rd.randrange(50, 400)
    tempo = 60000 / BPM
    if Chord_prog == [0]:
        Chord_prog = cpg()
    Chord_roots = chord_root(Chord_prog)

    if beat == [0]:
        beat = bg_seq_combine(duree // 2)

    if instrumentbass == 0:
        instrumentbass = rd.randrange(0, 121)

    if instrumentmain == 0:
        instrumentmain = rd.randrange(0, 121)

    if probjump == 0:
        probjump = rd.randrange(1, 10)
    if probautocorr == 0:
        probautocorr = rd.random()
    if probnextchordnote == 0:
        probnextchordnote = rd.random()

    print('Chord progression:', Chord_prog, ', Key:', Key, ', BPM:', BPM,
          ', Beat:', beat)

    mid = MidiFile()
    track1 = MidiTrack()
    track2 = MidiTrack()
    track3 = MidiTrack()
    track4 = MidiTrack()
    melody = MidiTrack()
    mid.tracks.append(track1)
    mid.tracks.append(track2)
    mid.tracks.append(track3)
    mid.tracks.append(track4)
    mid.tracks.append(melody)

    track1.append(Message('program_change', program=instrumentbass, time=2))
    track2.append(Message('program_change', program=instrumentbass, time=2))
    track3.append(Message('program_change', program=instrumentbass, time=2))
    track4.append(Message('program_change', program=instrumentbass, time=2))
    melody.append(Message('program_change', program=instrumentmain, time=2))

    refrain = songbloc(Chord_prog, duree, couplet=0)
    couplet = songbloc(Chord_prog, duree, couplet=1)
    if Chord_prog[0] == 1:
        bridge_chords = cpg(starting_value=[2, 3, 6][rd.randrange(0, 3)])
    else:
        bridge_chords = cpg(starting_value=[1, 4, 5][rd.randrange(0, 3)])

    bridge = songbloc(bridge_chords, duree // 2, couplet=0)

    #### intro (empty melody)
    melody.append(Message('note_on', note=32, velocity=0, time=0))
    melody.append(
        Message('note_off', note=32, velocity=127, time=int(4 * 8 * tempo)))
    melodie_tot = couplet[0] + refrain[0] + couplet[0] + refrain[0] + refrain[
        0]  #+bridge[0]+bridge[0]+refrain[0]+refrain[0]
    list_list_temps = couplet[1] + refrain[1] + couplet[1] + refrain[
        1] + refrain[1]  #+bridge[1]+bridge[1]+refrain[1]+refrain[1]
    # melodie_tot = bridge[0]
    # list_list_temps = bridge[1]
    print(melodie_tot)
    loop = 1
    flat_list_temps = []
    for sublist in list_list_temps:
        for item in sublist:
            flat_list_temps.append(item)
    print(flat_list_temps)
    for j in range(loop):
        for i in range(len(flat_list_temps)):
            lanote = Key + 12 + melodie_tot[i]
            temps = int(flat_list_temps[i] * tempo)

            melody.append(
                Message('note_on',
                        note=lanote,
                        velocity=rd.randrange(-20, 20) + 64,
                        time=0))
            melody.append(
                Message('note_off', note=lanote, velocity=127, time=temps))

    muted_beat = [[rd.randrange(0, 2) for x in range(len(beat))]
                  for y in range(4)]
    random_arpeggio = rd.randrange(0, 3)
    print(muted_beat)

    for x in range(4):
        for i in Chord_roots:
            if arpeggio or random_arpeggio == 1:
                arpChord2(Key + i, tempo, track1)
                if duree == 8:
                    arpChord2(Key + i, tempo, track1)
            else:
                Chord(Key + i, tempo, track1, beat, velocity=muted_beat[0])
                Chord(Key + i + 7, tempo, track2, beat, velocity=muted_beat[1])
                Chord(Key + i + 12,
                      tempo,
                      track3,
                      beat,
                      velocity=muted_beat[2])

                if i in [0, 5, 7]:
                    Chord(Key + i + 4,
                          tempo,
                          track4,
                          beat,
                          velocity=muted_beat[3])
                else:
                    Chord(Key + i + 3,
                          tempo,
                          track4,
                          beat,
                          velocity=muted_beat[3])

    # for x in range(2):
    #     for i in chord_root(bridge_chords):
    #         if arpeggio == True or random_arpeggio==1:
    #             arpChord2(Key + i, tempo, track1)
    #             if duree//2 == 8:
    #                 arpChord2(Key + i, tempo, track1)
    #         else:
    #             Chord(Key + i, tempo, track1, beat, velocity=muted_beat[0])
    #             Chord(Key + i + 7, tempo, track2, beat, velocity=muted_beat[1])
    #             Chord(Key + i + 12, tempo, track3, beat, velocity=muted_beat[2])
    #
    #             if i in [0, 5, 7]:
    #                 Chord(Key + i + 4, tempo, track4, beat, velocity=muted_beat[3])
    #             else:
    #                 Chord(Key + i + 3, tempo, track4, beat, velocity=muted_beat[3])

    for x in range(3):
        for i in Chord_roots:
            if arpeggio or random_arpeggio == 1:
                arpChord2(Key + i, tempo, track1)
                if duree == 8:
                    arpChord2(Key + i, tempo, track1)
            else:
                Chord(Key + i, tempo, track1, beat, velocity=muted_beat[0])
                Chord(Key + i + 7, tempo, track2, beat, velocity=muted_beat[1])
                Chord(Key + i + 12,
                      tempo,
                      track3,
                      beat,
                      velocity=muted_beat[2])

                if i in [0, 5, 7]:
                    Chord(Key + i + 4,
                          tempo,
                          track4,
                          beat,
                          velocity=muted_beat[3])
                else:
                    Chord(Key + i + 3,
                          tempo,
                          track4,
                          beat,
                          velocity=muted_beat[3])
    # ending
    #arpChord2(Key + Chord_roots[0], tempo, track1)
    tempo = int(tempo)
    track1.append(
        Message('note_on',
                note=Key + Chord_roots[0] - 12,
                velocity=(rd.randrange(-20, 20) + 40),
                time=0))
    track1.append(
        Message('note_off',
                note=Key + Chord_roots[0] - 12,
                velocity=127,
                time=tempo * 4))
    track2.append(
        Message('note_on',
                note=Key + Chord_roots[0] + 7 - 12,
                velocity=(rd.randrange(-20, 20) + 40),
                time=0))
    track2.append(
        Message('note_off',
                note=Key + Chord_roots[0] + 7 - 12,
                velocity=127,
                time=tempo * 4))
    track3.append(
        Message('note_on',
                note=Key + Chord_roots[0],
                velocity=(rd.randrange(-20, 20) + 40),
                time=0))
    track3.append(
        Message('note_off',
                note=Key + Chord_roots[0],
                velocity=127,
                time=tempo * 4))
    if Chord_roots[0] in [0, 5, 7]:
        track4.append(
            Message('note_on',
                    note=Key + Chord_roots[0] + 4 - 12,
                    velocity=(rd.randrange(-20, 20) + 40),
                    time=0))
        track4.append(
            Message('note_off',
                    note=Key + Chord_roots[0] + 4 - 12,
                    velocity=127,
                    time=tempo * 4))
    else:
        track4.append(
            Message('note_on',
                    note=Key + Chord_roots[0] + 3 - 12,
                    velocity=(rd.randrange(-20, 20) + 40),
                    time=0))
        track4.append(
            Message('note_off',
                    note=Key + Chord_roots[0] + 3 - 12,
                    velocity=127,
                    time=tempo * 4))
    attributs = {
        'Chord_progression': [Chord_prog],
        'Song_name': song_name,
        'Key': Key,
        'BPM': BPM,
        'Beat': [beat],
        'is_arpeggio': arpeggio or random_arpeggio == 1,
        'Melody': [melodie_tot],
        'Muted_beat': [muted_beat],
        'list_temps': [list_list_temps],
        # 'probability_pause':Average(list_prob_pause),
        # 'probability_2':Average(list_prob_2),
        # 'probability_1':Average(list_prob_1),
        # 'probability_05':Average(list_prob_05),
        'probability_jump': probjump,
        'probability_autocorrelation': probautocorr,
        'probability_next_chord_note': probnextchordnote,
        'instrument_bass': instrumentbass,
        'instrument_main': instrumentmain
    }

    data = pd.DataFrame(attributs,
                        columns=[
                            'Chord_progression', 'Song_name', 'Key', 'BPM',
                            'Beat', 'is_arpeggio', 'Melody', 'Muted_beat',
                            'list_temps', 'probability_jump',
                            'probability_autocorrelation',
                            'probability_next_chord_note', 'instrument_bass',
                            'instrument_main'
                        ])
    #'probability_pause', 'probability_2', 'probability_1', 'probability_05',
    mid.save(song_name)
    return (data)
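
A minimal invocation; every parameter left at its default is randomised before the file is written:

data = music(song_name='demo_song.mid')
print(data[['Key', 'BPM', 'instrument_main']])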
Code example #15
import os

from mido import MetaMessage, MidiFile
from moviepy.editor import (AudioFileClip, CompositeVideoClip, TextClip,
                            VideoFileClip)


def poop(source, destination, midi_file, stretch, fadeout, rebuild, max_stack):
    """
    Create multiple pitchshifted versions of source video and arrange them to
    the pattern of the midi_file, also arrange the video if multiple notes play
    at the same time.
    """

    print "Reading input files"
    video = VideoFileClip(source, audio=False)
    """
    Non-main tracks are 30% the size of the main and have a white border and a
    margin around them.
    """
    smaller = video.resize(0.3)\
        .margin(mar=2, color=3*[255])\
        .margin(mar=8, opacity=0)
    audio = AudioFileClip(source, fps=44100)
    mid = MidiFile(midi_file)
    ignoredtracks = ["Percussion", "Bass"]

    print "Analysing MIDI file"
    notes = []  # the number of messages in each track
    lowest = 127  # will contain the lowest note
    highest = 0  # will contain the highest note
    for i, track in enumerate(mid.tracks):
        notes.append(0)
        #if track.name in ignoredtracks: continue
        for message in track:
            if message.type == "note_on":
                lowest = min(lowest, message.note)
                highest = max(highest, message.note)
                notes[-1] += 1
    """
    The main track is the one featured in the center. It is probably the one
    with the most notes. Also record the lowest, highest, and average note to
    generate the appropriate pitches.
    """
    maintrack = max(enumerate(notes), key=lambda x: x[1])[0]
    midpitch = int((lowest + highest) / 2)
    print "Main track is probably", str(
        maintrack) + ":", mid.tracks[maintrack].name
    mid.tracks.insert(0, mid.tracks.pop(maintrack))  # move main track to front
    notes.insert(0, notes.pop(maintrack))  # move main note count to front
    print sum(
        notes
    ), "notes ranging from", lowest, "to", highest, "centering around", midpitch

    print "Transposing audio"
    sound = audio.to_soundarray(fps=44100)  # source, original audio
    tones = range(lowest - midpitch,
                  highest - midpitch)  # the range of pitches we need
    pitches = []  # this will contain the final AudioFileClips
    if not os.path.exists("pitches/"):
        print "Creating folder for audio files"
        os.makedirs("pitches/")
    for n in tones:
        """
        Pitches only need to be generated if they do not already exist or if
        we force the creation of new ones. Save them in order in pitches.
        """
        name = "pitches/" + source + "_" + str(n) + ".mp3"
        if not os.path.isfile(name) or rebuild:
            print "Transposing pitch", n
            splitshift(sound, n).write_audiofile(name)
        pitches.append(AudioFileClip(name, fps=44100))

    print "Adding video clips"
    clips = [video.set_duration(1)]  # to set the video size
    positions = [("left", "bottom"), ("right", "bottom"), ("left", "top"),
                 ("right", "top"), ("center", "bottom"), ("center", "top"),
                 ("left", "center"), ("right", "center")]  # non-main tracks
    """
    curpos is the current corner position on the screen and changes with each track.
    cache is used to make a unique file name whenever a new temporary file is created.
    endtime will be used at the end to set the end TextClip. It is the latest time any clip ends.
    """
    curpos = -2
    cache = endtime = 0
    for i, track in enumerate(mid.tracks):
        #if track.name in ignoredtracks: continue
        print("Processing {} notes: {}".format(notes[i], track.name))
        t = 1.0  # not 0 because we added one second of original video for size
        opennotes = []  # will contain all notes that are still playing
        curpos += 1
        for message in track:
            if not isinstance(message, MetaMessage):
                message.time *= stretch
                t += message.time
                if message.type == "note_on":
                    """
                    Add a video clip with the appropriate starting time and
                    pitch. Also add an entry to opennotes (we don't know when
                    the note ends yet).
                    """
                    part = video
                    mainvid = i == 0  # and len(opennotes) == 0
                    if not mainvid: part = smaller
                    part = part\
                        .set_audio(pitches[min(len(pitches)-1, max(0, message.note-lowest))])\
                        .set_start(t/1000)
                    opennotes.append((message.note, len(clips), t))
                    """
                    If this isn't the main track, the video will be smaller and
                    placed at the edge. We'll get a position for each track.
                    If there is more than one video playing in this track, it
                    will be placed slighly closer to the center.
                    """
                    if not mainvid:
                        stackheight = 6
                        part = part.set_position(positions[curpos %
                                                           len(positions)])
                    clips.append(part)
                elif message.type == "note_off":
                    reference = message.note
                    index = 0
                    """
                    Find the note that ended in opennotes using the note.
                    Get the index and start time, remove it from opennotes.
                    """
                    for note in reversed(opennotes):
                        n, j, d = note
                        if n == reference:
                            index = j
                            opennotes.remove(note)
                            break
                    """
                    Get the clip for the open note, set its time to the
                    difference between time now and start time. Have it fade out
                    and update the endtime if needed.
                    """
                    clips[index] = clips[index].set_duration((t - d) / 1000 +
                                                             fadeout)
                    clips[index] = clips[index].crossfadeout(fadeout)
                    endtime = max(endtime, t / 1000 + fadeout)
                if len(clips) == max_stack:
                    """
                    To save some memory, the clips in memory are emptied
                    whenever they reach a certain size. All clips that are closed
                    are merged into one file on disk.
                    """
                    upuntil = len(clips)  # the first open note
                    if len(opennotes) > 0: _, upuntil, _ = opennotes[0]
                    stillopen = clips[upuntil:]
                    print "Stack reached", len(
                        clips), "clips, merging", upuntil
                    """
                    Save a temporary file to disk with all clips we can safely
                    discard from clips.
                    """
                    newcache = destination + ".temporary" + str(cache) + ".mp4"
                    CompositeVideoClip(
                        clips[:upuntil]).write_videofile(newcache)
                    cache += 1
                    """
                    Shift all opennotes' indices down by the number of clips
                    merged and saved to disk. Set clips to be the new, merged
                    clip and any leftover clips.
                    """
                    # use a fresh index so the outer track index i is not clobbered
                    for k, note in enumerate(opennotes):
                        n, j, d = note
                        opennotes[k] = (n, j - upuntil + 1, d)
                    clips = [VideoFileClip(newcache)] + stillopen

    end = TextClip("pitch.py", font="Arial", color="white", fontsize=70)\
        .set_pos("center")\
        .set_duration(1)\
        .set_start(endtime)
    clips.append(end)  # add an ending frame
    """
    Combine all leftover clips, write them to the final file and remove
    temporary files created before.
    """
    print "Combining", len(clips), "clips"
    final = CompositeVideoClip(clips).set_start(1)
    final.write_videofile(destination)
    clips = []
    if cache == 1:
        print("Removing one temporary file")
    elif cache > 1:
        print("Removing", cache, "temporary files")
    for i in range(0, cache):
        os.remove(destination + ".temporary" + str(i) + ".mp4")
Code example #16
from mido import MidiFile
import sys

# This code dumps MIDI messages to STDOUT
#
# Usage:
# % python dumpmidi.py file1.mid file2.mid ... fileN.mid

files = sys.argv[1:]
for f in files:
    midi = MidiFile(f)
    for i, track in enumerate(midi.tracks):
        for msg in track:
            print(str(msg))
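
mido can produce much the same dump with a built-in helper:

for f in files:
    MidiFile(f).print_tracks()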
Code example #17
File: midi2.py Project: crr0tz-4-d1nn3r/CTFs
"""
One of our guys found a strange midi file
lying around on our servers. We think there
might be some hidden data in it. See if you 
can help us out!
<challenge.mid
"""

# midi file can be opened with Audacity
# Midi file format primer:
# http://www.ccarh.org/courses/253/handout/smf/
# First I thought it might be in the notes:
# https://www.wavosaur.com/download/midi-note-hex.php
# looked for python libraries that could read and decode
# https://morioh.com/p/144a98b4ab3a
# Message was not in the notes, but in the velocity of the notes

from mido import MidiFile

mid = MidiFile('challenge.mid')

# message in the non-zero velocity info
flag = ''
for msg in mid.tracks[0]:
    if msg.type == 'note_on':
        if msg.velocity != 0:
            flag += chr(msg.velocity)
            if chr(msg.velocity) == '}':
                break

print(flag)
Code example #18
File: Midi2ROM.py Project: abbati-simone/Midi-Altera
import getopt
import os
import sys

from mido import MidiFile


def main():
	try:
		opts, args = getopt.getopt(sys.argv[1:], 'hi:o:p:f:b:t:n:r:', ['help', 'input-file=', 'output-ROM-path=', 'output-parameter-path=', 'output-format=', 'bpm=', 'output-tracks=', 'ROM-numbering=', 'ROM-naming-prefix='])
	except getopt.GetoptError as err:
		# print help information and exit:
		print(err) # will print something like "option -a not recognized"
		usage()
		sys.exit(2)

	inputfile = None
	outputROMpath = None
	outputparampath = None
	outputformat = "verilog"
	bpm = 240
	tracks = None
	numbering = 1
	naming_prefix = None
	abs_max_delay_ticks = 0
	abs_min_delay_ticks = 10**10
	abs_max_note_bits = 0
	abs_max_address_bits = 0
	abs_max_delay_bits = 0

	for o, a in opts:
		if o in ("-h", "--help"):
			usage()
			sys.exit()
		elif o in ("-i", "--input-file"):
			inputfile = a
		elif o in ("-o", "--output-ROM-path"):
			outputROMpath = a
		elif o in ("-p", "--output-parameter-path"):
			outputparampath = a
		elif o in ("-f", "--output-format"):
			outputformat = a
		elif o in ("-b", "--bpm"):
			bpm = int(a)
		elif o in ("-t", "--output-tracks"):
			tracks = a.split(',')
		elif o in ("-n", "--ROM-numbering"):
			numbering = int(a)
		elif o in ("-r", "--ROM-naming-prefix"):
			naming_prefix = a
		else:
			assert False, "unhandled option"

	if inputfile is None or outputROMpath is None or outputparampath is None:
		usage()
		sys.exit(2)

	print("Input file: %s" % (inputfile))
	print("Output ROM path: %s" % (outputROMpath))
	print("Output Parameters path: %s" % (outputparampath))
	print("Output format: %s" % ("Verilog" if outputformat == "verilog" else "Quartus MIF"))
	print("Tracks to extract: %s" % (tracks if tracks is not None else 'ALL'))
	print("ROMs numbering mode: %s" % ('as tracks index extracted' if numbering == 0 else 'squentially from 0'))
	print("ROMs naming prefix: %s" % (naming_prefix if naming_prefix is not None else '[nothing]'))

	mid = MidiFile(inputfile)

	#mido.merge_tracks(tracks)
	
	tracks_parameters = {}
	k = 0
	for (i, track) in enumerate(mid.tracks):
		if tracks is None or str(i) in tracks:
			index = str(i) if numbering == 0 else str(k)
			name_prefix = ('%s_' % (naming_prefix,) if naming_prefix is not None else '') + index
			p = track_parameters(track, mid.ticks_per_beat, outputparampath, index, name_prefix)
			if not p["empty_track"]:

				if p["delay_max"] > abs_max_delay_ticks:
					abs_max_delay_ticks = p["delay_max"]

				if p["delay_min"] < abs_min_delay_ticks:
					abs_min_delay_ticks = p["delay_min"]

				if p["note_nbit"] > abs_max_note_bits:
					abs_max_note_bits = p["note_nbit"]

				if p["messages_address_bits"] > abs_max_address_bits:
					abs_max_address_bits = p["messages_address_bits"]

				if p["delay_nbit"] > abs_max_delay_bits:
					abs_max_delay_bits = p["delay_nbit"]

				tracks_parameters[i] = p
				if outputformat == "verilog":
					outputVerilog(mid, outputROMpath, i, name_prefix, p)
				elif outputformat == "quartus":
					outputQuartus(mid, outputROMpath, i, name_prefix, p)
				k += 1

	name_prefix = '%s_' % (naming_prefix,) if naming_prefix is not None else ''
	with open(os.path.join(outputparampath, "Parameters_%sgeneral.vh" % (name_prefix,)), "w") as file:
		print("parameter ROMS_number=%d;" % (len(tracks_parameters), ), file=file)
		print("parameter note_max_bits=%d;" % (abs_max_note_bits, ), file=file)
		print("parameter address_max_bits=%d;" % (abs_max_address_bits, ), file=file)
		print("parameter delay_max_bits=%d;" % (abs_max_delay_bits, ), file=file)
		tpb = mid.ticks_per_beat
		print("parameter ticks_per_beat=%d;" % (tpb, ), file=file)
		print("parameter BPM=%d;" % (bpm,), file=file)
		bps = bpm/60.0
		print("parameter BPS=%s;" % (bps,), file=file)
		ticks_hz = round(bps * mid.ticks_per_beat)
		print("parameter ticks_hz=%d;" % (ticks_hz, ), file=file)
		delay_clock_per_tick = 1000
		print("parameter delay_clocks_per_tick=%d;" % (delay_clock_per_tick, ), file=file)
		delay_clock_per_hz = round(ticks_hz * delay_clock_per_tick)
		print("parameter delay_clock_hz=%d;" % (delay_clock_per_hz, ), file=file)
		print("parameter delay_reg_bits=%d;" % (int.bit_length(round(delay_clock_per_hz * abs_max_delay_ticks)), ), file=file)

		print("", file=file)
Code example #19
def create_tiles(filepath):

    global tile_limit
    # check to see if tile limit has been set
    if tile_limit is not None:
        single_tile_limit = tile_limit

    # store file as MidiFile obj
    mid = MidiFile(filepath)
    msglist = []

    # append all messages beyond header track to list
    for track in mid.tracks[1:]:
        for msg in track:
            msglist.append(msg)

    # stores accumulated time of each message index
    acc_time_index = {}
    acc_time = 0
    for i in range(0, len(msglist)):
        time = int(re.search(r'time=(\d+)', str(msglist[i])).group(1))
        acc_time += time
        acc_time_index.update({i: acc_time})

    # check each offset of the message list
    for offset in range(0, len(msglist)):
        # check across a range of wavelengths
        for wavelength in range(lower_wavelength, upper_wavelength):
            # validate if the tile repeats
            isvalid = True
            for i in range(wavelength):
                if tile_limit is not None:
                    if single_tile_limit == 0:
                        break
                # ensure values do not exceed list index range
                if offset+i >= len(msglist) or offset+wavelength+i >= len(msglist):
                    isvalid = False
                    break
                if (msglist[offset+i] != msglist[offset+wavelength+i]):
                    isvalid = False
                    break
                # create tile from matching wavelength
                if (isvalid):
                    tile = []
                    for i in range(wavelength):
                        tile.append(msglist[offset+i])

                    # dummy MidiFile created to determine absolute time of tile for metadata
                    temp_mid = mido.MidiFile(type=1)
                    temp_mid.ticks_per_beat = mid.ticks_per_beat
                    track = mido.MidiTrack()
                    if find_program_change(filepath) is not None:
                        prog_change = mido.parse_string(re.sub(
                            r'time=(\d+)', r'time=0', str(find_program_change(filepath))))
                        track.append(prog_change)
                    for line in tile:
                        track.append(line)
                    temp_mid.tracks.append(track)

                    # save tile metadata to json formatted string
                    tick_time = acc_time_index[offset]
                    current_time = mido.tick2second(
                        tick_time, mid.ticks_per_beat, find_tempo(filepath))

                    tile_dict = {
                        'file': filepath,
                        'offset': offset,
                        'wavelength': wavelength,
                        'start_time_seconds': ('%.2f' % current_time),
                        'total_length_seconds': ('%.2f' % temp_mid.length)
                    }
                    meta_dict = json.dumps(tile_dict)

                    # MidiFile to be created as tile
                    new_mid = mido.MidiFile(type=1)
                    new_mid.ticks_per_beat = mid.ticks_per_beat

                    # header track containing same info as original file
                    header_track = mido.MidiTrack()
                    for msg in mid.tracks[0]:
                        if msg.type != 'end_of_track':
                            header_track.append(msg)
                    header_track.append(MetaMessage(
                        'text', text=str(tile_dict), time=0))
                    new_mid.tracks.append(header_track)

                    # music track containing the notes
                    music_track = mido.MidiTrack()
                    # add program change message
                    if find_program_change(filepath) is not None:
                        prog_change = mido.parse_string(re.sub(
                            r'time=(\d+)', r'time=0', str(find_program_change(filepath))))
                        music_track.append(prog_change)
                    # add notes from tile list
                    for line in tile:
                        music_track.append(line)
                    new_mid.tracks.append(music_track)

                    try:
                        # save to new file if tile within valid time range
                        if new_mid.length > 0 and new_mid.length <= maximum_time:
                            file_name = os.path.basename(filepath)
                            instrument = int(
                                re.search(r'program=(\d+)', str(find_program_change(filepath))).group(1))
                            tile_dir = '/tiles/' + \
                                instruments[instrument]+'/'
                            Path(
                                output_dir+tile_dir).mkdir(parents=True, exist_ok=True)

                            new_mid.save(output_dir+tile_dir+'%s_%s_%d_%d.mid' %
                                         (file_name[:-4], instruments[instrument], offset, wavelength))

                            # print info to screen for development
                            """
                            print('\nFile name: %s' % filepath)
                            for i, track in enumerate(new_mid.tracks):
                                print('Track number: %d' % (i+1))
                                for msg in track:
                                    print(msg)
                                print(
                                    '\n____________________________________________________________________________________________________________________________________\n')
                            """

                            try:
                                # save json file
                                json_dir = '/tile_metadata/' + \
                                    instruments[instrument]+'/'
                                Path(
                                    output_dir+json_dir).mkdir(parents=True, exist_ok=True)
                                f = open(output_dir+json_dir+'%s_%s_%d_%d.json' % (
                                    file_name[:-4], instruments[instrument], offset, wavelength), 'w')
                                f.write(meta_dict)
                            except Exception:
                                print('JSON object not created for file: %s\n' %
                                      filepath)
                                for msg in new_mid.tracks[0]:
                                    print(msg)
                                print(
                                    '\n____________________________________________________________________________________________________________________________________\n____________________________________________________________________________________________________________________________________\n\n')

                            # decrement tile limit for next loop
                            if tile_limit is not None:
                                single_tile_limit -= 1

                    except Exception:
                        print('Error with tile creation for file: %s\nAttempted tile length %d' % (
                            filepath, new_mid.length))
                        for msg in new_mid.tracks[0]:
                            print(msg)
                        print(
                            '\n____________________________________________________________________________________________________________________________________\n____________________________________________________________________________________________________________________________________\n\n')
Code example #20
File: midi.py Project: BatMattBat/AI
import numpy as np
from mido import MidiFile


def midi_to_samples(fname):
    has_time_sig = False
    flag_warning = False
    mid = MidiFile(fname)
    ticks_per_beat = mid.ticks_per_beat
    ticks_per_measure = 4 * ticks_per_beat

    for i, track in enumerate(mid.tracks):
        for msg in track:
            if msg.type == 'time_signature':
                new_tpm = msg.numerator * ticks_per_beat * 4 // msg.denominator
                if has_time_sig and new_tpm != ticks_per_measure:
                    flag_warning = True
                ticks_per_measure = new_tpm
                has_time_sig = True
    if flag_warning:
        print "  ^^^^^^ WARNING ^^^^^^"
        print "    " + fname
        print "    Detected multiple distinct time signatures."
        print "  ^^^^^^ WARNING ^^^^^^"
        return []

    all_notes = {}
    for i, track in enumerate(mid.tracks):
        abs_time = 0
        for msg in track:
            abs_time += msg.time
            if msg.type == 'note_on':
                if msg.velocity == 0:
                    continue
                note = msg.note - (128 - num_notes) // 2
                assert (note >= 0 and note < num_notes)
                if note not in all_notes:
                    all_notes[note] = []
                else:
                    single_note = all_notes[note][-1]
                    if len(single_note) == 1:
                        single_note.append(single_note[0] + 1)
                all_notes[note].append(
                    [abs_time * samples_per_measure // ticks_per_measure])
            elif msg.type == 'note_off':
                if len(all_notes[note][-1]) != 1:
                    continue
                all_notes[note][-1].append(abs_time * samples_per_measure //
                                           ticks_per_measure)
    for note in all_notes:
        for start_end in all_notes[note]:
            if len(start_end) == 1:
                start_end.append(start_end[0] + 1)
    samples = []
    for note in all_notes:
        for start, end in all_notes[note]:
            sample_ix = start // samples_per_measure
            while len(samples) <= sample_ix:
                samples.append(
                    np.zeros((samples_per_measure, num_notes), dtype=np.uint8))
            sample = samples[sample_ix]
            start_ix = start - sample_ix * samples_per_measure
            if False:  # flip to True to mark each note's full duration instead of only its onset
                end_ix = min(end - sample_ix * samples_per_measure,
                             samples_per_measure)
                while start_ix < end_ix:
                    sample[start_ix, note] = 1
                    start_ix += 1
            else:
                sample[start_ix, note] = 1
    return samples
Code example #21
import sys

from mido import MidiFile

try:
    assert (len(sys.argv) in [5, 6])
    S_MIDI = sys.argv[1]
    D_MIDI = sys.argv[2]

    S_BEATS_PER_MEASURE = 4
    D_BEATS_PER_MEASURE = 3
    S_BEAT_OFFSET = int(sys.argv[3])
    S_TEMPO_SCALE = 1 / float(sys.argv[4])

    DELETE_FOURTH_MEASURE = (len(sys.argv) == 6)
except Exception:
    print("Usage: waltzify.py source destination offset speed [skip]")
    exit(1)

src = MidiFile(S_MIDI)
dst = MidiFile()


def transform_ticks(s_ticks):
    s_total_beats = (s_ticks / src.ticks_per_beat) + (S_BEATS_PER_MEASURE -
                                                      S_BEAT_OFFSET)
    s_measures = s_total_beats // S_BEATS_PER_MEASURE
    s_beats = s_total_beats - s_measures * S_BEATS_PER_MEASURE
    d_measures = s_measures
    if DELETE_FOURTH_MEASURE:
        d_beats = s_beats if s_beats < 3 else 3
    else:
        d_beats = s_beats if s_beats < 2 else 2 + (s_beats - 2) / 2
    d_total_beats = d_measures * D_BEATS_PER_MEASURE + d_beats
    return int(S_TEMPO_SCALE * ((d_total_beats * src.ticks_per_beat) -
Code example #22
File: Song.py Project: sl4cks/PoemGeneratedMIDI
    def play(self, midi_file, port):
        outport = mido.open_output(port)
        for msg in MidiFile(midi_file):
            time.sleep(msg.time)
            if not msg.is_meta:
                outport.send(msg)
Code example #23
from mido import MidiFile, Message, MidiTrack

mid = MidiFile('song_encoded.mid')

count_pc = 0

for i, tracks in enumerate(mid.tracks):
    j = 0
    while j < len(tracks):
        if tracks[j].type == 'program_change':
            # a letter is encoded across three consecutive program_change messages
            if (j + 2 < len(tracks)
                    and tracks[j + 1].type == 'program_change'
                    and tracks[j + 2].type == 'program_change'):
                letter_l = tracks[j].program
                letter_h = tracks[j + 1].program << 4
                letter_byte = letter_h | letter_l
                letter = bytes([letter_byte]).decode('utf-8')
                print(letter)
                count_pc += 1
                j = j + 2
        j += 1
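
For reference, the scheme this loop reverses would look roughly like the following: each byte of the hidden text is split into nibbles stored in the program numbers of consecutive program_change messages, with a third message as a spacer (a sketch of the assumed encoder, not the challenge's actual code):

from mido import Message, MidiFile, MidiTrack

def encode_text(text, out_path='song_encoded.mid'):
    mid = MidiFile()
    track = MidiTrack()
    mid.tracks.append(track)
    for byte in text.encode('utf-8'):
        track.append(Message('program_change', program=byte & 0x0F))  # low nibble
        track.append(Message('program_change', program=byte >> 4))    # high nibble
        track.append(Message('program_change', program=0))            # spacer
    mid.save(out_path)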
Code example #24
import random as r

import numpy as np
from mido import Message, MidiFile, MidiTrack

notes = []

for i in range(100):
    x = r.randint(40, 80)
    y = r.randint(50, 70)
    z = 0.5 + r.random() * 2

    notes.append([x] + [y] + [z])
    print(notes)

mid = MidiFile()
track = MidiTrack()
mid.tracks.append(track)

for note in notes:
    # 147 (0x93) is the status byte for note_on on channel 3
    note = np.insert(note, 0, 147)
    bytesOfInt = note.astype(int)
    print(note)
    msg = Message.from_bytes(bytesOfInt[0:3])
    # Rescale to midi delta ticks. Arbitrary value for now
    time = int(note[3] / 0.001025)
    msg.time = time
    track.append(msg)

mid.save('rSong.mid')
Code example #25
import os

import numpy as np
from mido import MidiFile


def save_data(path, quant, one_hot=True):
    '''Creates a folder containing the quantised MIDI files.

    Arguments:
    path -- Quantised directory containing midis.
    quant -- Level of quantisation
    '''

    path_prefix, path_suffix = os.path.split(path)

    # Handle case where a trailing / requires two splits.
    if len(path_suffix) == 0:
        path_prefix, path_suffix = os.path.split(path_prefix)

    array_out = os.path.join(path_prefix, path_suffix + '_inputs')
    velocity_out = os.path.join(path_prefix, path_suffix + '_velocities')

    total_file_count = 0
    processed_count = 0

    for root, dirs, files in os.walk(path):
        for file in files:
            # print os.path.join(root, file)
            if file.split('.')[-1] == 'mid' or file.split('.')[-1] == 'MID':
                total_file_count += 1

                out_array = '{}.npy'.format(os.path.join(array_out, file))
                out_velocity = '{}.npy'.format(os.path.join(
                    velocity_out, file))
                midi_path = os.path.join(root, file)

                print('Processing ' + str(file))
                mid = MidiFile(midi_path)

                # mid = quantize(midi_file,
                #                quantization=quant)

                if one_hot:
                    try:
                        array, velocity_array = midi_to_array_one_hot(
                            mid, quant)
                    except (KeyError, TypeError, IOError, IndexError, EOFError,
                            ValueError):
                        print("Out of bounds")
                        continue
                else:
                    array, velocity_array = midi_to_array(mid, quant)

                if not os.path.exists(array_out):
                    os.makedirs(array_out)

                if not os.path.exists(velocity_out):
                    os.makedirs(velocity_out)

                # print out_dir

                print('Saving', out_array)

                # print_array( mid, array)
                # raw_input("Press Enter to continue...")

                np.save(out_array, array)
                np.save(out_velocity, velocity_array)

                processed_count += 1
    print('\nProcessed {} files out of {}'.format(processed_count,
                                                  total_file_count))
Code example #26
File: gui.py Project: shaaofbla/Minerator
    def loadMidiFile(self):
        self.midi = MidiFile(self.midiFilename)
Code example #27
File: utils.py Project: spell00/djjudge
def printAll(path):
    mid = MidiFile(path)
    for i, track in enumerate(mid.tracks):
        for msg in track:
            print(msg)
Code example #28
# files_pd is assumed to be a single-column DataFrame of MIDI file paths
temp = files_pd[0].str.split('\\', expand=True)
temp.rename(columns={
    0: 'dataset',
    1: 'subfolder',
    2: 'filename'
},
            inplace=True)
files_pd = files_pd.merge(temp, left_index=True, right_index=True, how='left')
files_pd.rename(columns={0: 'path'}, inplace=True)
files_pd.reset_index(drop=True, inplace=True)
files_pd['song_idx'] = files_pd.index

for row in files_pd.itertuples():
    print(row[0])
    if (row[0] < 0):
        continue

    try:
        mid = MidiFile(row[1], clip=True)
    except Exception as e:
        continue

    for track_count, track in enumerate(mid.tracks):
        for msg_count, msg in enumerate(track):
            if (msg.type == 'smpte_offset'):
                print(row[0])
                print(msg.dict())
                print(msg.type)
                print(msg.is_meta)
                input('Batman')
Code example #29
import copy

from mido import MidiFile, bpm2tempo


def midi_to_txt(input_file, bpm=120, calc_beats=False):

    times = []
    max_time = 0

    infile = MidiFile(input_file)
    ppq = infile.ticks_per_beat

    midi_tempo = bpm2tempo(bpm)
    s_per_tick = midi_tempo / 1000.0 / 1000 / ppq

    file_type = infile.type

    tempo_track = []

    for track_idx, track in enumerate(infile.tracks):
        cur_time = 0
        if file_type == 1:
            if track_idx == 0:  # store track 0 as tempo track
                tempo_track = track
                continue
            else:
                # merge tempo track into current track
                tempo_idx = 0
                msg_idx = 0  # message index; the original reused the name track_idx here

                cur_track = []
                while tempo_idx < len(tempo_track) or msg_idx < len(track):
                    if tempo_idx >= len(tempo_track):
                        cur_track.append(track[msg_idx])
                        msg_idx += 1
                        continue
                    if msg_idx >= len(track):
                        cur_track.append(tempo_track[tempo_idx])
                        tempo_idx += 1
                        continue
                    if tempo_track[tempo_idx].time <= track[msg_idx].time:
                        cur_track.append(tempo_track[tempo_idx])
                        track[msg_idx].time -= tempo_track[tempo_idx].time
                        tempo_idx += 1
                    else:
                        cur_track.append(track[msg_idx])
                        tempo_track[tempo_idx].time -= track[msg_idx].time
                        msg_idx += 1
        else:
            cur_track = track

        for message in cur_track:
            delta_tick = message.time
            delta_time = delta_tick * s_per_tick
            cur_time += delta_time

            if cur_time > max_time:  # collect max time for beats if necessary
                max_time = cur_time

            if message.type == 'set_tempo':
                midi_tempo = message.tempo
                s_per_tick = midi_tempo / 1000.0 / 1000 / ppq

            if message.type == 'note_on' and message.velocity > 0:
                inst_idx = message.note
                velocity = float(message.velocity) / 127.0
                times.append([cur_time, inst_idx, velocity])

    if calc_beats:
        beat_times = calc_beat_times(copy.deepcopy(infile.tracks[0]), max_time, ppq)
    else:
        beat_times = None

    return times, beat_times
Code example #30
if args.verbose:  # assumed flag name; the snippet begins mid-definition
    def vprint(*args, **kwargs):
        print(*args, **kwargs)
else:
    vprint = lambda *a, **k: None  # do-nothing function

# Get import and export folder locations
dom = parse(ardourFile)
sessionName = dom.getElementsByTagName("Session")[0].getAttribute("name")

dir = os.path.dirname(ardourFile)
importFolder = os.path.join(dir, "interchange", sessionName, "midifiles")
exportFolder = os.path.join(dir, "export")
vprint(importFolder, exportFolder)

# Iterate through the MIDI tracks in Ardour (called "Routes" in the XML file)
# Gets Ardour track id's and saves track names
mid = MidiFile(type=1, ticks_per_beat=19200)
trackRef = {}  #ardour-track-id : midi-track-id
i = 0
for route in dom.getElementsByTagName("Route"):
    if route.getAttribute("default-type") == "midi":
        rname = route.getAttribute("name")
        if args.omitparens:
            p = re.compile(r"(.*)(\(.*\))(.*)")
            rname = p.sub(r"\1\3", rname).strip()
        mid.add_track(name=rname)
        mid.tracks[i].append(MetaMessage("instrument_name", name=rname))
        programNumber = getGeneralMidiNumber(rname)
        if programNumber == -10:
            mid.tracks[i].append(
                MetaMessage("channel_prefix", channel=10, time=0))
            if args.musescore: