Example 1
    def fromFile(self, filename):
        class MIDItoNotes(MidiOutStream):
            currentNotes = {}
            ppq = 96
            currentTrack = 0
            outList = []
            def header(self, format=0, nTracks=1, division=96):
                self.ppq = division
            def start_of_track(self, n_track=0):
                self.currentTrack = n_track
                print "start_of_track", n_track
            def note_on(self, channel=0, note=0x40, velocity=0x40):
                self.currentNotes[(self.currentTrack,note)] = (float(self.abs_time())/1000,velocity)
            def note_off(self, channel=0, note=0x40, velocity=0x40):
                patch = 1
                if (self.currentTrack, note) in self.currentNotes:
                    out = (self.currentTrack,
                          'note',
                          note,
                          self.currentNotes[(self.currentTrack, note)][0],
                          float(self.abs_time())/1000-self.currentNotes[(self.currentTrack, note)][0],
                          self.currentNotes[(self.currentTrack, note)][1],
                          channel,
                          patch)
                    #print "out:", out
                    self.outList.append(out)
                    del self.currentNotes[(self.currentTrack, note)]
            def tempo(self,value):
                channel = 0
                patch = 0
                self.outList.append((0,'tempo',value,float(self.abs_time())/1000,0,0,channel,patch))
            def patch_change(self, channel, patch):
                print "patch_change", "channel", channel, "patch", patch
                #self.currentChannel = channel
                #self.currentPatch = patch
            def sysex_event(self, parameter):
                print "sysex", parameter
            #def midi_ch_prefix(self, channel):
            #    print "midi channel:", channel

        event_handler = MIDItoNotes()
        midi_in = MidiInFile(event_handler, filename)

        print "starting read", filename
        try:
            midi_in.read()
        except Exception:
            # TODO: this is a hack; it just renames files that can't be read
            print "renaming file so it won't be accessed again"
            os.rename(filename, filename + "_unreadable")
        print "finished read", filename

        # probably should not sort like this; some events might end up out of order
        self.events = sorted(event_handler.outList,key=itemgetter(3,0))
        self.ppq = event_handler.ppq
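On the sorting concern in the comment above: Python's sorted() is stable, so events that compare equal on (time, track) keep their append order. If tempo events should reliably precede note events at the same timestamp, an explicit tie-breaker can be added; a minimal sketch, where the priority mapping is an illustrative choice and not part of the original:

# sort by time, then track, then event kind, so that 'tempo' entries
# at a given timestamp come before 'note' entries (illustrative choice)
kind_priority = {'tempo': 0, 'note': 1}
self.events = sorted(event_handler.outList,
                     key=lambda e: (e[3], e[0], kind_priority.get(e[1], 2)))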
Example 2
 def __init__(self, infile = 'fichiers_midi/internationale.mid', verbose=False):
     self.infile = infile
     self._data = {}
     self._busy_chan = []
     self.verbose = verbose
     self._tempo = 1000000000.0 # empirically: one quarter note per second
     midi_in = MidiInFile(self, self.infile)
     midi_in.read()
     # Once parsing is finished, adjust the times and durations
     # to account for the tempo
     for chan_num, chan_data in self._data.iteritems() :
         for evt in chan_data :
             evt['time'] = int(evt['time'] * self._tempo * 1000)
             evt['duration'] = int(evt['duration'] * self._tempo*1000)
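For reference, the standard conversion from MIDI ticks to seconds combines the tempo (microseconds per quarter note, as delivered to a tempo() callback) with the PPQ value from the file header; the handler above uses its own empirical scaling instead. A minimal sketch, with illustrative names:

def ticks_to_seconds(ticks, tempo_us=500000, ppq=96):
    # tempo_us: microseconds per quarter note (MIDI default 500000 = 120 BPM)
    # ppq: pulses per quarter note, from the header's division field
    return ticks * (tempo_us / 1e6) / ppq

print(ticks_to_seconds(192))  # two quarter notes at the default tempo -> 1.0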
Example 3
def getSpawnList():
    event_handler = TrackReader()

    infile = "notes.mid"
    midi_in = MidiInFile(event_handler, infile)
    midi_in.read()

    enemySpawner = EnemySpawn(event_handler.song)
    enemies = enemySpawner.enemies
    spawnList = []
    for key in range(0, enemySpawner.end + enemySpawner.chunk, enemySpawner.chunk):
        spawnList.append((key, enemies[key]))

    return spawnList
Example 4
    def read(self, song, mode='full'):
        """Parse the MIDI file and fill the given song's SongSegment list with its data.

        Keyword arguments:
        song -- the song object whose SongSegment list is filled
        mode -- read mode (default: 'full')

        """

        file_name = os.path.join(self.path, self.file_name)
        f = open(file_name, 'rb')

        # do parsing
        x = MidiToText(song)
        midiIn = MidiInFile(x, f)
        midiIn.read()
        f.close()
Example 5
    def __init__(self, filename, r=(21, 109), dt=0.2):
        self.notes = []
        self._tempo = 500000
        self.beat = 0
        self.time = 0.0

        midi_in = MidiInFile(self, filename)
        midi_in.read()
        self.notes = [n for n in self.notes
                      if n[2] is not None]  # purge incomplete notes
        length = int(numpy.ceil(max(list(zip(*self.notes))[2]) /
                                dt))  # create piano-roll
        self.piano_roll = numpy.zeros((length, r[1] - r[0]))
        for n in self.notes:
            self.piano_roll[int(numpy.ceil(n[1] /
                                           dt)):int(numpy.ceil(n[2] / dt)),
                            n[0] - r[0]] = 1
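The enclosing class for this __init__ is not shown; assuming it subclasses MidiOutStream, fills self.notes with (pitch, start_time, end_time) tuples in its note callbacks, and is named, say, PianoRollReader, usage would look like the hypothetical sketch below:

# hypothetical: PianoRollReader is an assumed name for the class this __init__ belongs to
roll = PianoRollReader('song.mid', r=(21, 109), dt=0.2)
print(roll.piano_roll.shape)  # (time_frames, 88) for the default pitch range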
Example 6
 def fromFile(self, fileName):
     class MIDItoNotes(MidiOutStream):
         currentNotes = {}
         ppq = 96
         currentTrack = 0
         outList = []
         def header(self, format=0, nTracks=1, division=96):
             self.ppq = division
         def start_of_track(self, n_track=0):
             self.currentTrack = n_track
         def note_on(self, channel=0, note=0x40, velocity=0x40):
             self.currentNotes[(self.currentTrack,note)] = (float(self.abs_time())/1000,velocity)
         def note_off(self, channel=0, note=0x40, velocity=0x40):
             self.outList.append((self.currentTrack,'note',note,self.currentNotes[(self.currentTrack,note)][0],float(self.abs_time())/1000-self.currentNotes[(self.currentTrack,note)][0],self.currentNotes[(self.currentTrack,note)][1]))
             del self.currentNotes[(self.currentTrack,note)]
         def tempo(self,value):
             self.outList.append((0,'tempo',value,float(self.abs_time())/1000,0,0))
     event_handler = MIDItoNotes()
     midi_in = MidiInFile(event_handler, fileName)
     midi_in.read()
     self.events = sorted(event_handler.outList,key=itemgetter(3,0))
     self.ppq = event_handler.ppq
Example 7
def midi_to_meta_data(midi_file_path):
    ''' returns a list: [(row, col, millisecond_midi_offset), ...] '''
    event_handler = NoteOnHandler()
    midi_in = MidiInFile(event_handler, midi_file_path)
    midi_in.read()
    return event_handler.offsets
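NoteOnHandler is defined elsewhere in that project; a hypothetical minimal sketch of such a handler, assuming note numbers map onto an 8-column grid and that abs_time() is already in the desired millisecond scale (both assumptions, not the project's actual mapping):

from midi.MidiOutStream import MidiOutStream

class NoteOnHandler(MidiOutStream):
    """Hypothetical sketch: collect (row, col, millisecond_midi_offset) tuples."""
    def __init__(self):
        MidiOutStream.__init__(self)
        self.offsets = []
    def note_on(self, channel=0, note=0x40, velocity=0x40):
        # assumed mapping: eight notes per row; the real project will differ
        row, col = divmod(note, 8)
        self.offsets.append((row, col, self.abs_time()))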
Example 8
def read(midi_file, force_tempo=120, f=None):
    event_handler = MIDIStream(force_tempo=force_tempo, filter_func=f)
    midi_in = MidiInFile(event_handler, midi_file)
    midi_in.read()
    return event_handler.output
Example 9
def buildScore(in_file):
    event_handler = ScoreBuilder()
    midi_in = MidiInFile(event_handler, in_file)
    midi_in.read()
    return event_handler.score
Example 10
def markers(midifile, name, callback, channel=9):
    stream = CarbonOutStream(name=name, channel=channel, callback=callback)

    midi_in = MidiInFile(stream, midifile)
    midi_in.read()
Example 11
from midi.MidiOutStream import MidiOutStream
from midi.MidiInFile import MidiInFile

"""
This prints all note on events on midi channel 0
"""


class Transposer(MidiOutStream):
    
    "Prints all note_on events on MIDI channel 0"
    
    def note_on(self, channel=0, note=0x40, velocity=0x40):
        if channel == 0:
            print (channel, note, velocity, self.rel_time())


event_handler = Transposer()

in_file = 'midiout/minimal_type0.mid'
midi_in = MidiInFile(event_handler, in_file)
midi_in.read()

Example 12
        global tempo
        tempo = value

midi_file_dir = '/home/pi/midi_files'
kirby = 'KirbysTheme.mid'
test_file = 'outer.mid'
test_file = 'James_Bond_Theme_1.mid'
test_file = 'Zelda.mid'
test_file = 'Tetris.mid'
test_file = 'Mario.mid'
test_file= r'Star_Wars_Imperial_March_2.mid'

import os
f = open(os.path.join(midi_file_dir,test_file), 'rb')
midiIn = MidiInFile(MidiToFloppy(), f)
midiIn.read()
channel_numbers = channels.keys()
time_scaling = tempo * 1e-8
#time_scaling = tempo * 2e-8



def play_channel(one_channel, name, pins):
    ft = FloppyThread(name=name, dir_pin=pins[0], step_pin=pins[1])
    ft.reset_drive()
    ft.start()
    for i in xrange(len(one_channel)-1):
        on, note, abs_time = one_channel[i]
        duration = one_channel[i+1][2] - one_channel[i][2]
        if on:
            ft.play_midi_note(note)
Example 13
class MidiFile:
    def __init__(self,in_file=None,zeroVelOnIsOff=False):
        self.header = None
        self.tracks = []
        self.zeroVelOnIsOff = zeroVelOnIsOff
        if in_file is not None:
            self.readFile(in_file)
    
    def summarize(self):
        out = [self.getHeader().summarize()]
        for t in self.getTracks():
            out.append(t.summarize())
        return '\n'.join(out)

    def computeTime(self,time,track=0,defaultTempo=1000000):
        """Compute the time in seconds for <time> (in MIDI units), taking
        into account the time division and the tempo events in <track>"""
        try:
            return self.computeTimes([time],track=track,defaultTempo=defaultTempo)[0]
        except IndexError:
            print('No tempo events found in file, could not compute time.')

    def computeTimes(self,times,track=0,defaultTempo=1000000):
        """Compute the times in seconds for <times> (in MIDI units), taking
        into account the time division and the tempo events in <track>"""
        try:
            events = self.getTrack(track).getEvents(TempoEvent)
        except IndexError:
            print('MIDI file has no track %d' % track)
            return False
        if len(events) > 0 and events[0].getTime() > 0:
            ## assume default tempo until first tempo event
            events.insert(0,TempoEvent(0,defaultTempo))
        mtime = max(times)
        timeTempo = array([(e.getTime(),e.getTempo()) for e in events if e.getTime() < mtime],double)
        tempoTimes = transpose(array((timeTempo[:,0],
                                      concatenate((array([0]),cumsum(timeTempo[:-1,1]*diff(timeTempo[:,0])))),
                                      timeTempo[:,1]),ndmin=2))
        j = 0
        result = [0]*len(times)
        for i in argsort(array(times)):
            while j < tempoTimes.shape[0] and tempoTimes[j,0] > times[i]:
                j = j+1
            result[i] = (tempoTimes[j-1,1] + (times[i]-tempoTimes[j-1,0])*tempoTimes[j-1,2])/\
                        (10**6*float(self.getHeader().getTimeDivision()))
        return result

    def getHeader(self):
        return self.header
    def setHeader(self,header):
        self.header = header
    def getTracks(self):
        return self.tracks
    def getTrack(self,n=0):
        return self.tracks[n]
    def replaceTrack(self,n,track):
        self.tracks[n] = track
    def addTrack(self,track):
        self.tracks.append(track)
        
    def readFile(self,filename):
        self.midi_in = MidiInFile(MidiHandler(self, self.zeroVelOnIsOff), filename)
        ## header and tracks get instantiated through the midi event handler
        self.midi_in.read()

    def writeFile(self,filename):
        midi = MidiOutFile(filename)
        self.getHeader().send(midi)
        [track.send(midi) for track in self.getTracks()]
        midi.eof()
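A brief usage sketch for the class above; 'song.mid' is a placeholder path and the tick values passed to computeTimes are illustrative:

mf = MidiFile('song.mid')
print(mf.summarize())
# convert a few tick positions from track 0 into seconds,
# honoring the file's time division and its tempo events
print(mf.computeTimes([0, 480, 960], track=0))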
Example 14
		self.song = Song()
	
	def tempo(self, value):
		bpm = 60.0 * 10.0**6 / value
		self.song.set_bpm(bpm)
		#print "Tempo", value, "BPM", bpm
		print self.song.bpm
	
	def note_on(self, channel=0, note=0x40, velocity=0x40):
		difficulty, value = noteMap[note]
		note = Note(time=self.abs_time(), value=value, type=NOTE_ON, velocity=velocity, channel=channel)
		self.song.add_note(difficulty, note)
	
	def note_off(self, channel=0, note=0x40, velocity=0x40):
		difficulty, value = noteMap[note]
		note = Note(time=self.abs_time(), value=value, type=NOTE_OFF, velocity=velocity, channel=channel)
		self.song.add_note(difficulty, note)
			

event_handler = TrackReader()

infile = "notes.mid"
midi_in = MidiInFile(event_handler, infile)
midi_in.read()

for difficulty,notes in event_handler.song.notes.iteritems():
	print "~~~~~~~~~~~~~~~~~~~", difficulty, "~~~~~~~~~~~~~~~~~~~"
	for time, note in notes.iteritems():
		print time, note.value, note.type

print "End of Line."
Example 15
# process ndarray -> generation file
# X data set, plus start time, end time, and total length
# returns the wav file's playback duration

#model = network_utils.create_lstm_network(num_frequency_dimensions=8820,
#                                          num_hidden_dimensions=1024)
#model_reverse = network_utils.create_lstm_network(num_frequency_dimensions=8820,
#                                          num_hidden_dimensions=1024)

model.load_weights(model_basename)
#model_reverse.load_weights('./Weights_reverse')

# do parsing

midiIn = MidiInFile(MidiReadList(), recv_file_mid)
midiIn.read()

midi_list = []
midi_list = midiIn.parser.dispatch.outstream.note_list

num_examples = 1
max_seq_len = midiIn.parser.dispatch.outstream.alltime

vector_0 = np.zeros(num_dims)

out_shape = (num_examples, max_seq_len, num_dims)

x_data = np.zeros(out_shape)

num_note = 0
num_no_note = 0
Example 16
    def read_midi(self,
                  path,
                  name,
                  time_offset=[0.0, 0.0],
                  fg_channels=[1],
                  bg_channels=range(1, 17),
                  tStep=20,
                  tDelay=40.0,
                  legato=100.0,
                  tolerance=30.0):
        # absolute: *[1], relative: *[0]
        parser = SomaxMidiParser()
        midi_in = MidiInFile(parser, path)
        midi_in.read()
        midi_data = array(parser.get_matrix())
        fgmatrix, bgmatrix = splitMatrixByChannel(
            midi_data, fg_channels, bg_channels)  # de-interleaving the channel information
        # creating harmonic ctxt
        if time_offset != [0.0, 0.0]:
            for i in range(0, len(fgmatrix)):
                fgmatrix[i][0] += time_offset[0]
                fgmatrix[i][5] += time_offset[1]
        if bgmatrix != []:
            if time_offset != [0.0, 0.0]:
                for i in range(0, len(bgmatrix)):
                    bgmatrix[i][0] += time_offset[0]
                    bgmatrix[i][5] += time_offset[1]
            harm_ctxt, tRef = computePitchClassVector(bgmatrix)
        else:
            harm_ctxt, tRef = computePitchClassVector(fgmatrix)

        # Initializing parameters
        lastNoteOnset = [0, -1 - tolerance]
        lastSliceOnset = list(lastNoteOnset)
        state_nb = 0
        global_time = time_offset
        corpus = dict({'name': "", 'typeID': "MIDI", 'size': 1, 'data': []})
        corpus["data"].append({"state": 0, "tempo":120, "time": {"absolute":[-1,0], "relative":[-1,0]}, "seg": [1,0], "beat":[0.0, 0.0, 0, 0], \
            "chroma": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "pitch":140, "notes":[]})
        tmp = dict()

        # Iterate over the notes in the matrix
        for i in range(0, len(fgmatrix)):
            # note is not in current slice
            if (fgmatrix[i][5] > (lastSliceOnset[1] + tolerance)):
                # finalizing current slice
                if state_nb > 0:
                    previousSliceDuration = [
                        fgmatrix[i][0] - lastSliceOnset[0],
                        fgmatrix[i][5] - lastSliceOnset[1]
                    ]
                    corpus["data"][state_nb]["time"]["absolute"][1] = float(
                        previousSliceDuration[1])
                    corpus["data"][state_nb]["time"]["relative"][1] = float(
                        previousSliceDuration[0])
                    tmpListOfPitches = getPitchContent(corpus["data"],
                                                       state_nb, legato)
                    if len(tmpListOfPitches) == 0:
                        if useRests:
                            corpus["data"][state_nb]["pitch"] = 140  # silence
                        else:
                            state_nb -= 1  # delete slice
                    elif len(tmpListOfPitches) == 1:
                        corpus["data"][state_nb]["pitch"] = int(
                            tmpListOfPitches[0])  # simply take the pitch
                    else:
                        virtualfunTmp = virfun.virfun(
                            tmpListOfPitches, 0.293)  # take the virtual root
                        corpus["data"][state_nb]["pitch"] = int(
                            128 + (virtualfunTmp - 8) % 12)

                # create a new state
                state_nb += 1
                global_time = float(fgmatrix[i][5])

                tmp = dict()
                tmp["state"] = int(state_nb)
                tmp["time"] = dict()
                tmp["time"]["absolute"] = list([global_time, fgmatrix[i][6]])
                tmp["time"]["relative"] = list(
                    [fgmatrix[i][0], fgmatrix[i][1]])
                tmp["tempo"] = fgmatrix[i][7]
                frameNbTmp = int(ceil(
                    (fgmatrix[i][5] + tDelay - tRef) / tStep))
                if frameNbTmp <= 0:
                    tmp["chroma"] = [
                        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.
                    ]
                else:
                    tmp["chroma"] = harm_ctxt[:,
                                              min(frameNbTmp,
                                                  int(harm_ctxt.shape[1])
                                                  )].tolist()
                tmp["pitch"] = 0
                tmp["notes"] = []

                # if some notes ended in previous slice...
                for k in range(0, len(corpus["data"][state_nb - 1]["notes"])):
                    if ((corpus["data"][state_nb -
                                        1]["notes"][k]["time"]["relative"][0] +
                         corpus["data"][state_nb -
                                        1]["notes"][k]["time"]["relative"][1])
                            > previousSliceDuration[0]):
                        # adding lasting notes of previous slice to the new slice
                        note_to_add = dict()
                        note_to_add["pitch"] = int(
                            corpus["data"][state_nb - 1]["notes"][k]["pitch"])
                        note_to_add["velocity"] = int(
                            corpus["data"][state_nb -
                                           1]["notes"][k]["velocity"])
                        note_to_add["channel"] = int(
                            corpus["data"][state_nb -
                                           1]["notes"][k]["channel"])
                        note_to_add["time"] = dict()
                        note_to_add["time"]["relative"] = list(
                            corpus["data"][state_nb -
                                           1]["notes"][k]["time"]["relative"])
                        note_to_add["time"]["absolute"] = list(
                            corpus["data"][state_nb -
                                           1]["notes"][k]["time"]["absolute"])
                        note_to_add["time"]["relative"][
                            0] = note_to_add["time"]["relative"][0] - float(
                                previousSliceDuration[0])
                        note_to_add["time"]["absolute"][
                            0] = note_to_add["time"]["absolute"][0] - float(
                                previousSliceDuration[1])
                        if note_to_add["time"]["absolute"][0] > 0:
                            note_to_add["velocity"] = int(
                                corpus["data"][state_nb -
                                               1]["notes"][k]["velocity"])
                        else:
                            note_to_add["velocity"] = 0
                        tmp["notes"].append(note_to_add)
                # adding the new note
                tmp["notes"].append(dict())
                n = len(tmp["notes"]) - 1
                tmp["notes"][n] = {
                    "pitch": fgmatrix[i][3],
                    "velocity": fgmatrix[i][4],
                    "channel": fgmatrix[i][2],
                    "time": dict()
                }
                tmp["notes"][n]["time"]["absolute"] = [0, fgmatrix[i][6]]
                tmp["notes"][n]["time"]["relative"] = [0, fgmatrix[i][1]]

                #update variables used during the slicing process
                lastNoteOnset = [fgmatrix[i][0], fgmatrix[i][5]]
                lastSliceOnset = [fgmatrix[i][0], fgmatrix[i][5]]
                corpus["data"].append(tmp)
            else:
                # note in current slice
                nbNotesInSlice = len(corpus["data"][state_nb]["notes"])
                offset = fgmatrix[i][5] - corpus["data"][state_nb]["time"][
                    "absolute"][0]
                offset_r = fgmatrix[i][0] - corpus["data"][state_nb]["time"][
                    "relative"][0]
                tmp["notes"].append({
                    "pitch": fgmatrix[i][3],
                    "velocity": fgmatrix[i][4],
                    "channel": fgmatrix[i][2],
                    "time": dict()
                })
                tmp["notes"][nbNotesInSlice]["time"]["absolute"] = [
                    offset, fgmatrix[i][6]
                ]
                tmp["notes"][nbNotesInSlice]["time"]["relative"] = [
                    offset_r, fgmatrix[i][1]
                ]

                # extending slice duration
                if ((fgmatrix[i][6] + offset) >
                        corpus["data"][state_nb]["time"]["absolute"][1]):
                    corpus["data"][state_nb]["time"]["absolute"][
                        1] = fgmatrix[i][6] + int(offset)
                    corpus["data"][state_nb]["time"]["relative"][
                        1] = fgmatrix[i][1] + int(offset_r)
                lastNoteOnset = [fgmatrix[i][0], fgmatrix[i][5]]

        # finalize the current slice
        global_time = fgmatrix[i][5]
        lastSliceDuration = corpus["data"][state_nb]["time"]["absolute"][1]
        nbNotesInLastSlice = len(corpus["data"][state_nb]["notes"])
        tmpListOfPitches = getPitchContent(corpus["data"], state_nb, legato)
        if len(tmpListOfPitches) == 0:
            if useRests:
                corpus["data"][state_nb]["pitch"] = 140  # silence
            else:
                state_nb -= 1  # delete slice
        elif len(tmpListOfPitches) == 1:
            corpus["data"][state_nb]["pitch"] = int(tmpListOfPitches[0])
        else:
            virtualFunTmp = virfun.virfun(tmpListOfPitches, 0.293)
            corpus["data"][state_nb]["pitch"] = int(128 +
                                                    (virtualFunTmp - 8) % 12)

        frameNbTmp = int(ceil((fgmatrix[i][5] + tDelay - tRef) / tStep))
        if (frameNbTmp <= 0):
            corpus["data"][state_nb]["chroma"] = [
                0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
            ]
        else:
            corpus["data"][state_nb][
                "chroma"] = harm_ctxt[:,
                                      min(frameNbTmp, int(harm_ctxt.shape[1])
                                          )].tolist()
        corpus["size"] = state_nb + 1
        return corpus