Example No. 1
    def fromFile(self, filename):
        class MIDItoNotes(MidiOutStream):
            currentNotes = {}
            ppq = 96
            currentTrack = 0
            outList = []
            def header(self, format=0, nTracks=1, division=96):
                self.ppq = division
            def start_of_track(self, n_track=0):
                self.currentTrack = n_track
                print "start_of_track", n_track
            def note_on(self, channel=0, note=0x40, velocity=0x40):
                self.currentNotes[(self.currentTrack,note)] = (float(self.abs_time())/1000,velocity)
            def note_off(self, channel=0, note=0x40, velocity=0x40):
                patch = 1
                if (self.currentTrack, note) in self.currentNotes:
                    out = (self.currentTrack,
                          'note',
                          note,
                          self.currentNotes[(self.currentTrack, note)][0],
                          float(self.abs_time())/1000-self.currentNotes[(self.currentTrack, note)][0],
                          self.currentNotes[(self.currentTrack, note)][1],
                          channel,
                          patch)
                    #print "out:", out
                    self.outList.append(out)
                    del self.currentNotes[(self.currentTrack, note)]
            def tempo(self,value):
                channel = 0
                patch = 0
                self.outList.append((0,'tempo',value,float(self.abs_time())/1000,0,0,channel,patch))
            def patch_change(self, channel, patch):
                print "patch_change", "channel", channel, "patch", patch
                #self.currentChannel = channel
                #self.currentPatch = patch
            def sysex_event(self, parameter):
                print "sysex", parameter
            #def midi_ch_prefix(self, channel):
            #    print "midi channel:", channel

        event_handler = MIDItoNotes()
        midi_in = MidiInFile(event_handler, filename)

        print "starting read", filename
        try:
            midi_in.read()
        except Exception:
            #todo: this is a hack, it just renames files it can't read
            print "renaming file so it won't be accessed again"
            os.rename(filename, filename + "_unreadable")
        print "finished read", filename

        # probably should not sort like this, who knows, seems like some things could get out of order
        self.events = sorted(event_handler.outList,key=itemgetter(3,0))
        self.ppq = event_handler.ppq
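
A hedged sketch of how the event tuples built above could be consumed; the owning class and file name are illustrative, but the tuple layout (track, 'note', pitch, start, duration, velocity, channel, patch) matches the code:

loader = SongLoader()        # hypothetical class that defines the fromFile method above
loader.fromFile('song.mid')  # hypothetical input file
for ev in loader.events:
    if ev[1] == 'note':
        track, _, pitch, start, duration, velocity, channel, patch = ev
        print track, channel, pitch, start, duration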
Example No. 2
 def __init__(self, infile = 'fichiers_midi/internationale.mid', verbose=False):
     self.infile = infile
     self._data = {}
     self._busy_chan = []
     self.verbose = verbose
     self._tempo = 1000000000.0 # empirically: one quarter note per second
     midi_in = MidiInFile(self, self.infile)
     midi_in.read()
     # Once parsing is done, adjust the times and durations to
     # account for the tempo
     for chan_num, chan_data in self._data.iteritems() :
         for evt in chan_data :
             evt['time'] = int(evt['time'] * self._tempo * 1000)
             evt['duration'] = int(evt['duration'] * self._tempo*1000)
Example No. 3
def getSpawnList():
    event_handler = TrackReader()

    infile = "notes.mid"
    midi_in = MidiInFile(event_handler, infile)
    midi_in.read()

    enemySpawner = EnemySpawn(event_handler.song)
    enemies = enemySpawner.enemies
    spawnList = []
    for key in range(0, enemySpawner.end + enemySpawner.chunk, enemySpawner.chunk):
        spawnList.append((key, enemies[key]))

    return spawnList
Example No. 4
    def read(self, song, mode='full'):
        """Parse a midi file and fill with that data the SongSegment list.

        Keyword arguments:
        midi_file -- the midi file to parse

        """

        file_name = os.path.join(self.path, self.file_name)
        f = open(file_name, 'rb')

        # do parsing
        x = MidiToText(song)
        midiIn = MidiInFile(x, f)
        midiIn.read()
        f.close()
Example No. 5
    def __init__(self, filename, r=(21, 109), dt=0.2):
        self.notes = []
        self._tempo = 500000
        self.beat = 0
        self.time = 0.0

        midi_in = MidiInFile(self, filename)
        midi_in.read()
        self.notes = [n for n in self.notes
                      if n[2] is not None]  # purge incomplete notes
        length = int(numpy.ceil(max(list(zip(*self.notes))[2]) /
                                dt))  # create piano-roll
        self.piano_roll = numpy.zeros((length, r[1] - r[0]))
        for n in self.notes:
            self.piano_roll[int(numpy.ceil(n[1] /
                                           dt)):int(numpy.ceil(n[2] / dt)),
                            n[0] - r[0]] = 1
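
The constructor above passes self as the event handler, so self.notes must be filled by MidiOutStream callbacks that are not shown. A minimal sketch of what such callbacks could look like, assuming a single tempo, that abs_time() returns ticks, and that notes are stored as [pitch, onset_seconds, offset_seconds] entries (the layout the piano-roll code expects); the attribute names are assumptions, not the original implementation:

    def header(self, format=0, nTracks=1, division=96):
        self.division = division              # ticks per quarter note

    def tempo(self, value):
        self._tempo = value                   # microseconds per quarter note

    def note_on(self, channel=0, note=0x40, velocity=0x40):
        seconds = self.abs_time() * self._tempo / 1e6 / self.division
        self.notes.append([note, seconds, None])   # offset filled in by note_off

    def note_off(self, channel=0, note=0x40, velocity=0x40):
        seconds = self.abs_time() * self._tempo / 1e6 / self.division
        for n in reversed(self.notes):              # close the most recent open note
            if n[0] == note and n[2] is None:
                n[2] = seconds
                break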
Example No. 6
 def fromFile(self, fileName):
     class MIDItoNotes(MidiOutStream):
         currentNotes = {}
         ppq = 96
         currentTrack = 0
         outList = []
         def header(self, format=0, nTracks=1, division=96):
             self.ppq = division
         def start_of_track(self, n_track=0):
             self.currentTrack = n_track
         def note_on(self, channel=0, note=0x40, velocity=0x40):
             self.currentNotes[(self.currentTrack,note)] = (float(self.abs_time())/1000,velocity)
         def note_off(self, channel=0, note=0x40, velocity=0x40):
             self.outList.append((self.currentTrack,'note',note,self.currentNotes[(self.currentTrack,note)][0],float(self.abs_time())/1000-self.currentNotes[(self.currentTrack,note)][0],self.currentNotes[(self.currentTrack,note)][1]))
             del self.currentNotes[(self.currentTrack,note)]
         def tempo(self,value):
             self.outList.append((0,'tempo',value,float(self.abs_time())/1000,0,0))
     event_handler = MIDItoNotes()
     midi_in = MidiInFile(event_handler, fileName)
     midi_in.read()
     self.events = sorted(event_handler.outList,key=itemgetter(3,0))
     self.ppq = event_handler.ppq
Example No. 7
 def fromFile(self, fileName):
     class MIDItoNotes(MidiOutStream):
         currentNotes = {}
         ppq = 96
         currentTrack = 0
         outList = []
         def header(self, format=0, nTracks=1, division=96):
             self.ppq = division
         def start_of_track(self, n_track=0):
             self.currentTrack = n_track
         def note_on(self, channel=0, note=0x40, velocity=0x40):
             self.currentNotes[(self.currentTrack,note)] = (float(self.abs_time())/1000,velocity)
         def note_off(self, channel=0, note=0x40, velocity=0x40):
             self.outList.append((self.currentTrack,'note',note,self.currentNotes[(self.currentTrack,note)][0],float(self.abs_time())/1000-self.currentNotes[(self.currentTrack,note)][0],self.currentNotes[(self.currentTrack,note)][1]))
             del self.currentNotes[(self.currentTrack,note)]
         def tempo(self,value):
             self.outList.append((0,'tempo',value,float(self.abs_time())/1000,0,0))
     event_handler = MIDItoNotes()
     midi_in = MidiInFile(event_handler, fileName)
     midi_in.read()
     self.events = sorted(event_handler.outList,key=itemgetter(3,0))
     self.ppq = event_handler.ppq
Example No. 8
def make_song(config, midi_file):
    song_builder = MidiSongBuilder(config)
    MidiInFile(song_builder, midi_file).read()
    print ''
    song = song_builder.song
    song.sort(key=lambda n: n[0])
    print song, '\n'
    #  song = strip_leading_silence(song)
    #  print song, '\n'

    rs = Song()
    for note in song:
        key = str(config['keys'][note[1]])
        rs.song.append(Note(time=Duration(note[0]), key=key))
    return rs
Example No. 9
class MidiFile:
    def __init__(self,in_file=None,zeroVelOnIsOff=False):
        self.header = None
        self.tracks = []
        self.zeroVelOnIsOff = zeroVelOnIsOff
        if in_file is not None:
            self.readFile(in_file)
    
    def summarize(self):
        out = [self.getHeader().summarize()]
        for t in self.getTracks():
            out.append(t.summarize())
        return '\n'.join(out)

    def computeTime(self,time,track=0,defaultTempo=1000000):
        """Compute the time in seconds for <time> (in midi units), taking
        into account the tdiv, and tempo events in <track>"""
        try:
            return self.computeTimes([time],track=track,defaultTempo=defaultTempo)[0]
        except IndexError:
            print('No Tempo events found in file, could not compute time.')
            
    def computeTimes(self,times,track=0,defaultTempo=1000000):
        """Compute the time in seconds for <time> (in midi units), taking
        into account the tdiv, and tempo events in <track>"""
        try:
            events = self.getTrack(track).getEvents(TempoEvent)
        except IndexError:
            print('midi file has no %d-th track' % track)
            return False
        if len(events) > 0 and events[0].getTime() > 0:
            ## assume default tempo until first tempo event
            events.insert(0,TempoEvent(0,defaultTempo))
        mtime = max(times)
        timeTempo = array([(e.getTime(),e.getTempo()) for e in events if e.getTime() < mtime],double)
        tempoTimes = transpose(array((timeTempo[:,0],
                                      concatenate((array([0]),cumsum(timeTempo[:-1,1]*diff(timeTempo[:,0])))),
                                      timeTempo[:,1]),ndmin=2))
        j = 0
        result = [0]*len(times)
        for i in argsort(array(times)):
            while j < tempoTimes.shape[0] and tempoTimes[j,0] > times[i]:
                j = j+1
            result[i] = (tempoTimes[j-1,1] + (times[i]-tempoTimes[j-1,0])*tempoTimes[j-1,2])/\
                        (10**6*float(self.getHeader().getTimeDivision()))
        return result

    def getHeader(self):
        return self.header
    def setHeader(self,header):
        self.header = header
    def getTracks(self):
        return self.tracks
    def getTrack(self,n=0):
        return self.tracks[n]
    def replaceTrack(self,n,track):
        self.tracks[n] = track
    def addTrack(self,track):
        self.tracks.append(track)
        
    def readFile(self,filename):
        self.midi_in = MidiInFile(MidiHandler(self, self.zeroVelOnIsOff), filename)
        ## header and tracks get instantiated through the midi event handler
        self.midi_in.read()

    def writeFile(self,filename):
        midi = MidiOutFile(filename)
        self.getHeader().send(midi)
        [track.send(midi) for track in self.getTracks()]
        midi.eof()
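
computeTimes walks the tempo map piecewise, but the underlying conversion is simple: a set-tempo value is in microseconds per quarter note and the header's time division is in ticks per quarter note, so for a single tempo seconds = ticks * tempo / (10**6 * tdiv). A standalone sketch of that base case (the function name is illustrative, not part of the class above):

def ticks_to_seconds(ticks, tempo=1000000, tdiv=96):
    # tempo: microseconds per quarter note (1000000 us = 60 BPM, the default above)
    # tdiv:  ticks per quarter note, from the file header
    return ticks * tempo / (10 ** 6 * float(tdiv))

# 96 ticks = one quarter note; at tempo 500000 us (120 BPM) that is 0.5 seconds:
# ticks_to_seconds(96, tempo=500000, tdiv=96) -> 0.5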
Example No. 10
def markers(midifile, name, callback, channel=9):
    stream = CarbonOutStream(name=name, channel=channel, callback=callback)

    midi_in = MidiInFile(stream, midifile)
    midi_in.read()
Example No. 11
def midi_to_meta_data(midi_file_path):
    ''' returns a list: [(row, col, millisecond_midi_offset), ...] '''
    event_handler = NoteOnHandler()
    midi_in = MidiInFile(event_handler, midi_file_path)
    midi_in.read()
    return event_handler.offsets
Example No. 12
def midi_to_meta_data(midi_file_path):
    ''' returns a list: [(row, col, millisecond_midi_offset), ...] '''
    event_handler = NoteOnHandler()
    midi_in = MidiInFile(event_handler, midi_file_path)
    midi_in.read()
    return event_handler.offsets
"""
This is an example that uses the MidiToText eventhandler. When an 
event is triggered on it, it prints the event to the console.

It gets the events from the MidiInFile.

So it prints all the events from the infile to the console. great for 
debugging :-s
"""

# get data
test_file = 'test/midifiles/002.mid'

# do parsing
from midi.MidiInFile import MidiInFile
from midi.MidiToText import MidiToText  # the event handler
midiIn = MidiInFile(MidiToText(), test_file)
midiIn.read()
Example No. 14
def buildScore(in_file):
    event_handler = ScoreBuilder()
    midi_in = MidiInFile(event_handler, in_file)
    midi_in.read()
    return event_handler.score
Example No. 15
from midi.MidiOutStream import MidiOutStream
from midi.MidiInFile import MidiInFile

"""
This prints all note-on events on MIDI channel 0
"""


class Transposer(MidiOutStream):
    
    "Transposes all notes by 1 octave"
    
    def note_on(self, channel=0, note=0x40, velocity=0x40):
        if channel == 0:
            print (channel, note, velocity, self.rel_time())


event_handler = Transposer()

in_file = 'midiout/minimal_type0.mid'
midi_in = MidiInFile(event_handler, in_file)
midi_in.read()
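
The Transposer above only prints; a hedged sketch of a handler that actually shifts note-on events up one octave, using only the MidiOutStream callbacks already shown in these examples and collecting the results in a list (so no output-file API has to be assumed):

class OctaveUp(MidiOutStream):

    "Collects note-on events transposed up one octave"

    notes = []  # class-level list, following the pattern of the other handlers above

    def note_on(self, channel=0, note=0x40, velocity=0x40):
        shifted = min(note + 12, 127)   # clamp to the top of the MIDI note range
        self.notes.append((self.abs_time(), channel, shifted, velocity))

octave_handler = OctaveUp()
MidiInFile(octave_handler, in_file).read()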

Example No. 16
def read(midi_file, force_tempo=120, f=None):
    event_handler = MIDIStream(force_tempo=force_tempo, filter_func=f)
    midi_in = MidiInFile(event_handler, midi_file)
    midi_in.read()
    return event_handler.output
Example No. 17
def read(midi_file, force_tempo=120, f=None):
    event_handler = MIDIStream(force_tempo=force_tempo, filter_func=f)
    midi_in = MidiInFile(event_handler, midi_file)
    midi_in.read()
    return event_handler.output
Example No. 18
def markers(midifile, name, callback, channel=9):
    stream = CarbonOutStream(name=name, channel=channel, callback=callback)

    midi_in = MidiInFile(stream, midifile)
    midi_in.read()
Example No. 19
def midi_to_abc(filename=None,
                notes=None,
                key=None,
                metre=Fraction(3, 4),
                default_len=Fraction(1, 16),
                bars_per_line=4,
                title='',
                source='',
                no_triplets=False,
                no_broken_rythms=False,
                slur_8th_pairs=False,
                slur_16th_pairs=False,
                slur_triplets=True,
                no_beam_breaks=False,
                index='1',
                anacrusis_notes=0):
    global num_quarter_notes_per_bar
    num_quarter_notes_per_bar = metre * 4  # int(metre * 4)

    if filename and not notes:
        # read midi notes
        handler1 = MidiHandler(0, 15)  # channels 0-15
        # handler1 = MidiHandler(0, 0)  # channels 0-15
        MidiInFile(handler1, filename).read()
        notes = handler1.notes
    elif not filename and not notes:
        raise Exception(
            'midi_to_abc needs to be passed either a filename or a notes argument'
        )

    # sequence of Note(start, end, note)
    notes = sorted(notes, key=lambda n: n.start)
    fix_lengths(notes)

    # determine key and accidentals
    if not key:
        key = get_best_key_for_midi_notes([note.note for note in notes])
    key_accidentals = get_accidentals_for_key(key)
    cur_accidentals = key_accidentals[:]

    output = StringIO()
    output.write(u'X:%s\n' % index)
    if source:
        output.write(u'S:%s\n' % source)
    if title:
        output.write(u'T:%s\n' % title)
    output.write(u'M:%s\n' % metre)
    output.write(u'L:%s\n' % default_len)
    output.write(u'K:%s\n' % key.capitalize())

    # initialize variables used in loop
    last_note_start = -1.0
    bow_started = False
    broken_rythm_factor = Fraction(1, 1)
    num_notes = len(notes)
    bar_num = -1

    if anacrusis_notes:
        time_shift = 4 * float(metre) - notes[anacrusis_notes].start
        # print notes[0].start, notes[1].start, notes[2].start
        # print 'shift', time_shift
        for n in notes:
            n.start += time_shift
            n.end += time_shift

    # don't count the first bar if it's an upbeat (to get equal number of bars on each line)
    inside_upbeat = (notes
                     and bar_residue(notes[0].start) > 1.0) or anacrusis_notes

    while notes:
        # if current note is in a different bar than the last one, emit '|'
        if bar(notes[0].start) > bar(last_note_start):
            if len(notes) != num_notes:
                output.write(u' |')
                cur_accidentals = key_accidentals[:]
            if not inside_upbeat:
                bar_num += 1
            inside_upbeat = False
            if bar_num % bars_per_line == 0 and bar_num > 0:
                output.write(u'\n')

        # if we have advanced the length of a quarter note, emit space (for note grouping)
        br = bar_residue(notes[0].start)
        if is_at_even(notes[0].start, Fraction(1, 4)) and not no_beam_breaks:
            output.write(u' ')

        # check if next three notes can be interpreted as a triplet
        if is_triplet(notes) and not no_triplets:
            length = time_to_note_length(notes[2].end - notes[0].start)
            _notes = [n.note for n in notes[:3]]

            # convert notes to string representation
            s = u'(3' + ''.join([
                note_to_string(n.note, n.length * 2, default_len,
                               key_accidentals, cur_accidentals)
                for n in notes[:3]
            ])
            if slur_triplets:
                s = u'(' + s + ')'
            output.write(s)

            last_note_start = notes[0].start
            notes = notes[3:]

        # else handle notes one by one or as a chord
        else:
            is_four_16th_notes = len(
                [n for n in notes[0:4] if n.length == Fraction(1, 16)]) == 4
            # either two eighth notes or two eighth notes with a broken rhythm
            is_two_8th_notes = (len(
                [n for n in notes[0:2] if n.length == Fraction(1, 8)]) == 2
                                or len(notes) >= 2 and
                                (notes[0].length, notes[1].length) in [
                                    (Fraction(3, 16), Fraction(1, 16)),
                                    (Fraction(1, 16), Fraction(3, 16))
                                ])

            note = notes.pop(0)
            last_note_start = note.start

            # build a chord from notes near each other in time (will result in just one element in the non-chord case)
            chord_notes = [note.note]
            while notes and abs(notes[0].start - note.start) < 1.0 / 50:
                chord_notes.append(notes.pop(0).note)

            # let the duration of the first note determine the chord's duration (for simplicity)
            length = note.length

            # if the current note is the first of four 16th notes then add a bow on the two first notes
            bow_started_here = False
            if notes and abs(
                    br - int(br)
            ) < 1.0 / 20 and not bow_started and is_four_16th_notes and slur_16th_pairs:
                bow_started_here = True
                bow_started = True
                output.write(u'(')
            elif notes and abs(
                    br - int(br)
            ) < 1.0 / 20 and not bow_started and is_two_8th_notes and slur_8th_pairs and br < 2.0:
                bow_started_here = True
                bow_started = True
                output.write(u'(')

            # check if it's possible to use a broken rhythm (< or >) between the current and next note/chord
            broken_rythm_symbol = ''
            if not no_broken_rythms:
                # a broken rhythm was activated at the previous note
                if broken_rythm_factor != Fraction(1, 1):
                    length = length * broken_rythm_factor
                    broken_rythm_factor = Fraction(1, 1)
                elif notes and is_at_even(last_note_start, Fraction(
                        1, 8)) and bar(last_note_start) == bar(notes[0].start):
                    # use > between this and next note
                    if notes[0].length == length / 3:
                        broken_rythm_symbol = '>'
                        length = length * Fraction(2, 3)
                        broken_rythm_factor = Fraction(2, 1)
                    # use < between this and next note
                    if notes[0].length == length * 3:
                        broken_rythm_symbol = '<'
                        length = length * Fraction(2, 1)
                        broken_rythm_factor = Fraction(2, 3)

            # convert notes to string representation and output
            s = u''.join([
                note_to_string(n, length, default_len, key_accidentals,
                               cur_accidentals) for n in chord_notes
            ])
            if len(chord_notes) > 1:
                s = u'[' + s + ']'  # wrap chord
            output.write(s)

            # output broken rhythm symbol if set
            if broken_rythm_symbol:
                output.write(unicode(broken_rythm_symbol))

            # if a bow was previously started end it here
            if bow_started and not bow_started_here:
                output.write(u')')
                bow_started = False

            # print 'note', note.start, length, chord_notes, '%.2f' % last_note_start, bar(last_note_start), '%.2f' % bar_residue(last_note_start)#, note.note

    output.write(u' |')
    output.write(u'\n')

    # left strip lines
    lines = output.getvalue().split('\n')
    lines = [l.lstrip() for l in lines]
    return u'\n'.join(lines)
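
A hedged usage sketch for midi_to_abc; the file name and title are illustrative, and Fraction is the standard fractions.Fraction already used in the signature above:

abc_text = midi_to_abc(filename='tune.mid',        # hypothetical input file
                       metre=Fraction(3, 4),
                       default_len=Fraction(1, 16),
                       title='Example tune',
                       bars_per_line=4)
print abc_text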
Example No. 20
def buildScore(in_file):
    event_handler = ScoreBuilder()
    midi_in = MidiInFile(event_handler, in_file)
    midi_in.read()
    return event_handler.score
Example No. 21
    def tempo(self, value):
        global tempo
        tempo = value

midi_file_dir = '/home/pi/midi_files'
kirby = 'KirbysTheme.mid'
test_file = 'outer.mid'
test_file = 'James_Bond_Theme_1.mid'
test_file = 'Zelda.mid'
test_file = 'Tetris.mid'
test_file = 'Mario.mid'
test_file= r'Star_Wars_Imperial_March_2.mid'

import os
f = open(os.path.join(midi_file_dir,test_file), 'rb')
midiIn = MidiInFile(MidiToFloppy(), f)
midiIn.read()
channel_numbers = channels.keys()
time_scaling = tempo * 1e-8
#time_scaling = tempo * 2e-8



def play_channel(one_channel, name, pins):
    ft = FloppyThread(name=name, dir_pin=pins[0], step_pin=pins[1])
    ft.reset_drive()
    ft.start()
    for i in xrange(len(one_channel)-1):
        on, note, abs_time = one_channel[i]
        duration = one_channel[i+1][2] - one_channel[i][2]
        if on:
Example No. 22
 def readFile(self,filename):
     self.midi_in = MidiInFile(MidiHandler(self, self.zeroVelOnIsOff), filename)
     ## header and tracks get instantiated through the midi event handler
     self.midi_in.read()
Example No. 23
    if no == 221 or no == 425 or no == 445:
        continue

    filename = '%03d' % no  # zero-pad the file number to three digits
    # get data
    test_file = 'midi/files/' + filename + '.mid'

    # do parsing

    midiIn = MidiInFile(MidiReadList(), test_file)
    midiIn.read()

    midi_list = []
    midi_list = midiIn.parser.dispatch.outstream.note_list

    num_examples = 1
    max_seq_len = midiIn.parser.dispatch.outstream.alltime

    vector_0 = np.zeros(num_dims)

    out_shape = (num_examples, max_seq_len, num_dims)

    x_data = np.zeros(out_shape)
    y_data = np.zeros(out_shape)
Example No. 24
		self.song = Song()
	
	def tempo(self, value):
		bpm = 60.0 * 10.0**6 / value
		self.song.set_bpm(bpm)
		#print "Tempo", value, "BPM", bpm
		print self.song.bpm
	
	def note_on(self, channel=0, note=0x40, velocity=0x40):
		difficulty, value = noteMap[note]
		note = Note(time=self.abs_time(), value=value, type=NOTE_ON, velocity=velocity, channel=channel)
		self.song.add_note(difficulty, note)
	
	def note_off(self, channel=0, note=0x40, velocity=0x40):
		difficulty, value = noteMap[note]
		note = Note(time=self.abs_time(), value=value, type=NOTE_OFF, velocity=velocity, channel=channel)
		self.song.add_note(difficulty, note)
			

event_handler = TrackReader()

infile = "notes.mid"
midi_in = MidiInFile(event_handler, infile)
midi_in.read()

for difficulty,notes in event_handler.song.notes.iteritems():
	print "~~~~~~~~~~~~~~~~~~~", difficulty, "~~~~~~~~~~~~~~~~~~~"
	for time, note in notes.iteritems():
		print time, note.value, note.type

print "End of Line."
Example No. 25
#process numpy array -> generation file
#X data set with start time, end time, and total length
#return the wav file's playback time

#model = network_utils.create_lstm_network(num_frequency_dimensions=8820,
#                                          num_hidden_dimensions=1024)
#model_reverse = network_utils.create_lstm_network(num_frequency_dimensions=8820,
#                                          num_hidden_dimensions=1024)

model.load_weights(model_basename)
#model_reverse.load_weights('./Weights_reverse')

# do parsing

midiIn = MidiInFile(MidiReadList(), recv_file_mid)
midiIn.read()

midi_list = []
midi_list = midiIn.parser.dispatch.outstream.note_list

num_examples = 1
max_seq_len = midiIn.parser.dispatch.outstream.alltime

vector_0 = np.zeros(num_dims)

out_shape = (num_examples, max_seq_len, num_dims)

x_data = np.zeros(out_shape)

num_note = 0
Example No. 26
    def read_midi(self,
                  path,
                  name,
                  time_offset=[0.0, 0.0],
                  fg_channels=[1],
                  bg_channels=range(1, 17),
                  tStep=20,
                  tDelay=40.0,
                  legato=100.0,
                  tolerance=30.0):
        # absolute: *[1], relative: *[0]
        parser = SomaxMidiParser()
        midi_in = MidiInFile(parser, path)
        midi_in.read()
        midi_data = array(parser.get_matrix())
        fgmatrix, bgmatrix = splitMatrixByChannel(
            midi_data, fg_channels, bg_channels)  #de-interlacing information
        # creating harmonic ctxt
        if time_offset != [0.0, 0.0]:
            for i in range(0, len(fgmatrix)):
                fgmatrix[i][0] += time_offset[0]
                fgmatrix[i][5] += time_offset[1]
        if bgmatrix != []:
            if time_offset != [0.0, 0.0]:
                for i in range(0, len(bgmatrix)):
                    bgmatrix[i][0] += time_offset[0]
                    bgmatrix[i][5] += time_offset[1]
            harm_ctxt, tRef = computePitchClassVector(bgmatrix)
        else:
            harm_ctxt, tRef = computePitchClassVector(fgmatrix)

        # Initializing parameters
        lastNoteOnset = [0, -1 - tolerance]
        lastSliceOnset = list(lastNoteOnset)
        state_nb = 0
        global_time = time_offset
        corpus = dict({'name': "", 'typeID': "MIDI", 'size': 1, 'data': []})
        corpus["data"].append({"state": 0, "tempo":120, "time": {"absolute":[-1,0], "relative":[-1,0]}, "seg": [1,0], "beat":[0.0, 0.0, 0, 0], \
            "chroma": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "pitch":140, "notes":[]})
        tmp = dict()

        # Running over the matrix's notes
        for i in range(0, len(fgmatrix)):
            # note is not in current slice
            if (fgmatrix[i][5] > (lastSliceOnset[1] + tolerance)):
                # finalizing current slice
                if state_nb > 0:
                    previousSliceDuration = [
                        fgmatrix[i][0] - lastSliceOnset[0],
                        fgmatrix[i][5] - lastSliceOnset[1]
                    ]
                    corpus["data"][state_nb]["time"]["absolute"][1] = float(
                        previousSliceDuration[1])
                    corpus["data"][state_nb]["time"]["relative"][1] = float(
                        previousSliceDuration[0])
                    tmpListOfPitches = getPitchContent(corpus["data"],
                                                       state_nb, legato)
                    if len(tmpListOfPitches) == 0:
                        if useRests:
                            corpus["data"][state_nb]["pitch"] = 140  # silence
                        else:
                            state_nb -= 1  # delete slice
                    elif len(tmpListOfPitches) == 1:
                        corpus["data"][state_nb]["pitch"] = int(
                            tmpListOfPitches[0])  # simply take the pitch
                    else:
                        virtualfunTmp = virfun.virfun(
                            tmpListOfPitches, 0.293)  # take the virtual root
                        corpus["data"][state_nb]["pitch"] = int(
                            128 + (virtualfunTmp - 8) % 12)

                # create a new state
                state_nb += 1
                global_time = float(fgmatrix[i][5])

                tmp = dict()
                tmp["state"] = int(state_nb)
                tmp["time"] = dict()
                tmp["time"]["absolute"] = list([global_time, fgmatrix[i][6]])
                tmp["time"]["relative"] = list(
                    [fgmatrix[i][0], fgmatrix[i][1]])
                tmp["tempo"] = fgmatrix[i][7]
                frameNbTmp = int(ceil(
                    (fgmatrix[i][5] + tDelay - tRef) / tStep))
                if frameNbTmp <= 0:
                    tmp["chroma"] = [
                        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.
                    ]
                else:
                    tmp["chroma"] = harm_ctxt[:,
                                              min(frameNbTmp,
                                                  int(harm_ctxt.shape[1])
                                                  )].tolist()
                tmp["pitch"] = 0
                tmp["notes"] = []

                # if some notes ended in previous slice...
                for k in range(0, len(corpus["data"][state_nb - 1]["notes"])):
                    if ((corpus["data"][state_nb -
                                        1]["notes"][k]["time"]["relative"][0] +
                         corpus["data"][state_nb -
                                        1]["notes"][k]["time"]["relative"][1])
                            > previousSliceDuration[0]):
                        # adding lasting notes of previous slice to the new slice
                        note_to_add = dict()
                        note_to_add["pitch"] = int(
                            corpus["data"][state_nb - 1]["notes"][k]["pitch"])
                        note_to_add["velocity"] = int(
                            corpus["data"][state_nb -
                                           1]["notes"][k]["velocity"])
                        note_to_add["channel"] = int(
                            corpus["data"][state_nb -
                                           1]["notes"][k]["channel"])
                        note_to_add["time"] = dict()
                        note_to_add["time"]["relative"] = list(
                            corpus["data"][state_nb -
                                           1]["notes"][k]["time"]["relative"])
                        note_to_add["time"]["absolute"] = list(
                            corpus["data"][state_nb -
                                           1]["notes"][k]["time"]["absolute"])
                        note_to_add["time"]["relative"][
                            0] = note_to_add["time"]["relative"][0] - float(
                                previousSliceDuration[0])
                        note_to_add["time"]["absolute"][
                            0] = note_to_add["time"]["absolute"][0] - float(
                                previousSliceDuration[1])
                        if note_to_add["time"]["absolute"][0] > 0:
                            note_to_add["velocity"] = int(
                                corpus["data"][state_nb -
                                               1]["notes"][k]["velocity"])
                        else:
                            note_to_add["velocity"] = 0
                        tmp["notes"].append(note_to_add)
                # adding the new note
                tmp["notes"].append(dict())
                n = len(tmp["notes"]) - 1
                tmp["notes"][n] = {
                    "pitch": fgmatrix[i][3],
                    "velocity": fgmatrix[i][4],
                    "channel": fgmatrix[i][2],
                    "time": dict()
                }
                tmp["notes"][n]["time"]["absolute"] = [0, fgmatrix[i][6]]
                tmp["notes"][n]["time"]["relative"] = [0, fgmatrix[i][1]]

                #update variables used during the slicing process
                lastNoteOnset = [fgmatrix[i][0], fgmatrix[i][5]]
                lastSliceOnset = [fgmatrix[i][0], fgmatrix[i][5]]
                corpus["data"].append(tmp)
            else:
                # note in current slice
                nbNotesInSlice = len(corpus["data"][state_nb]["notes"])
                offset = fgmatrix[i][5] - corpus["data"][state_nb]["time"][
                    "absolute"][0]
                offset_r = fgmatrix[i][0] - corpus["data"][state_nb]["time"][
                    "relative"][0]
                tmp["notes"].append({
                    "pitch": fgmatrix[i][3],
                    "velocity": fgmatrix[i][4],
                    "channel": fgmatrix[i][2],
                    "time": dict()
                })
                tmp["notes"][nbNotesInSlice]["time"]["absolute"] = [
                    offset, fgmatrix[i][6]
                ]
                tmp["notes"][nbNotesInSlice]["time"]["relative"] = [
                    offset_r, fgmatrix[i][1]
                ]

                # extending slice duration
                if ((fgmatrix[i][6] + offset) >
                        corpus["data"][state_nb]["time"]["absolute"][1]):
                    corpus["data"][state_nb]["time"]["absolute"][
                        1] = fgmatrix[i][6] + int(offset)
                    corpus["data"][state_nb]["time"]["relative"][
                        1] = fgmatrix[i][1] + int(offset_r)
                lastNoteOnset = [fgmatrix[i][0], fgmatrix[i][5]]

        # finalize the current slice
        global_time = fgmatrix[i][5]
        lastSliceDuration = corpus["data"][state_nb]["time"]["absolute"][1]
        nbNotesInLastSlice = len(corpus["data"][state_nb]["notes"])
        tmpListOfPitches = getPitchContent(corpus["data"], state_nb, legato)
        if len(tmpListOfPitches) == 0:
            if useRests:
                corpus["data"][state_nb]["pitch"] = 140  # silence
            else:
                state_nb -= 1  # delete slice
        elif len(tmpListOfPitches) == 1:
            corpus["data"][state_nb]["pitch"] = int(tmpListOfPitches[0])
        else:
            virtualFunTmp = virfun.virfun(tmpListOfPitches, 0.293)
            corpus["data"][state_nb]["pitch"] = int(128 +
                                                    (virtualFunTmp - 8) % 12)

        frameNbTmp = int(ceil((fgmatrix[i][5] + tDelay - tRef) / tStep))
        if (frameNbTmp <= 0):
            corpus["data"][state_nb]["chroma"] = [
                0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
            ]
        else:
            corpus["data"][state_nb][
                "chroma"] = harm_ctxt[:,
                                      min(frameNbTmp, int(harm_ctxt.shape[1])
                                          )].tolist()
        corpus["size"] = state_nb + 1
        return corpus
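
A hedged sketch of reading back the corpus dictionary that read_midi returns; the reader instance and path are illustrative, and the keys match the ones built above:

corpus = builder.read_midi('input.mid', 'my_corpus')   # hypothetical instance and path
print corpus['size'], 'slices'
for state in corpus['data'][1:]:                        # entry 0 is the empty initial state
    onset, duration = state['time']['absolute']
    print state['state'], state['pitch'], onset, duration, len(state['notes'])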