Code Example #1
def midi_filename_to_piano_roll(midi_filename):
    midi_data = midiread(midi_filename, dt=0.3)
    piano_roll = midi_data.piano_roll.T

    # Binarize pressed notes
    piano_roll[piano_roll > 0] = 1

    return piano_roll
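
A minimal usage sketch for the helper above, assuming midiread comes from the Theano RNN-RBM tutorial's MIDI utilities and that 'example.mid' is a placeholder file name:

roll = midi_filename_to_piano_roll('example.mid')  # hypothetical path
# After the transpose, rows are pitches and columns are 0.3 s time steps;
# values are 0/1 because the roll was binarized above
print(roll.shape, roll.max())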
Code Example #2
File: rnnrbm_tf.py Project: supalaaak/rnnrbm_tf
    def train(self, files, batch_size=100, num_epochs=200):
        '''Train the RNN-RBM via stochastic gradient descent (SGD) using MIDI
        files converted to piano-rolls.

        files : list of strings
            List of MIDI files that will be loaded as piano-rolls for training.
        batch_size : integer
            Training sequences will be split into subsequences of at most this
            size before applying the SGD updates.
        num_epochs : integer
            Number of epochs (pass over the training set) performed. The user
            can safely interrupt training with Ctrl+C at any time.'''

        assert len(files) > 0, 'Training set is empty!' \
                               ' (did you download the data files?)'
    
        # Convert each MIDI file to a binary piano-roll matrix (time x pitch)
        dataset = [midiread(f, self.r, self.dt).piano_roll.astype(numpy.float32)
                   for f in files]

        n_visible = self.r[1] - self.r[0]

        # Placeholder for one piano-roll subsequence
        v = tf.placeholder(tf.float32, shape=(None, n_visible))

        with tf.variable_scope("model") as scope:
            try:
                para = initialize_parameters(n_visible, self.n_hidden,
                                             self.n_hidden_recurrent)
            except ValueError:
                # Variables already exist (e.g. train() was called before): reuse them
                scope.reuse_variables()
                para = initialize_parameters(n_visible, self.n_hidden,
                                             self.n_hidden_recurrent)

        (v_sample, cost, monitor) = build_rnnrbm(v, para)

        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(monitor)
        init = tf.global_variables_initializer()

        try:
            with tf.Session() as sess:
                sess.run(init)
                for epoch in range(num_epochs):
                    numpy.random.shuffle(dataset)
                    costs = []
                    for s, sequence in enumerate(dataset):
                        for i in range(0, len(sequence), batch_size):
                            _, cost2 = sess.run([optimizer, monitor],
                                                feed_dict={v: sequence[i:i + batch_size]})
                            costs.append(cost2)
                    print('Epoch %i/%i' % (epoch + 1, num_epochs))
                    print(numpy.mean(costs))
                    sys.stdout.flush()

                # Sample a new piano-roll from the trained model and write it as MIDI
                W1 = gen_rnnrbm(para, nsteps=200)
                piano_roll = sess.run(W1)
                #piano_roll = tf.reshape(piano_roll, [piano_roll.shape[0].value, piano_roll.shape[2].value])
                print(piano_roll)
                midiwrite('sample5.mid', piano_roll, self.r, self.dt)
        except KeyboardInterrupt:
            print('Interrupted by user.')
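
A hypothetical driver for the train() method shown above. The class name RnnRbmTF, its constructor, and the data directory are placeholders, not part of the project; only the train() signature comes from the snippet itself:

import glob

model = RnnRbmTF()                     # hypothetical wrapper class exposing r, dt, n_hidden, n_hidden_recurrent
files = glob.glob('data/train/*.mid')  # placeholder directory of MIDI files
model.train(files, batch_size=100, num_epochs=200)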
Code Example #3
File: midi_utils.py Project: FromGold/GenerativeLSTM
def midi_to_events_roll(directory):
    path = directory + '/*.mid'
    files = glob.glob(path)
    notes_range = find_notes_range(directory)
    left_hand_top = 64
    max_vel = 128.
    notes_length = notes_range['M'] - notes_range['m'] + 1
    vector_len = notes_length + 2 + 2 + 1  # n bits per note + 2 bits for velocity + 2 bits for note duration + waiting time before the next note
    events_set = []
    for file in files:
        try:
            m = midiread(file)
        except Exception:
            continue
        i = 0
        song = []
        # Sort first by pitch, then by note start time
        events = sorted(m.notes, key=lambda x: x[0])
        events = sorted(events, key=lambda x: x[1])
        for j in range(len(events) + 1):
            # Merge notes that start at the same instant: keep scanning until the
            # start time changes or the end of the event list is reached
            if j != len(events) and events[i][1] == events[j][1]:
                continue
            a = [0] * vector_len
            hands = ([], [])
            notes = events[i:j]
            for p in notes:
                # Encode the notes pressed together in the current vector
                a[p[0] - notes_range['m']] = 1
                # Notes in the left part of the keyboard are assigned to the left hand
                if p[0] < left_hand_top:
                    hands[0].append(p)
                else:
                    hands[1].append(p)
            for idx in range(0, 2):
                if len(hands[idx]):
                    vel = [l[3] for l in hands[idx]]
                    span = [l[2] - l[1] for l in hands[idx]]
                    # Mean normalised velocity and mean note duration for this hand
                    a[notes_length + idx] = float("%.3f" % ((sum(vel) / len(vel)) / max_vel))
                    a[notes_length + idx + 2] = float("%.3f" % (sum(span) / len(span)))
            if j < len(events):
                a[-1] = events[j][1] - events[i][1]
            else:
                # Last chord: use the time at which the last note stops sounding as
                # the waiting time before the next note
                a[-1] = float("%.2f" % (max([n[2] for n in events]) - events[i][1]))
            i = j
            song.append(a)

        events_set.append(song)

    return events_set, notes_range
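
A short usage sketch for midi_to_events_roll. The directory name is a placeholder; the expected vector length follows directly from the vector_len formula in the code above:

songs, notes_range = midi_to_events_roll('midi_corpus')  # hypothetical directory of .mid files
print(len(songs))        # number of files that midiread parsed successfully
print(len(songs[0][0]))  # (notes_range['M'] - notes_range['m'] + 1) + 2 + 2 + 1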
Code Example #4
File: midi_utils.py Project: FromGold/GenerativeLSTM
def find_notes_range(directory):
    path = directory + '/*.mid'
    files = glob.glob(path)
    notes_range = {'m': 128, 'M': 0}  # start inverted so the first note seen narrows the range
    for file in files:
        try:
            m = midiread(file)
        except Exception:
            continue
        for note in m.notes:
            if note[0] < notes_range['m']:
                notes_range['m'] = note[0]
            if note[0] > notes_range['M']:
                notes_range['M'] = note[0]
    return notes_range
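
find_notes_range is the helper used by midi_to_events_roll to fix the pitch range before encoding; a minimal check, again with a placeholder directory:

r = find_notes_range('midi_corpus')  # hypothetical directory
print(r['m'], r['M'])                # lowest and highest MIDI pitch found across the corpus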
Code Example #5
File: rnnrbm.py Project: lucl1/ML-Learning
    def train(self, files, batch_size=100, num_epochs=200):
        '''Train the RNN-RBM via stochastic gradient descent (SGD) using MIDI
        files converted to piano-rolls.

        files : list of strings
            List of MIDI files that will be loaded as piano-rolls for training.
        batch_size : integer
            Training sequences will be split into subsequences of at most this
            size before applying the SGD updates.
        num_epochs : integer
            Number of epochs (pass over the training set) performed. The user
            can safely interrupt training with Ctrl+C at any time.'''

        assert len(files) > 0, 'Training set is empty!' \
                               ' (did you download the data files?)'
        dataset = [
            midiread(f, self.r,
                     self.dt).piano_roll.astype(theano.config.floatX)
            for f in files
        ]

        try:
            for epoch in range(num_epochs):
                numpy.random.shuffle(dataset)
                costs = []

                for s, sequence in enumerate(dataset):
                    for i in range(0, len(sequence), batch_size):
                        cost = self.train_function(sequence[i:i + batch_size])
                        costs.append(cost)

                print('Epoch %i/%i' % (epoch + 1, num_epochs))
                print(numpy.mean(costs))
                sys.stdout.flush()

        except KeyboardInterrupt:
            print('Interrupted by user.')