Example 1
    def build_and_train(self, x, y):
        from keras.layers import Dense, Conv1D, MaxPool1D, Flatten
        from keras.models import Sequential
        from sklearn.preprocessing import LabelBinarizer

        encoder = LabelBinarizer()
        onehot_y = encoder.fit_transform(y)
        

        lx,ly,lz = x.shape
        self.meta.update({
            "word_vec_size": lz,
            "len_words": ly,
            "encoder_classes": encoder.classes_
        })

        model = Sequential()
        model.add(Conv1D(2, 3, input_shape=(ly, lz), activation='sigmoid'))
        # model.add(MaxPool1D())
        model.add(Flatten())
        model.add(Dense(3, activation='sigmoid'))
        model.add(Dense(onehot_y.shape[1], activation='softmax'))

        model.compile('Adam', loss='mse')

        model.fit(x, onehot_y, epochs=100)

        score = float(model.test_on_batch(x, onehot_y))
        self.meta['score'] = score
        self.set_status_message(f'Model fit complete. Score {score}')
        return model
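A note on the score above: what test_on_batch returns depends on how the model was compiled. A minimal standalone sketch (made-up data, same Keras API as the example):

import numpy as np
from keras.layers import Dense
from keras.models import Sequential

model = Sequential([Dense(2, input_shape=(4,), activation='softmax')])
x = np.random.rand(8, 4)
y = np.eye(2)[np.random.randint(0, 2, 8)]

model.compile('Adam', loss='mse')                   # no metrics: a scalar loss
print(model.test_on_batch(x, y))

model.compile('Adam', loss='mse', metrics=['acc'])  # with metrics: [loss, acc]
print(model.test_on_batch(x, y))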
Example 2
def NN_Basic(dataX, dataY, labels, look_back=10):

    dataX, dataY, labels = ppu.combine_history(dataX,
                                               dataY,
                                               labels,
                                               look_back=look_back)
    dataX = (dataX - np.min(dataX)) / (np.max(dataX) - np.min(dataX))
    input_dim = dataX.shape[1]

    model = Sequential()
    model.add(Dense(64, activation='relu', input_dim=input_dim))
    model.add(Dropout(0.2))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='sgd',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(dataX, labels * 1.0, epochs=1000, batch_size=8)
    print "Neural Network Basic Accuracy:" + str(
        model.test_on_batch(dataX, labels)[1])
    plt.plot(range(1000), history.history['acc'])
    plt.show()

    return model
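Note that the function fits and scores on the same normalized dataX, so the printed figure is training-set accuracy. A hedged sketch of scoring on held-out data instead (hypothetical split, same model as above):

from sklearn.model_selection import train_test_split

X_tr, X_te, y_tr, y_te = train_test_split(dataX, labels, test_size=0.2)
model.fit(X_tr, y_tr * 1.0, epochs=1000, batch_size=8)
print("Neural Network held-out accuracy: " +
      str(model.test_on_batch(X_te, y_te * 1.0)[1]))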
Example 3
def main():
    if len(sys.argv) <= 1:
        print("Usage: {0} [pointers/training.txt]".format(sys.argv[0]))
        return

    pointers_file = sys.argv[1]

    dataset, params = load(
        pointers_file)[::6]  # step 6 keeps the first and last of the 7 values returned
    print(str(len(dataset)) + " " + str(len(params)))
    print(params)

    print("Intitializing neural network...")

    model = Sequential()
    model.add(Dropout(0.1, input_shape=(7500, )))
    model.add(Dense(7500))
    model.add(Activation('tanh'))
    model.add(Dropout(0.05))
    model.add(Dense(1875))
    model.add(Activation('tanh'))
    model.add(Dense(512))
    model.add(Activation('tanh'))
    model.add(Dropout(0.01))
    model.add(Dense(128))
    model.add(Activation('tanh'))
    model.add(Dense(4))  # output // 4 categories
    model.add(Activation('softmax'))
    ''' model.add(Dense(8, input_dim=7500))
    model.add(Activation('tanh'))
    model.add(Dense(4)) # output // 4 categories
    model.add(Activation('softmax')) '''

    adadelta = Adadelta(lr=0.01)
    adam = Adam(lr=0.01)
    sgd = SGD(lr=0.01)
    model.compile(loss='binary_crossentropy',
                  optimizer=sgd)  #, metrics=['accuracy'])
    #model.compile(loss='categorical_crossentropy', optimizer=sgd)#, metrics=['categorical_accuracy'])

    print("Training neural network...")

    model.fit(np.array(dataset),
              np.array(params),
              verbose=1,
              batch_size=1,
              epochs=5)  #, validation_split=0.3, shuffle=True)

    testx = np.array(dataset[0:1])
    testy = np.array(params[0:1])
    testx2 = np.array([dataset[-1]])
    testx3 = np.array([dataset[8]])

    print(model.predict_on_batch(np.array(dataset)))
    print(model.predict(np.array(dataset)))
    print(model.predict_classes(np.array(dataset)))
    score = model.evaluate(testx, testy)
    print(score)
    print(model.test_on_batch(testx, testy))
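A compatibility note on the predict_classes call above: Sequential.predict_classes was removed from recent Keras releases. On current versions the equivalent is the argmax of the predicted probabilities:

print(np.argmax(model.predict(np.array(dataset)), axis=-1))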
Example 4
    def BuildModelKeras(self, test_len, error_tol):
        start = time.time()
        model = Sequential()
        # build a LSTM RNN
        # model.add(ResidualKeras(units=self.n_hidden, input_shape=(self.n_steps, self.n_input)))
        model.add(
            LSTM(units=self.n_hidden,
                 input_shape=(self.n_steps, self.n_input)))
        model.add(Dense(self.n_classes))
        # compile
        adam = Adam(self.learning_rate)
        model.compile(optimizer=adam, loss='mean_squared_error')
        # data
        no_of_samples = np.shape(self.X)[0]
        test_data = self.X[no_of_samples - test_len:].reshape(
            (-1, self.n_steps, self.n_input))
        test_label = self.Y[no_of_samples - test_len:].reshape(
            (-1, self.n_classes))
        step = 1
        cost = error_tol
        Loss = []
        Elapsed = []

        while (step < self.training_iters) and (cost >= error_tol):
            # np.random.random_integers (inclusive upper bound) is gone from
            # newer NumPy; randint with the bound shifted by one is equivalent
            index = np.random.randint(0, no_of_samples - test_len + 1,
                                      self.batch_size)
            batch_x = self.X[index, :]
            batch_y = self.Y[index].reshape((self.batch_size, 1))
            batch_x = batch_x.reshape(
                (self.batch_size, self.n_steps, self.n_input))
            cost = model.train_on_batch(batch_x, batch_y)
            pred = model.predict(batch_x, self.batch_size)
            print("Iter " + str(step) + ", Training Accuracy= " +
                  "{:.7f}".format(cost))
            if step % self.display_step == 0:
                Loss.append(cost)
                Elapsed.append((time.time() - start) / 3600)
                print("Iter " + str(step) + ", Training Accuracy= " +
                      "{:.7f}".format(cost))
            step += 1
        """
        train_x = self.X.reshape((2000, self.n_steps, self.n_input))
        train_y = self.Y.reshape((2000,1))
        model.fit(train_x, train_y, epochs=5000, batch_size=self.batch_size)
        """
        model.summary()
        # Calculate accuracy for test data
        result = model.test_on_batch(test_data, test_label)
        # Save the model
        model.save('Kerasmodel.h5')
        print("Testing Loss:", result)
        elapsed = time.time() - start
        """
        if self.deg_of_sig==0:
            plt.plot(Elapsed, Loss, label='Time series')
        else:
            plt.plot(Elapsed, Loss, label='degree %d' %self.deg_of_sig)
        """
        return {'Loss': result, 'Time': elapsed, 'NStep': step}
Example 5
    def BuildModelKeras(self, test_len, error_tol):
        start = time.time()
        model = Sequential()
        # build a LSTM RNN
        # model.add(ResidualKeras(units=self.n_hidden, input_shape=(self.n_steps, self.n_input)))
        model.add(
            LSTM(units=self.n_hidden,
                 input_shape=(self.n_steps, self.n_input)))
        model.add(Dense(self.n_classes))
        # compile
        adam = Adam(self.learning_rate)
        model.compile(optimizer=adam, loss='mean_squared_error')
        # data
        no_of_samples = np.shape(self.X)[0]
        test_data = self.X[no_of_samples - test_len:].reshape(
            (-1, self.n_steps, self.n_input))
        test_label = self.Y[no_of_samples - test_len:].reshape(
            (-1, self.n_classes))
        step = 1
        cost = error_tol
        Loss = []
        Elapsed = []

        while (step < self.training_iters) and (cost >= error_tol):
            # np.random.random_integers (inclusive upper bound) is gone from
            # newer NumPy; randint with the bound shifted by one is equivalent
            index = np.random.randint(0, no_of_samples - test_len + 1,
                                      self.batch_size)
            batch_x = self.X[index, :]
            batch_y = self.Y[index].reshape((self.batch_size, 1))
            batch_x = batch_x.reshape(
                (self.batch_size, self.n_steps, self.n_input))
            cost = model.train_on_batch(batch_x, batch_y)
            pred = model.predict(batch_x, self.batch_size)
            if step % self.display_step == 0:
                Loss.append(cost)
                Elapsed.append((time.time() - start) / 3600)
                print("Iter " + str(step) + ", Training Accuracy= " +
                      "{:.7f}".format(cost))
            step += 1

        model.summary()
        # Calculate accuracy for test data
        result = model.test_on_batch(test_data, test_label)
        # Save the model
        model.save(self.prefix + 'model_logsig%d_segment%d.h5' %
                   (self.deg_of_logsig, self.n_steps))
        elapsed = time.time() - start

        test_pred = model.predict(test_data)

        return {
            'Loss': result,
            'Time': elapsed,
            'Pred': test_pred,
            'model': model
        }
Example 6
    def build_and_train(self, x, y):
        from keras.layers import Dense, Activation, Dropout, Flatten
        from keras.models import Sequential
        from sklearn.preprocessing import LabelBinarizer
        from pathlib import Path  # used below to derive the plot filename
        import matplotlib.pyplot as plt

        encoder = LabelBinarizer()
        onehot_y = encoder.fit_transform(y)

        lx, ly, lz = x.shape
        self.meta.update({
            "word_vec_size": lz,
            "len_words": ly,
            "encoder_classes": encoder.classes_
        })

        model = Sequential()

        # many layers
        model.add(Dense(120, input_shape=(ly, lz)))
        model.add(Dense(160, activation='tanh'))
        model.add(Dropout(0.1))
        model.add(Dense(100, activation='sigmoid'))
        model.add(Dropout(0.2))
        model.add(Dense(60, activation='relu'))
        model.add(Dropout(0.3))
        model.add(Dense(80, activation='hard_sigmoid'))
        model.add(Flatten())
        model.add(Dense(onehot_y.shape[1], activation='softmax'))

        model.compile('Adam', loss='mse', metrics=['acc'])

        history = model.fit(x,
                            onehot_y,
                            batch_size=5,
                            epochs=100,
                            validation_split=0.1)

        scores = float(model.test_on_batch(x, onehot_y)[1])  # [loss, acc] -> acc
        print(f'accuracy on the test data: {scores*100}')

        plt.figure(0)
        plt.subplot(2, 1, 1)
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'], '--')
        plt.title('Loss')
        plt.legend(['Loss', 'Val loss'])

        plt.subplot(2, 1, 2)
        plt.title('Acc')
        plt.plot(history.history['acc'])
        plt.savefig(str(Path(self.output().path).with_suffix('.png')))

        return model
Example 7
    def build_and_train(self, x, y):
        from keras.layers import Dense, Activation, Dropout, Flatten
        from keras.models import Sequential
        from sklearn.preprocessing import LabelBinarizer
        #from keras.callbacks import ModelCheckpoint
        from pathlib import Path  # used below to derive the plot filename
        import matplotlib.pyplot as plt

        encoder = LabelBinarizer()
        onehot_y = encoder.fit_transform(y)

        lx, ly, lz = x.shape
        self.meta.update({
            "word_vec_size": lz,
            "len_words": ly,
            "encoder_classes": encoder.classes_
        })

        model = Sequential()

        #+drop 0.1
        model.add(Dense(60, input_shape=(ly, lz)))
        model.add(Dropout(0.1))
        #activate fun exp
        model.add(Dense(60, activation='relu'))
        #drop +0.1
        model.add(Dropout(0.2))
        model.add(Flatten())
        model.add(Dense(onehot_y.shape[1], activation='softmax'))

        model.compile('Adam', loss='mse', metrics=['acc'])
        #checkpointer = ModelCheckpoint(filepath='C:/Users/dkkay/Desktop/Дипломная/train-nn-dev/dist/checkpoints', verbose=1, save_best_only=True)

        history = model.fit(x,
                            onehot_y,
                            batch_size=5,
                            epochs=100,
                            validation_split=0)  #callbacks=[checkpointer])

        scores = float(model.test_on_batch(x, onehot_y)[1])  # [loss, acc] -> acc
        print(f'accuracy on the test data: {scores*100}')

        plt.figure(0)
        plt.subplot(2, 1, 1)
        plt.plot(history.history['loss'])
        if 'val_loss' in history.history:  # absent when validation_split=0
            plt.plot(history.history['val_loss'], '--')
        plt.title('Loss')
        plt.legend(['Loss', 'Val loss'])

        plt.subplot(2, 1, 2)
        plt.title('Acc')
        plt.plot(history.history['acc'])
        plt.savefig(str(Path(self.output().path).with_suffix('.png')))

        return model
Example 8
class LSTM_RNN:
    def __init__(self,
                 look_back,
                 dropout_probability=0.2,
                 init='he_uniform',
                 loss='mse',
                 optimizer='rmsprop'):
        self.rnn = Sequential()
        self.look_back = look_back
        self.rnn.add(
            LSTM(10, stateful=True, batch_input_shape=(1, 1, 1), init=init))
        self.rnn.add(Dropout(dropout_probability))
        self.rnn.add(Dense(1, init=init))
        self.rnn.compile(loss=loss, optimizer=optimizer)

    def batch_train_test(self, trainX, trainY, testX, testY, nb_epoch=150):
        print('Training LSTM-RNN...')
        for epoch in range(nb_epoch):
            print('Epoch ' + str(epoch + 1) + '/{}'.format(nb_epoch))
            training_losses = []
            testing_losses = []
            for i in range(len(trainX)):
                y_actual = trainY[i]
                for j in range(self.look_back):
                    training_loss = self.rnn.train_on_batch(
                        np.expand_dims(np.expand_dims(trainX[i][j], axis=1),
                                       axis=1), np.array([y_actual]))
                    training_losses.append(training_loss)
                self.rnn.reset_states()

            print('Mean training loss = {}'.format(np.mean(training_losses)))

            mean_testing_loss = []
            for i in range(len(testX)):
                for j in range(self.look_back):
                    testing_loss = self.rnn.test_on_batch(
                        np.expand_dims(np.expand_dims(testX[i][j], axis=1),
                                       axis=1), np.array([testY[i]]))
                    testing_losses.append(testing_loss)
                self.rnn.reset_states()

                for j in range(self.look_back):
                    y_pred = self.rnn.predict_on_batch(
                        np.expand_dims(np.expand_dims(testX[i][j], axis=1),
                                       axis=1))
                self.rnn.reset_states()

            mean_testing_loss = np.mean(testing_losses)
            print('Mean testing loss = {}'.format(mean_testing_loss))
        return mean_testing_loss
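The loop above relies on statefulness: with batch_input_shape=(1, 1, 1), every call consumes exactly one timestep and the LSTM keeps its internal state across calls until reset_states(). A minimal standalone sketch of that contract (made-up data):

import numpy as np
from keras.layers import LSTM, Dense
from keras.models import Sequential

rnn = Sequential()
rnn.add(LSTM(10, stateful=True, batch_input_shape=(1, 1, 1)))
rnn.add(Dense(1))
rnn.compile(loss='mse', optimizer='rmsprop')

sequence = np.random.rand(5, 1, 1, 1)   # five timesteps of one sequence
for step in sequence:                    # state carries across these calls
    loss = rnn.train_on_batch(step, np.array([0.5]))
rnn.reset_states()                       # forget state before the next sequence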
Example 9
def train_model(feature_layers,
                classification_layers,
                image_list,
                nb_epoch,
                nb_classes,
                img_rows,
                img_cols,
                weights=None):
    # Create testset data for cross-val
    num_images = len(image_list)
    test_size = int(0.2 * num_images)
    print("Train size: ", num_images - test_size)
    print("Test size: ", test_size)

    model = Sequential()
    for l in feature_layers + classification_layers:
        model.add(l)

    if weights is not None:
        model.set_weights(weights)

    # let's train the model using SGD + momentum (how original).
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    print('Using real time data augmentation')
    for e in range(nb_epoch):
        print('-' * 40)
        print('Epoch', e)
        print('-' * 40)
        print('Training...')
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(num_images - test_size)
        for X_batch, Y_batch in flow(image_list[0:-test_size]):
            X_batch = X_batch.reshape(X_batch.shape[0], 3, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            loss = model.train_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('train loss', loss)])

        print('Testing...')
        # test time!
        progbar = generic_utils.Progbar(test_size)
        for X_batch, Y_batch in flow(image_list[-test_size:]):
            X_batch = X_batch.reshape(X_batch.shape[0], 3, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            score = model.test_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('test loss', score)])
    return model, model.get_weights()
Example 10
    def test_model_learning_phase(self):
        def one_zero(x):
            # in_train_phase(a, b) evaluates to a when the learning phase is 1
            # (training) and to b when it is 0 (testing), so this layer outputs
            # zeros during train_on_batch and ones during test_on_batch
            return K.in_train_phase(K.zeros_like(x), K.ones_like(x))

        model = Sequential([Lambda(one_zero, input_shape=(1, ))])
        model.compile("sgd", "mse")

        x = np.random.rand(10, 1)
        l1 = model.test_on_batch(x, np.ones_like(x))
        l2 = model.train_on_batch(x, np.zeros_like(x))
        self.assertEqual(l1, 0)
        self.assertEqual(l2, 0)

        model = OracleWrapper(model, BiasedReweightingPolicy(), score="loss")
        l1 = model.evaluate_batch(x, np.ones_like(x))[0].sum()
        l2 = model.train_batch(x, np.zeros_like(x), np.ones_like(x))[0].sum()
        self.assertEqual(l1, 0)
        self.assertEqual(l2, 0)
Example 11
def NN_LSTM(dataX, dataY, labels, look_back):

    trainX = np.reshape(dataX, (dataX.shape[0], 1, dataX.shape[1]))

    model = Sequential()
    model.add(LSTM(input_dim=4, output_dim=16))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(optimizer='sgd',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(trainX, labels, epochs=1000, batch_size=look_back)
    print "Neural Network LSTM Accuracy:" + str(
        model.test_on_batch(trainX, labels)[1])
    plt.plot(range(1000), history.history['acc'])
    plt.show()
Example 12
def train():
    # model
    X_shape = (FLAGS.image_height, FLAGS.image_width, 3)
    model = Sequential()
    
    model.add(Conv2D(128, kernel_size=3, padding='same', strides=1, input_shape=X_shape))
    model.add(advanced_activations.LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # conv => RELU => POOL
    model.add(Conv2D(256, kernel_size=3, padding='same', strides=1))
    model.add(advanced_activations.LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    # conv => RELU => POOL
    model.add(Conv2D(512, kernel_size=3, padding='same', strides=1))
    model.add(advanced_activations.LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    
    model.add(Conv2D(1024, kernel_size=3, padding='same', strides=1))
    model.add(advanced_activations.LeakyReLU(alpha=0.3))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    
    model.add(Flatten())  # flatten the conv feature maps for the classifier
    model.add(Dense(FLAGS.charset_size))
    model.add(Activation('softmax'))

    optimizer = Adam()
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])


    for epoch in range(FLAGS.num_epochs):
        for i, (X_train, y_train) in enumerate(train_data_iterator()):
            X_train, y_train = np.array(X_train), np.array(y_train)
            loss = model.train_on_batch(X_train, y_train)
            print(f"epoch: {epoch} step: {i} loss: {loss}")

            if i % 100 == 0:
                X_test, y_test = test_data_helper()
                X_test, y_test = np.array(X_test), np.array(y_test)
                score = model.test_on_batch(X_test, y_test)
                print(f"score: {score}")

if __name__ == '__main__':
    train()
Example 13
def train_model(feature_layers, classification_layers, image_list, nb_epoch, nb_classes, img_rows, img_cols, weights=None): 
    # Create testset data for cross-val
    num_images = len(image_list)
    test_size = int(0.2 * num_images)
    print("Train size: ", num_images-test_size)
    print("Test size: ", test_size)

    model = Sequential()
    for l in feature_layers + classification_layers:
        model.add(l)

    if weights is not None:
        model.set_weights(weights)

    # let's train the model using SGD + momentum (how original).
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)
    
    print('Using real time data augmentation')
    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print('Training...')
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(num_images-test_size)
        for X_batch, Y_batch in flow(image_list[0:-test_size]):
            X_batch = X_batch.reshape(X_batch.shape[0], 3, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            loss = model.train_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('train loss', loss)])

        print('Testing...')
        # test time!
        progbar = generic_utils.Progbar(test_size)
        for X_batch, Y_batch in flow(image_list[-test_size:]):
            X_batch = X_batch.reshape(X_batch.shape[0], 3, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            score = model.test_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('test loss', score)])
    return model, model.get_weights()
Example 14
class LSTM_RNN:

    def __init__(self, look_back, dropout_probability = 0.2, init ='he_uniform', loss='mse', optimizer='rmsprop'):
        self.rnn = Sequential()
        self.look_back = look_back
        self.rnn.add(LSTM(10, stateful = True, batch_input_shape=(1, 1, 1), init=init))
        self.rnn.add(Dropout(dropout_probability))
        self.rnn.add(Dense(1, init=init))
        self.rnn.compile(loss=loss, optimizer=optimizer)

    def batch_train_test(self, trainX, trainY, testX, testY, nb_epoch=150):
        print('Training LSTM-RNN...')
        for epoch in range(nb_epoch):
            print('Epoch '+ str(epoch+1) +'/{}'.format(nb_epoch))
            training_losses = []
            testing_losses = []
            for i in range(len(trainX)):
                y_actual = trainY[i]
                for j in range(self.look_back):
                    training_loss = self.rnn.train_on_batch(np.expand_dims(np.expand_dims(trainX[i][j], axis=1), axis=1),
                                                       np.array([y_actual]))
                    training_losses.append(training_loss)
                self.rnn.reset_states()

            print('Mean training loss = {}'.format(np.mean(training_losses)))

            mean_testing_loss = []
            for i in range(len(testX)):
                for j in range(self.look_back):
                    testing_loss = self.rnn.test_on_batch(np.expand_dims(np.expand_dims(testX[i][j], axis=1), axis=1),
                                                          np.array([testY[i]]))
                    testing_losses.append(testing_loss)
                self.rnn.reset_states()

                for j in range(self.look_back):
                    y_pred = self.rnn.predict_on_batch(np.expand_dims(np.expand_dims(testX[i][j], axis=1), axis=1))
                self.rnn.reset_states()

            mean_testing_loss = np.mean(testing_losses)
            print('Mean testing loss = {}'.format(mean_testing_loss))
        return mean_testing_loss
Example 15
    print('Iteration', iteration)

    print("Training")
    progbar = generic_utils.Progbar(train_num_samples)
    gen = samples_generator(train_sequences, batch_size,
                            num_samples=train_num_samples)
    for X, y in gen:
        loss, accuracy = model.train_on_batch(X, y, accuracy=True)
        progbar.add(batch_size, values=[("train loss", loss),
                    ("train acc", accuracy)])
    print()

    print("Validating")
    progbar = generic_utils.Progbar(valid_num_samples)
    gen = samples_generator(valid_sequences, batch_size,
                            num_samples=valid_num_samples)
    valid_loss = 0
    for X, y in gen:
        loss, accuracy = model.test_on_batch(X, y, accuracy=True)
        progbar.add(batch_size, values=[("valid loss", loss),
                    ("valid acc", accuracy)])
        valid_loss += loss
    print()
    valid_loss /= float(valid_num_samples)

    print("Valid Loss: {}, Best Loss: {}".format(valid_loss, best_loss))
    if valid_loss < best_loss:
        print("Saving model")
        save_model(model, "sentence_model")
        best_loss = valid_loss
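The train_on_batch(X, y, accuracy=True) signature in this fragment is the pre-1.0 Keras API. On Keras 1.x and later the accuracy comes from compile-time metrics instead; a sketch of the equivalent calls:

model.compile(loss='categorical_crossentropy', optimizer='sgd',
              metrics=['accuracy'])
loss, accuracy = model.train_on_batch(X, y)
loss, accuracy = model.test_on_batch(X, y)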
Example 16
def main():
    # the data, shuffled and split between tran and test sets
    data, labels = load_data(filename)
    mat = scipy.io.loadmat(subjectsFilename, mat_dtype=True)
    subjNumbers = np.squeeze(mat['subjectNum'])     # subject IDs for each trial

    # Creating the folds
    # kf = StratifiedKFold(np.squeeze(labels), n_folds=ksplit, shuffle=True, random_state=123)
    # kf = KFold(labels.shape[0], n_folds=ksplit, shuffle=True, random_state=123)
    # fold_pairs = [(tr, ts) for (tr, ts) in kf]

    # Leave-Subject-Out cross validation
    fold_pairs = []
    for i in np.unique(subjNumbers):
        ts = subjNumbers == i
        tr = np.squeeze(np.nonzero(np.bitwise_not(ts)))
        ts = np.squeeze(np.nonzero(ts))
        np.random.shuffle(tr)       # Shuffle indices
        np.random.shuffle(ts)
        fold_pairs.append((tr, ts))


    validScores, testScores = [], []
    for foldNum, fold in enumerate(fold_pairs):
        (X_train, y_train), (X_valid, y_valid), (X_test, y_test) = reformatInput(data, labels, fold)
        print('X_train shape:', X_train.shape)
        print(X_train.shape[0], 'train samples')
        print(X_valid.shape[0], 'valid samples')
        print(X_test.shape[0], 'test samples')

        X_train = X_train.astype("float32", casting='unsafe')
        X_valid = X_valid.astype("float32", casting='unsafe')
        X_test = X_test.astype("float32", casting='unsafe')
        # convert class vectors to binary class matrices
        Y_train = np_utils.to_categorical(y_train, nb_classes)
        Y_valid = np_utils.to_categorical(y_valid, nb_classes)
        Y_test = np_utils.to_categorical(y_test, nb_classes)

        # Building the network
        model = Sequential()

        model.add(Convolution2D(40, 3, 3, border_mode='full',
                                input_shape=(image_dimensions, shapex, shapey)))
        model.add(Activation('relu'))
        model.add(Convolution2D(40, 3, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        # model.add(Convolution2D(80, 3, 3, border_mode='full'))
        # model.add(Activation('relu'))
        # model.add(Convolution2D(80, 3, 3))
        # model.add(Activation('relu'))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(1024))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(nb_classes))
        model.add(Activation('softmax'))

        # model.add(Convolution2D(nb_filters[0], image_dimensions, nb_conv[0], nb_conv[0], border_mode='full'))
        # # model.add(BatchNormalization([nb_filters[0], nb_conv[0], nb_conv[0], image_dimensions]))
        # model.add(Activation('relu'))
        # model.add(Convolution2D(nb_filters[0], nb_filters[0], nb_conv[0], nb_conv[0]))
        # model.add(Activation('relu'))
        # model.add(MaxPooling2D(poolsize=(nb_pool[0], nb_pool[0])))
        # model.add(Dropout(0.25))
        #
        # model.add(Convolution2D(nb_filters[1], nb_filters[0], nb_conv[0], nb_conv[0], border_mode='full'))
        # model.add(Activation('relu'))
        # model.add(Convolution2D(nb_filters[1], nb_filters[1], nb_conv[1], nb_conv[1]))
        # model.add(Activation('relu'))
        # model.add(MaxPooling2D(poolsize=(nb_pool[1], nb_pool[1])))
        # model.add(Dropout(0.25))
        #
        # model.add(Flatten())
        # # the image dimensions are the original dimensions divided by any pooling
        # # each pixel has a number of filters, determined by the last Convolution2D layer
        # model.add(Dense(nb_filters[-1] * (shapex / nb_pool[0] / nb_pool[1]) * (shapey / nb_pool[0] / nb_pool[1]), 1024))
        # # model.add(BatchNormalization([1024]))
        # model.add(Activation('relu'))
        # model.add(Dropout(0.5))
        # model.add(Dense(1024, nb_classes))
        # model.add(Activation('softmax'))

        # let's train the model using SGD + momentum (how original).
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=sgd)

        if not data_augmentation:
            print("Not using data augmentation or normalization")

            X_train = X_train.astype("float32", casting='unsafe')
            X_test = X_test.astype("float32", casting='unsafe')
            X_train /= 255.
            X_test /= 255.
            model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True)
            score, accu = model.evaluate(X_test, Y_test, batch_size=batch_size, show_accuracy=True)
            print('Test accuracy:', accu)

        else:
            print("Using real time data augmentation")
            # X_train = (X_train - np.mean(X_train, axis=0)) / np.std(X_train.flatten(), axis=0)
            # X_valid = (X_valid - np.mean(X_valid, axis=0)) / np.std(X_valid.flatten(), axis=0)
            # X_test = (X_test - np.mean(X_test, axis=0)) / np.std(X_test.flatten(), axis=0)
            # X_train = (X_train - np.mean(X_train, axis=0))
            # X_valid = (X_valid - np.mean(X_train, axis=0))
            # X_test = (X_test - np.mean(X_train, axis=0))
            # this will do preprocessing and realtime data augmentation
            datagen = ImageDataGenerator(
                featurewise_center=True,  # set input mean to 0 over the dataset
                samplewise_center=False,  # set each sample mean to 0
                featurewise_std_normalization=False,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,  # divide each input by its std
                zca_whitening=False,  # apply ZCA whitening
                rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
                width_shift_range=0,  # randomly shift images horizontally (fraction of total width)
                height_shift_range=0,  # randomly shift images vertically (fraction of total height)
                horizontal_flip=False,  # randomly flip images
                vertical_flip=False)  # randomly flip images

            # compute quantities required for featurewise normalization
            # (std, mean, and principal components if ZCA whitening is applied)
            datagen.fit(X_train)
            best_validation_accu = 0

            for e in range(nb_epoch):

                print('-'*40)
                print('Epoch', e)
                print('-'*40)
                print("Training...")
                # batch train with realtime data augmentation
                progbar = generic_utils.Progbar(X_train.shape[0])
                for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size=batch_size):
                    score, trainAccu = model.train_on_batch(X_batch, Y_batch, accuracy=True)
                    progbar.add(X_batch.shape[0], values=[("train accuracy", trainAccu)])

                print("Validating...")
                # Validation time!
                progbar = generic_utils.Progbar(X_valid.shape[0])
                epochValidAccu = []
                for X_batch, Y_batch in datagen.flow(X_valid, Y_valid, batch_size=batch_size):
                    score, validAccu = model.test_on_batch(X_batch, Y_batch, accuracy=True)
                    epochValidAccu.append(validAccu)
                    progbar.add(X_batch.shape[0], values=[("validation accuracy", validAccu)])
                meanValidAccu = np.mean(epochValidAccu)
                if meanValidAccu > best_validation_accu:
                    best_validation_accu = meanValidAccu
                    best_iter = e
                    print("Testing...")
                    # test time!
                    progbar = generic_utils.Progbar(X_test.shape[0])
                    epochTestAccu = []
                    for X_batch, Y_batch in datagen.flow(X_test, Y_test, batch_size=batch_size):
                        score, testAccu = model.test_on_batch(X_batch, Y_batch, accuracy=True)
                        epochTestAccu.append(testAccu)
                        progbar.add(X_batch.shape[0], values=[("test accuracy", testAccu)])
                    model.save_weights('weights_{0}'.format(foldNum), overwrite=True)
            validScores.append(best_validation_accu)
            testScores.append(np.mean(epochTestAccu))
    scipy.io.savemat('cnn_results', {'validAccu': validScores, 'testAccu': testScores})
    print('Average valid accuracies: {0}'.format(np.mean(validScores)))
    print('Average test accuracies: {0}'.format(np.mean(testScores)))
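A portability note on the datagen.flow loops above: this code targets old Keras, where such a loop could terminate on its own; on modern Keras, ImageDataGenerator.flow() yields batches indefinitely, so each loop needs an explicit stop. A sketch:

seen = 0
for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size=batch_size):
    model.train_on_batch(X_batch, Y_batch)
    seen += X_batch.shape[0]
    if seen >= X_train.shape[0]:
        break  # one pass over the training data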
Example 17
for e in range(epoch_num):

    print('Epoch #{}/{}'.format(e+1, epoch_num))
    sys.stdout.flush()

    shuffle(tr_it)


    for u in tqdm(tr_it):
        l, a = model.train_on_batch(tr_in[u], tr_out[u])
        tr_hist.r.addLA(l, a, tr_out[u].shape[0])
    # clear_output()
    tr_hist.log()

    for u in range(dev_in.shape[0]):
        l, a = model.test_on_batch(dev_in[u], dev_out[u])
        dev_hist.r.addLA(l, a, dev_out[u].shape[0])
    dev_hist.log()


    # for u in range(tst_in.shape[0]):
    #     l,a=model.test_on_batch(tst_in[u],tst_out[u])
    #     tst_hist.r.addLA(l,a,tst_out[u].shape[0])
    # tst_hist.log()


pickle.dump(model, open('models/classifier_enc.pkl','wb'))
pickle.dump(dev_hist, open('models/testHist_enc.pkl','wb'))
pickle.dump(tr_hist, open('models/trainHist_enc.pkl','wb'))

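Pickling a Keras model, as above, is unsupported and breaks on many versions and backends; model.save / load_model is the reliable route. A sketch using the same filename stem (the .h5 name is an assumption):

model.save('models/classifier_enc.h5')  # architecture + weights + optimizer state
# later: from keras.models import load_model
#        model = load_model('models/classifier_enc.h5')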
Example 18
class LSTMOneHotLabels(AClassifier):
    def __init__(self,
                 model_name,
                 tf_graph=None,
                 input_size=300,
                 dropout_rate=[0.2, 0.2],
                 layer_sizes=[100, 100],
                 activation=['tanh', 'tanh'],
                 optimizer='adam',
                 loss='mean_squared_error'):
        self.inputSize = input_size
        self.model_name = model_name
        self.file_extension = '.h5'
        self.tf_graph = tf_graph
        if self.tf_graph is None:
            self.tf_graph = KBackend.get_session().graph

        with self.tf_graph.as_default():
            self.model = Sequential()
            self.model.add(
                GaussianNoise(0.1, input_shape=(None, self.inputSize)))

            for idx in range(len(layer_sizes) - 1):
                self.model.add(
                    LSTM(layer_sizes[idx],
                         input_shape=(None, self.inputSize),
                         return_sequences=True,
                         batch_size=1,
                         activation=activation[idx]))
                self.model.add(Dropout(dropout_rate[idx]))

            self.model.add(
                LSTM(layer_sizes[-1],
                     input_shape=(None, self.inputSize),
                     return_sequences=False,
                     batch_size=1,
                     activation=activation[-1]))
            self.model.add(Dropout(dropout_rate[-1]))
            self.model.add(Dense(10, activation='softmax'))

            self.model.compile(optimizer=optimizer,
                               loss=loss,
                               metrics=['accuracy'])
            # print(self.model.summary())

    def train(self, X, Y, epochs=25, split=0.10, print_information=False):

        splitVal = int(len(X) * split)
        X_test = X[:splitVal]
        Y_test = Y[:splitVal]
        X_train = X[splitVal:]
        Y_train = Y[splitVal:]
        train_losses = []
        train_acc = []
        test_losses = []
        test_acc = []
        actual_acc = 0.00

        try:
            with self.tf_graph.as_default():
                for idx in range(epochs):
                    if actual_acc < 0.60:
                        if print_information:
                            print("Epoch {}".format(idx))
                        epoch_train_loss = 0
                        epoch_train_acc = 0
                        epoch_test_loss = 0
                        epoch_test_acc = 0
                        for i in range(len(X_train)):
                            if print_information:
                                print("Training with text {}".format(i))
                            x_train = np.array(X_train[i])
                            y_train = np.array(Y_train[i])
                            x_train = x_train.reshape(1, len(x_train),
                                                      self.inputSize)
                            y_train = y_train.reshape(1, 10)
                            loss, acc = self.model.train_on_batch(
                                x_train, y_train)
                            epoch_train_loss += loss
                            epoch_train_acc += acc

                        for i in range(len(X_test)):
                            x_test = np.array(X_test[i])
                            y_test = np.array(Y_test[i])
                            x_test = x_test.reshape(1, len(x_test),
                                                    self.inputSize)
                            y_test = y_test.reshape(1, 10)

                            loss, acc = self.model.test_on_batch(
                                x_test, y_test)
                            epoch_test_loss += loss
                            epoch_test_acc += acc

                        train_losses.append(epoch_train_loss / len(X_train))
                        train_acc.append(epoch_train_acc / len(X_train))
                        test_losses.append(epoch_test_loss / len(X_test))
                        test_acc.append(epoch_test_acc / len(X_test))

                        if idx == epochs - 1 and print_information:
                            print('----------------')
                            print(
                                'epoch #{}\ntrain: loss: {:f} acc: {:f}\ntest: loss: {:f} acc: {:f}'
                                .format(idx, train_losses[-1], train_acc[-1],
                                        test_losses[-1], test_acc[-1]))
                        actual_acc = test_acc[-1]
                        #print("====================")
                        #print("Epoch {}: \nloss_train = {} \nacc_train = {} \nloss_test = {} \nacc_test = {}".format(idx,loss_train,acc_train,loss_test,acc_test))
                        #print("====================")
                    else:
                        break

                prediction_correct = 0
                test_samples = 0

                # calculate accuracy (test method in MODEL)
                for i in range(0, len(X_test)):
                    if print_information:
                        print("Testing Batch - ", i)
                    x_test = np.array(X_test[i])
                    y_test = np.array(Y_test[i])
                    x_test = x_test.reshape(1, len(x_test), self.inputSize)
                    y_test = y_test.reshape(1, 10)
                    loss_test, acc_test = self.model.test_on_batch(
                        x_test, y_test)
                    test_samples += 1
                    if acc_test == 1.0:
                        prediction_correct += 1

                accuracy = prediction_correct / test_samples
                accuracy = accuracy * 100
                if print_information:
                    print("feeding input --> FINISHED")
                    print("ACCURACY: ", accuracy, "%")
        except BaseException as e:
            print("[CLASSIFICATION] Error occurred during training: " + str(e))
            raise RuntimeError("Error occurred during training")

        if print_information:
            pyplot.plot(train_losses, 'r-', test_losses, 'b-')
            pyplot.xlabel('epoch')
            pyplot.ylabel('loss')
            pyplot.title(self.model_name + ' ' + str(actual_acc))
            pyplot.savefig('data/training_stats/' + self.model_name +
                           'losses.png')
            pyplot.close()

            pyplot.plot(train_acc, 'r-', test_acc, 'b-')
            pyplot.xlabel('epoch')
            pyplot.ylabel('acc')
            pyplot.title(self.model_name + ' ' + str(actual_acc))
            pyplot.savefig('data/training_stats/' + self.model_name +
                           'acc.png')
            pyplot.close()

    def predict(self, X):
        predictions = []
        with self.tf_graph.as_default():
            for i in range(len(X)):
                x_predict = np.array(X[i])
                x_predict = x_predict.reshape(1, len(x_predict),
                                              self.inputSize)
                predictions.append(self.model.predict_on_batch(x_predict))
            return np.reshape(np.array(predictions),
                              (len(predictions), predictions[0].shape[1]))

    def save(self, path):
        path += self.file_extension
        with self.tf_graph.as_default():
            try:
                self.model.save(path)
            except BaseException as e:
                print('Saving model to {} failed'.format(path))
                raise SaveModelException(
                    'Saving model to {} failed'.format(path)) from e

    def load(self, path):
        path += self.file_extension
        if not os.path.isfile(path):
            raise LoadModelException(
                "Loading model failed. File {} not found.".format(path))
        with self.tf_graph.as_default():
            try:
                self.model = keras.models.load_model(path)
            except OSError as e:
                print('Loading model from {} failed'.format(path))
                raise LoadModelException(
                    'Loading model from {} failed'.format(path)) from e
Example 19
        print('Iteration', iteration)
    
        print("Training")
        progbar = generic_utils.Progbar(train_num_samples)
        gen = samples_generator(train_sentences, train_next_char, char_indices,
                batch_size, num_samples=train_num_samples, X=X, y=y)
        for X, y in gen:
            loss, accuracy = model.train_on_batch(X, y, accuracy=True)
            progbar.add(batch_size, values=[("train loss", loss), ("train acc",
                accuracy)])
    
        print("Validating")
        progbar = generic_utils.Progbar(valid_num_samples)
        gen = samples_generator(valid_sentences, valid_next_char, char_indices,
                batch_size, num_samples=valid_num_samples, X=X, y=y)
        valid_loss = 0
        for X, y in gen:
            loss, accuracy = model.test_on_batch(X, y, accuracy=True)
            progbar.add(batch_size, values=[("valid loss", loss), ("valid acc",
                accuracy)])
            valid_loss += loss
        valid_loss /= float(valid_num_samples)
    
        print("Valid Loss: {}, Best Loss: {}".format(valid_loss, best_loss))
        if valid_loss < best_loss:
            print("Saving model")
            save_model(model, "language_model")
            best_loss = valid_loss 
    
        sample_model(model, char_indices, indices_char, random.choice(sentences))
Example 20
                                         window_size=4, negative_samples=test_labels, sampling_table=None)
 tag_couples, labels = sequence.tagged_cbows(seq, test_tag_seq, len(tag_tokenizer.word_index)+1, # replace seq with train_tag_seq
                                         window_size=4, negative_samples=test_labels, sampling_table=None)
 
 couples = np.array(couples, dtype="int32")
 tag_couples = np.array(tag_couples, dtype="int32")
 labels = np.array(labels)
 X_w = np.array(np.split(couples, len(seq)))
 X_t = np.array(np.split(tag_couples, len(seq)))
 if test_labels == 0:
     # Divide number of examples to rank so that GPU does not cause out of memory error
     splitter = get_min_divisor(len(labels))
     test_y = np.reshape(np.empty_like(labels, dtype = 'float32'),(labels.shape[0],1))
     for j in range(splitter):
         # integer division (//) keeps these slice bounds valid on Python 3
         test_loss, test_y_block = model.test_on_batch(
             [X_w[:, j*(labels.shape[0]//splitter):(j+1)*(labels.shape[0]//splitter), :],
              X_t[:, j*(labels.shape[0]//splitter):(j+1)*(labels.shape[0]//splitter), :]],
             labels[j*(labels.shape[0]//splitter):(j+1)*(labels.shape[0]//splitter)])
         test_y[j*(labels.shape[0]//splitter):(j+1)*(labels.shape[0]//splitter)] = test_y_block
 else:
     test_loss, test_y = model.test_on_batch([X_w, X_t], labels) 
 
 lraps.append(label_ranking_average_precision_score(np.reshape(np.array(labels),test_y.shape).T , test_y.T))
 mrr, recall, prec = test_utils.print_accuracy_results(np.array(labels) , np.reshape(test_y, np.array(labels).shape))
 mrrs.append(mrr)
 recalls.append(recall)
 precs.append(prec)
 losses.append(test_loss)
 test_losses.append(test_loss)
 if len(losses) % 100 == 0:
     progbar.update(i, values=[("loss", np.sum(losses))])
     losses = []
Example 21
# Pooling layer1
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))

# Conv layer2 output shape (64,14,14)
model.add(Conv2D(64, 5, padding='same'))
model.add(Activation('relu'))

# Pooling layer2 (max pool) output shape (64,7,7)
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))

# FC
model.add(Flatten())
model.add(Dense(1024, activation='relu'))

model.add(Dense(10, activation='softmax'))

adam = Adam(epsilon=1e-8)

model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['acc'])

# start to fit model
print('Training------------------------')
model.fit(X_train, y_train, batch_size=32, epochs=2)

# test model
print('\nTesting---------------------------')
loss, accuracy = model.test_on_batch(X_test, y_test)

print('test loss:', loss)
print('test accuracy:', accuracy)
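One caveat on the test step above: test_on_batch pushes the whole test set through as a single batch, which can exhaust memory on larger datasets. model.evaluate reports the same quantities while iterating in batches; a sketch:

loss, accuracy = model.evaluate(X_test, y_test, batch_size=32)
print('test loss:', loss)
print('test accuracy:', accuracy)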
Example 22
class rnnmlp():
    def __init__(self, r=(21, 109), dt=0.3):
        self.r = r
        self.dt = dt
        self.rnnModel = Sequential()
        self.maxFeatures = r[1] - r[0] + 1

    def SimpleRNNModel(self, nHidden=120, lr=0.01):
        '''Simple RNN model.'''
        self.rnnModel.add(SimpleRNN(nHidden, input_shape=(None, self.maxFeatures),
                                    activation='sigmoid', return_sequences=True))
        # TimeDistributedDense is the Keras 0.x layer; on later versions the
        # equivalent is TimeDistributed(Dense(...))
        self.rnnModel.add(TimeDistributedDense(self.maxFeatures))
        self.rnnModel.add(Activation('softmax'))
        rmsprop = RMSprop(lr=lr, rho=0.9, epsilon=1e-06)
        self.rnnModel.compile(loss='categorical_crossentropy', optimizer=rmsprop)

    def LSTMModel(self, nHidden=150, lr=0.01):
        '''LSTM model (a GRU layer stands in for the commented-out LSTM).'''
        # print('nHidden: %i\tlr: %.3f' % (nHidden, lr))
        self.rnnModel.add(GRU(nHidden, activation='sigmoid',
                              input_shape=(None, self.maxFeatures),
                              return_sequences=True))
        # self.rnnModel.add(LSTM(nHidden, activation='sigmoid', input_shape=(None, nHidden), return_sequences=True))
        self.rnnModel.add(TimeDistributedDense(nHidden))
        self.rnnModel.add(Activation('relu'))
        self.rnnModel.add(TimeDistributedDense(self.maxFeatures))
        self.rnnModel.add(Activation('softmax'))
        rmsprop = RMSprop(lr=lr, rho=0.9, epsilon=1e-06)
        self.rnnModel.compile(loss='categorical_crossentropy', optimizer=rmsprop)

    def train(self, file_name, weight_save_file, batch_size=1, num_epoch=200):
        '''Train the model.

        file_name: the name of the train/test data directory
        weight_save_file: where to save the model parameters
        '''
        print('load data ---------------')
        file_train = os.path.join(os.path.split(os.path.dirname(__file__))[0],
                                  'data', file_name, 'train', '*.mid')
        dataset = [midiread(f, self.r, self.dt).piano_roll.astype(theano.config.floatX)
                   for f in glob.glob(file_train)]

        file_test = os.path.join(os.path.split(os.path.dirname(__file__))[0],
                                 'data', file_name, 'test', '*.mid')
        testdataset = [midiread(f, self.r, self.dt).piano_roll.astype(theano.config.floatX)
                       for f in glob.glob(file_test)]
        print('load done --------------')
        try:
            for epoch in range(num_epoch):
                t0 = time.time()
                numpy.random.shuffle(dataset)
                costs = []
                accuracys = []
                for s, sequence in enumerate(dataset):
                    y = numpy.hstack((sequence, numpy.zeros((sequence.shape[0], 1))))
                    x = numpy.roll(y, 1, axis=0)
                    x[0, :] = 0
                    x[0, self.maxFeatures - 1] = 1
                    cost, accuracy = self.rnnModel.train_on_batch(
                        numpy.array([x]), numpy.array([y]), accuracy=True)
                    costs.append(cost)
                    accuracys.append(accuracy)

                print('epoch: %i/%i\tcost: %.5f\taccu: %.5f\ttime: %.4f s' %
                      (epoch + 1, num_epoch, numpy.mean(costs),
                       numpy.mean(accuracys), time.time() - t0))
                sys.stdout.flush()
                test_accu = self.evaluate(testdataset)
                print('test_accu: %.5f' % numpy.mean(test_accu))
            self.rnnModel.save_weights(weight_save_file)
        except KeyboardInterrupt:
            print('interrupted by user!')

    def evaluate(self, test_dataset):
        '''Evaluate the model on test data, computing cost and accuracy.'''
        test_accuracy = []
        for s, sequence in enumerate(test_dataset):
            test_y = numpy.hstack((sequence, numpy.zeros((sequence.shape[0], 1))))
            test_x = numpy.roll(test_y, 1, axis=0)
            test_x[0, :] = 0
            test_x[0, self.maxFeatures - 1] = 1
            cost, accu = self.rnnModel.test_on_batch(
                numpy.array([test_x]), numpy.array([test_y]), accuracy=True)
            test_accuracy.append(accu)
        return test_accuracy

    def generate(self, init_chord, file_name, LS=False, chord_name=None,
                 chord_file=None, state_file=None, n_steps=80, r=(21, 109)):
        '''Generate music or chords.

        init_chord: the first note of the generated sequence
        file_name: file to save the sequence of generated notes
        LS: if True, add an L-system and generate chords;
            if False, no L-system, generate music notes
        chord_name: chord name (used when LS is True)
        chord_file: notes of all kinds of chords (load file)
        state_file: L-system model parameters (load file)
        n_steps: the length of the generated sequence
        r: the range of notes that counts
        '''
        if LS:
            Lsystem = LSystem(chord_name, init_chord, chord_file, state_file, r)
        init_sequence = numpy.zeros((1, n_steps + 1, self.maxFeatures))
        init_sequence[:, 0, init_chord - self.r[0]] = 1
        for i in numpy.arange(n_steps):
            probs = self.rnnModel.predict_proba(init_sequence)[:, i, :]
            for j in numpy.arange(len(init_sequence)):
                if LS:
                    ind = Lsystem.getMaxProbs(probs[j, 0:(self.maxFeatures - 1)], True)
                else:
                    ind = maxProbs(probs[j, 0:(self.maxFeatures - 1)])
                init_sequence[j, i + 1, ind] = 1

        generate_sq = [sq[:, 0:(self.maxFeatures - 1)].nonzero()[1]
                       for sq in init_sequence]
        print(generate_sq[0] + self.r[0])
        if LS:
            print(Lsystem.cur_chord)
            print(Lsystem.cur_state)
            print(Lsystem.cur_opes)
        midiwrite(file_name, init_sequence[0, :, 0:(self.maxFeatures - 1)],
                  self.r, self.dt)
        extent = (0, self.dt * len(init_sequence[0, :, 0:(self.maxFeatures - 1)])) + self.r
        pylab.figure()
        pylab.imshow(init_sequence[0, :, 0:(self.maxFeatures - 1)].T, origin='lower',
                     aspect='auto', interpolation='nearest',
                     cmap=pylab.cm.gray_r, extent=extent)
        pylab.xlabel('time (s)')
        pylab.ylabel('MIDI note number')
        pylab.title('generated piano-roll')

    def loadModel(self, weight_save_file):
        '''Load saved model weights.'''
        self.rnnModel.load_weights(weight_save_file)
Example 23
def analyze_whale_data(**kwargs):
    print("analyzing whale data")
    # seed random
    np.random.seed(kwargs["seed"])

    # model inputs and parameters
    #X_train = kwargs["X_train"]
    #X_test = kwargs["X_test"]
    #Y_train = kwargs["Y_train"]
    #Y_test = kwargs["Y_test"]
    
    X_train=np.load(home+'/gabor/numpyFiles/Training Set.npy')
    X_test=np.load(home+'/gabor/numpyFiles/TestSet.npy')
    Y_train=np.load(home+'/gabor/numpyFiles/Training Labels.npy')
    Y_test=np.load(home+'/gabor/numpyFiles/TestSet Labels.npy')
    
    X_test = X_test.reshape(-1, 1, 30, 96)
    Y_test = np_utils.to_categorical(Y_test, 447)
    
    
    X_train = X_train.reshape(-1, 1, 30, 96)
    Y_train = np_utils.to_categorical(Y_train, 447)
    
    img_rows = kwargs["img_rows"]
    img_cols = kwargs["img_cols"]

    nb_filters = kwargs.get("nb_filters", 32)
    nb_conv = kwargs.get("nb_conv", 3)
    nb_pool = kwargs.get("nb_pool", 2)

    batch_size = kwargs.get("batch_size", 32)
    nb_epoch = kwargs.get("nb_epoch", 12)
    nb_classes = kwargs.get("nb_classes", 447)

    print("X_test.shape == {};".format(X_test.shape))
    print("Y_test.shape == {};".format(Y_test.shape))
    print("X_test.shape == {};".format(X_train.shape))
    print("Y_test.shape == {};".format(Y_train.shape))
    
    # CNN architecture
    print("--> creating CNN network ...")
    model = Sequential()

    # layer 1
    model.add(Convolution2D(nb_filters,
                            nb_conv,
                            nb_conv,
                            border_mode='full',
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))

    # layer 2
    model.add(Convolution2D(32, 5, 5, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))

    # layer 3
    model.add(Convolution2D(64, 5, 5, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    
    # layer 4
    model.add(Convolution2D(128, 3, 3, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    
    # layer 5
    model.add(Flatten())
    model.add(Dense(1000))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    
    # layer 6
    model.add(Dense(1000))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    # output layer
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    # compile, fit and evaluate model
    print("--> compiling CNN functions")
    model.compile(
        loss='categorical_crossentropy',
        optimizer='adadelta'
    )
    print("--> fitting CNN")
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=True)  # randomly flip images
    
    datagen.fit(X_train)
    
    loss1 = np.zeros((nb_epoch))
    loss1 = loss1.astype(np.float32)
    acc1 = np.zeros((nb_epoch))
    acc1 = acc1.astype(np.float32)
    score1 = np.zeros((nb_epoch))
    score1 = score1.astype(np.float32)
    test_acc1 = np.zeros((nb_epoch))
    test_acc1 = test_acc1.astype(np.float32)    
    
    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print("Training...")
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(X_train.shape[0])
        for X_batch, Y_batch in datagen.flow(X_train, Y_train):
            loss, acc = model.train_on_batch(X_batch, Y_batch, accuracy=True)
            progbar.add(X_batch.shape[0], values=[("train loss", loss), ("train accuracy", acc)])
        loss1[e] = loss
        acc1[e] = acc

        print("Testing...")
        # test time!
        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, Y_test):
            score, test_acc = model.test_on_batch(X_batch, Y_batch, accuracy=True)
            progbar.add(X_batch.shape[0], values=[("test loss", score), ("test accuracy", test_acc)])
        score1[e] = score
        test_acc1[e] = test_acc
    
    print("--> saving CNN")
    json_string = model.to_json()
    with open('my_model_architecture.json', 'w') as f:
        f.write(json_string)
    model.save_weights('my_model_weights.h5')
    
    return (loss1, acc1, score1, test_acc1)
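The per-epoch loop above targets the old Keras 0.x API: train_on_batch/test_on_batch took accuracy=True, and datagen.flow stopped after one pass over the data. A minimal sketch of the same augmented training on the current tf.keras API (an assumption: channels-last data, toy placeholder arrays, and metrics=['accuracy'] in place of accuracy=True):

import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(30, 96, 1)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(447, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
              metrics=['accuracy'])

datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1,
                             horizontal_flip=True, vertical_flip=True)
X_train = np.random.rand(8, 30, 96, 1).astype('float32')  # placeholder data
Y_train = tf.keras.utils.to_categorical(np.arange(8) % 447, 447)

# fit() consumes the generator and runs the per-epoch loop itself
model.fit(datagen.flow(X_train, Y_train, batch_size=4),
          steps_per_epoch=2, epochs=1)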
Esempio n. 24
0
                X, y = get_data(files[n:n+batch_size], n)
            else:
                X, y = get_data(files[n:], n)

            X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
            X_train = np.array(X_train)
            X_test = np.array(X_test)
            y_train = np.array(y_train)
            y_test = np.array(y_test)

            # convert class vectors to binary class matrices
            Y_train = np_utils.to_categorical(y_train, nb_classes)
            Y_test = np_utils.to_categorical(y_test, nb_classes)

            model.train_on_batch(X_train, Y_train)
            l, a = model.test_on_batch(X_test, Y_test, accuracy=True)

            acc.append(a)
            loss.append(l)
        print ''
        print "Val_loss", (sum(loss) / len(loss))
        print "Val_acc", (sum(acc) / len(acc))

    # with random batch draws....
    # -------
    # Epoch 1
    # Val_loss 0.889370705837
    # Val_acc 0.479363625794

    # -------
    # Epoch 2
Esempio n. 25
0
def train(shared_variables):
    shared_variables["modelEvent"].wait()
    shared_variables["modelEvent"].clear()

    global env
    global roads

    for key in shared_variables["lanes"]:
        shared_variables["lanes"][key] = Lane(key)

    # print(shared_variables["lanes"])
    # sys.exit()

    print(shared_variables["edges_lanes"])

    roads = [
        Road("zeytouna-ain_mreysse", 100, 100, [
            shared_variables["lanes"]["zeytouna-ain_mreysse_0"],
            shared_variables["lanes"]["zeytouna-ain_mreysse_1"],
            shared_variables["lanes"]["zeytouna-ain_mreysse_2"],
            shared_variables["lanes"]["zeytouna-ain_mreysse_3"]
        ]),
        Road("aub-ain_mreysse", 100, 100, [
            shared_variables["lanes"]["aub-ain_mreysse_0"],
            shared_variables["lanes"]["aub-ain_mreysse_1"]
        ]),
        Road("bliss-ain_mreysse", 100, 200, [
            shared_variables["lanes"]["bliss-ain_mreysse_0"],
            shared_variables["lanes"]["bliss-ain_mreysse_1"]
        ])
    ]

    # for edge, lanes in shared_variables["edges_lanes"].items():
    #     lanes_input = []
    #     for lane in lanes:
    #         lanes_input.append(shared_variables["lanes"][lane])
    #     roads.append(Road(edge,shared_variables["max_queue_lengths"][edge],shared_variables["max_time_delays"][edge],lanes_input))

    # Define environment/game
    num_phases = shared_variables["num_phases"]
    env = Simulation(shared_variables["current_time"], roads, num_phases)
    phase_duration = 0
    num_lanes = env.get_total_num_lanes()
    input_size = 1 + 2 * num_lanes

    # Define Model
    model = Sequential()
    model.add(Dense(input_size, input_shape=(input_size, ), activation='relu'))
    model.add(Dense(input_size, activation='relu'))
    model.add(Dense(num_phases))
    model.compile(sgd(lr=shared_variables["learning_rate"]),
                  shared_variables["optimizer"])

    try:
        model.load_weights("model.h5")
    except Exception:
        print("failed to load model")

    # Initialize experience replay object
    exp_replay = ExperienceReplay(shared_variables["max_memory"],
                                  shared_variables["discount"])

    shared_variables["lock"].acquire()
    while not shared_variables["doneEvent"].is_set():

        shared_variables["lock"].release()
        shared_variables["modelEvent"].wait()
        shared_variables["modelEvent"].clear()

        state = "\nold state:\t\t" + str(
            np.asarray(env.get_old_state()[0]).tolist())
        state += "\nnew phase:\t\t" + str(env.get_current_phase())
        state += "\nreward:\t\t\t" + str(env.get_reward())
        state += "\nnew state:\t\t" + str(
            np.asarray(env.get_state()[0]).tolist()) + "\n"

        if shared_variables["mode"] == "train":
            # # adapt model
            exp_replay.remember([
                env.get_old_state(),
                env.get_current_phase(),
                env.get_reward(),
                env.get_state()
            ])
            inputs, targets = exp_replay.get_batch(
                model, batch_size=shared_variables["batch_size"])
            shared_variables["loss"] += model.train_on_batch(inputs, targets)

        elif shared_variables["mode"] == "test_model":
            exp_replay.remember([
                env.get_old_state(),
                env.get_current_phase(),
                env.get_reward(),
                env.get_state()
            ])
            inputs, targets = exp_replay.get_batch(
                model, batch_size=shared_variables["batch_size"])
            shared_variables["loss"] += model.test_on_batch(inputs, targets)

        for edge, lanes in shared_variables["edges_lanes"].items():
            shared_variables["cumulative_queue_length_" + edge] += \
            [road for road in env.get_roads() if road.name == edge][0].get_avg_queue_lengths()
            shared_variables["cumulative_time_delay_" + edge] += \
            [road for road in env.get_roads() if road.name == edge][0].get_avg_time_delays()

        phase_duration += 1
        env.set_old_state()

        # if phase_duration == shared_variables["phase_duration"]:
        phase_duration = 0
        # get next phase
        if shared_variables["mode"] == "train" and np.random.rand(
        ) <= shared_variables["exploration"]:
            new_phase = np.random.randint(0, num_phases, size=1)[0]
        else:
            q = model.predict(env.get_state())
            new_phase = np.argmax(q[0])

        env.set_current_phase(new_phase)
        shared_variables["new_phase"] = env.get_current_phase(
        )  # if not shared_variables["accumulation"] or accumulation_counter > shared_variables["accumulate_duration"] else shared_variables["accumulate_phase"]

        # apply new phase, get rewards and new state

        # state += "\nnew phase:\t\t" + str(shared_variables["new_phase"]) + "\n"

        # print(state)

        shared_variables["states"] += state

        shared_variables["lock"].acquire()
        shared_variables["serverEvent"].set()

    if shared_variables["mode"] == "train":
        model.save_weights("model.h5", overwrite=True)
        # with open("model.json", "w") as outfile:
        #     json.dump(model.to_json(), outfile)

    shared_variables["lock"].release()

    print("Model exiting")
Esempio n. 26
0
def train_whale_data(**kwargs):
    print("analyzing whale data")
    # seed random
    np.random.seed(kwargs["seed"])

    # model inputs and parameters
    data_augmentation = kwargs.get("data_augmentation", False)
    X_train = kwargs["X_train"]
    X_test = kwargs["X_test"]
    Y_train = kwargs["Y_train"]
    Y_test = kwargs["Y_test"]

    img_rows = kwargs["img_rows"]
    img_cols = kwargs["img_cols"]

    nb_filters = kwargs.get("nb_filters", 32)
    nb_conv = kwargs.get("nb_conv", 3)
    nb_pool = kwargs.get("nb_pool", 2)

    batch_size = kwargs["batch_size"]
    nb_epoch = kwargs.get("nb_epoch", 12)
    nb_classes = kwargs.get("nb_classes", 10)

    model_file = kwargs["model_file"]
    weights_file = kwargs["weights_file"]
    results_file = kwargs["results_file"]

    # CNN architecture
    print("--> creating CNN network ...")
    results = {
        "acc": [],
        "val_acc": [],
        "loss": [],
        "val_loss": []
    }
    model = Sequential()

    # layer 1
    model.add(
        Convolution2D(
            nb_filters,
            nb_conv,
            nb_conv,
            input_shape=(1, img_rows, img_cols),
        )
    )
    model.add(Activation('relu'))
    model.add(Dropout(0.4))

    # layer 2
    model.add(
        Convolution2D(
            nb_filters,
            nb_conv,
            nb_conv,
            input_shape=(1, img_rows, img_cols),
        )
    )
    model.add(Activation('relu'))
    model.add(Dropout(0.4))

    # layer 3
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.4))

    # layer 4
    model.add(Flatten())
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    # compile, fit and evaluate model
    print("--> compiling CNN functions")
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd'
    )

    # fit model
    print("--> fitting CNN")
    if data_augmentation is False:
        print "--> fitting data"
        fitlog = model.fit(
            X_train,
            Y_train,
            batch_size=batch_size,
            nb_epoch=nb_epoch,
            show_accuracy=True,
            verbose=1,
            validation_data=(X_test, Y_test)
        )
        results = fitlog.history

    else:
        # turn on data augmentation
        print "--> augmenting data"
        datagen = ImageDataGenerator(
            featurewise_center=False,
            featurewise_std_normalization=False,
            rotation_range=20,
            width_shift_range=0.2,
            height_shift_range=0.2,
            horizontal_flip=True,
        )
        datagen.fit(X_train)

        print "--> fitting data"
        for e in range(nb_epoch):
            print "epoch:", e
            for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size):
                train_loss, train_accuracy = model.train_on_batch(
                    X_batch,
                    Y_batch,
                    accuracy=True
                )
                valid_loss, valid_accuracy = model.test_on_batch(
                    X_test,
                    Y_test,
                    accuracy=True
                )

            results["acc"].append(float(train_accuracy))
            results["val_acc"].append(float(valid_accuracy))
            results["loss"].append(float(train_loss))
            results["val_loss"].append(float(valid_loss))

            print "acc: {0}".format(train_accuracy),
            print "val_acc: {0}".format(valid_accuracy),
            print "acc_loss: {0}".format(train_loss),
            print "val_loss: {0}".format(valid_loss)

    # save model
    model_data = model.to_json()
    with open(model_file, "w") as f:
        f.write(json.dumps(model_data))

    # save model weights
    model.save_weights(weights_file, overwrite=True)

    # save results
    results["nb_epoch"] = nb_epoch
    results["batch_size"] = batch_size
    rf = open(results_file, "w")
    rf.write(json.dumps(results))
    rf.close()
Esempio n. 27
0
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print('Training...')
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(X_train.shape[0])
        for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size=128):
            loss = modelCascade.train_on_batch(X_batch, Y_batch, accuracy=True)
            progbar.add(X_batch.shape[0], values=[('train loss', loss[0]), ('train accuracy', loss[1])])
        print('Testing...')
        # test time!
        accuracyArray = list()
        lossArray = list()
        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, Y_test):
            score = modelCascade.test_on_batch(X_batch, Y_batch,accuracy=True)
            lossArray.append(score[0])
            accuracyArray.append(score[1])
            progbar.add(X_batch.shape[0], values=[('test loss', score[0]),('test accuracy',score[1])])
        lossIteration1.append(np.mean(lossArray))
        accuracyIteration1.append(np.mean(accuracyArray))            

    weightsLayer1 = modelCascade.layers[0].get_weights()

    print('SECOND ITERATION STARTING')
    #####SECOND ITERATION
    #MODEL THAT IS USED TO GENERATE INPUT HAT
    modelIH = Sequential()
    modelIH.add(Convolution2D(32, 3, 3, border_mode='same',
                        input_shape=(img_channels, img_rows, img_cols),weights=weightsLayer1))
    modelIH.add(Activation('relu'))
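The weights= constructor argument used above is the Keras 0.x way to seed a layer with pre-trained kernels; the same cascade transfer can also be written with get_weights/set_weights, which exist across Keras versions. A sketch (layer shapes and the weightsLayer1 array are assumed to match, as in the snippet):

from keras.models import Sequential
from keras.layers import Convolution2D, Activation

modelIH = Sequential()
first_conv = Convolution2D(32, 3, 3, border_mode='same',
                           input_shape=(img_channels, img_rows, img_cols))
modelIH.add(first_conv)
first_conv.set_weights(weightsLayer1)  # copy the kernels trained in the first iteration
modelIH.add(Activation('relu'))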
Esempio n. 28
0
def cnn(**kwargs):
    verbose = kwargs.get("verbose", True)
    X_train = kwargs["X_train"]
    Y_train = kwargs["Y_train"]
    X_test = kwargs["X_test"]
    Y_test = kwargs["Y_test"]
    input_shape = kwargs["input_shape"]
    nb_classes = kwargs["nb_classes"]
    data_augmentation = kwargs.get("data_augmentation", True)

    nb_convo_layers = kwargs["nb_convo_layers"]
    nb_filters = kwargs["nb_filters"]
    nb_conv = kwargs["nb_conv"]

    convo_activations = kwargs["convo_activations"]
    maxpools = kwargs["maxpools"]
    pool_sizes = kwargs["pool_sizes"]
    convo_dropouts = kwargs["convo_dropouts"]

    nb_dense_layers = kwargs["nb_dense_layers"]
    dense_hidden_neurons = kwargs["dense_hidden_neurons"]
    dense_activations = kwargs["dense_activations"]
    dense_dropouts = kwargs["dense_dropouts"]

    loss = kwargs["loss"]
    optimizer = kwargs["optimizer"]
    nb_epoch = kwargs["nb_epoch"]
    batch_size = kwargs["batch_size"]

    model_file = kwargs.get("model_file")
    weights_file = kwargs.get("weights_file")
    results_file = kwargs.get("results_file")
    results = {
     "acc": [],
     "loss": [],
     "val_acc": [],
     "val_loss": []
    }

    # CNN architecture
    model = Sequential()

    # convolution layers
    for i in range(nb_convo_layers):
        # convo layer
        if i == 0:
            model.add(
                Convolution2D(
                    nb_filters[i],
                    nb_conv[i],
                    nb_conv[i],
                    input_shape=input_shape
                )
            )
        else:
            model.add(
                Convolution2D(
                    nb_filters[i],
                    nb_conv[i],
                    nb_conv[i],
                    border_mode='valid',
                )
            )

        # activation
        if convo_activations[i]:
            model.add(Activation(convo_activations[i]))

        # max-pooling
        if maxpools[i]:
            model.add(MaxPooling2D(pool_size=(pool_sizes[i], pool_sizes[i])))

        # dropout
        if convo_dropouts[i]:
            model.add(Dropout(convo_dropouts[i]))

    # dense layers
    model.add(Flatten())
    for i in range(nb_dense_layers):
        # dense layer
        if (i + 1) == nb_dense_layers:
            model.add(Dense(nb_classes))
        else:
            model.add(Dense(dense_hidden_neurons[i]))

        # activation
        if dense_activations[i]:
            model.add(Activation(dense_activations[i]))

        # dropout
        if dense_dropouts[i]:
            model.add(Dropout(dense_dropouts[i]))

    # loss function and optimizer
    if verbose:
        print("--> compiling CNN")
    model.compile(loss=loss, optimizer=optimizer)

    # fit model
    if verbose:
        print("--> fitting CNN")

    if data_augmentation is False:
        fitlog = model.fit(
            X_train,
            Y_train,
            batch_size=batch_size,
            nb_epoch=nb_epoch,
            show_accuracy=True,
            verbose=verbose,
            validation_data=(X_test, Y_test)
        )
        results = fitlog.history

    else:
        # turn on data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,
            samplewise_center=True,
            featurewise_std_normalization=False,
            samplewise_std_normalization=False,
            zca_whitening=False,
            rotation_range=0,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
            vertical_flip=True
        )
        datagen.fit(X_train)

        for e in range(nb_epoch):
            if verbose:
                print "epoch:", e

            tmp_train_acc = []
            tmp_train_loss = []
            tmp_test_acc = []
            tmp_test_loss = []
            train_batch_counter = 0
            test_batch_counter = 0

            for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size):
                train_loss, train_accuracy = model.train_on_batch(
                    X_batch,
                    Y_batch,
                    accuracy=True
                )

                tmp_train_acc.append(train_accuracy)
                tmp_train_loss.append(train_loss)
                train_batch_counter += 1

            for X_batch, Y_batch in datagen.flow(X_test, Y_test, batch_size):
                valid_loss, valid_accuracy = model.test_on_batch(
                    X_batch,
                    Y_batch,
                    accuracy=True
                )
                tmp_test_acc.append(valid_accuracy)
                tmp_test_loss.append(valid_loss)
                test_batch_counter += 1

            epoch_train_acc = sum(tmp_train_acc) / float(train_batch_counter)
            epoch_train_loss = sum(tmp_train_loss) / float(train_batch_counter)
            epoch_test_acc = sum(tmp_test_acc) / float(test_batch_counter)
            epoch_test_loss = sum(tmp_test_loss) / float(test_batch_counter)

            results["acc"].append(epoch_train_acc)
            results["loss"].append(epoch_train_loss)
            results["val_acc"].append(epoch_test_acc)
            results["val_loss"].append(epoch_test_loss)

            if verbose:
                print "acc: {0}".format(epoch_train_acc),
                print "loss: {0}".format(epoch_train_loss),
                print "val_acc: {0}".format(epoch_test_acc),
                print "val_loss: {0}".format(epoch_test_loss)

    # save model
    if model_file:
        model_data = model.to_json()
        model_file = open(model_file, "w")
        model_file.write(json.dumps(model_data))
        model_file.close()

    # save model weights
    if weights_file:
        model.save_weights(weights_file, overwrite=True)

    # save results
    if results_file:
        results["nb_epoch"] = nb_epoch
        results["batch_size"] = batch_size
        rf = open(results_file, "w")
        rf.write(json.dumps(results))
        rf.close()

    # evaluate
    score = model.evaluate(
        X_test,
        Y_test,
        show_accuracy=True,
        verbose=verbose,
        batch_size=batch_size
    )

    return results, score
Esempio n. 29
0
    ]) / size_train * nepochs)
print(
    'The same but with the predictions computed in a different way >>>',
    sum([
        round(auth) == y[0][0][0]
        for auth, y in zip(pred_author_train2, Y_train * nepochs)
    ]) / size_train * nepochs)
print('Evaluate...')
# reinitialize metrics
loss, acc = 0, 0
test_metrics = np.empty((size_test, 2))
pred_author_test = []
pred_author_test2 = []
for i, (x_test, y_test) in enumerate(zip(X_test, Y_test)):
    # considering all predictions for each sequence
    test_metrics[i] = model.test_on_batch(x_test, y_test)
    # consider only the last prediction for each sequence
    pred_author_test.append(sigmoid1_out(lstm1_out([x_test]))[0][0, -1, 0])
    pred_author_test2.append(model.predict(x_test)[0, -1, 0])
    if (i + 1) % step_len == 0:
        nsteps = (i + 1) // step_len
        loss = inc_avg(loss,
                       np.mean(test_metrics[(nsteps - 1) * step_len:i + 1, 0]),
                       nsteps)
        acc = inc_avg(acc,
                      np.mean(test_metrics[(nsteps - 1) * step_len:i + 1, 1]),
                      nsteps)
        print('iteration {2: <5} > loss: {0} - acc: {1}'.format(
            loss, acc, i + 1))
if (size_test % step_len) != 0:  # all batch means computed, update means with remaining metrics
Esempio n. 30
0
            tr_loss, tr_acc = model.train_on_batch(x_in, y_in)

            mean_tr_acc.append(tr_acc)
            mean_tr_loss.append(tr_loss)

        # move this into the inner loop and watch the network never learn
        # move into inner loop and reset every x < recall_len for fancy failure
        model.reset_states()

    print('accuracy training = {}'.format(np.mean(mean_tr_acc)))
    print('loss training = {}'.format(np.mean(mean_tr_loss)))
    print('___________________________________')

    mean_te_acc = []
    mean_te_loss = []
    for seq_idx in range(X_test.shape[0]):
        start_val = X_test[seq_idx, 0]
        assert y_test[seq_idx] == start_val
        assert tuple(np.nonzero(X_test[seq_idx, :]))[0].shape[0] == start_val

        y_in = np.array([y_test[seq_idx]], dtype=np.bool)

        for j in range(recall_len):
            te_loss, te_acc = model.test_on_batch(np.array([[[X_test[seq_idx][j]]]], dtype=np.bool), y_in)
            mean_te_acc.append(te_acc)
            mean_te_loss.append(te_loss)
        model.reset_states()

    print('accuracy testing = {}'.format(np.mean(mean_te_acc)))
    print('loss testing = {}'.format(np.mean(mean_te_loss)))
print('___________________________________')
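The two comments about reset_states() above are the crux of the stateful pattern: the LSTM's cell state must survive the inner per-timestep loop and be cleared only between sequences. A self-contained sketch of that loop (toy data; tf.keras assumed):

import numpy as np
import tensorflow as tf

recall_len = 5
model = tf.keras.Sequential([
    tf.keras.layers.LSTM(8, stateful=True, batch_input_shape=(1, 1, 1)),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

X = np.random.randint(0, 2, size=(4, recall_len)).astype('float32')
y = X[:, 0]  # toy target: recall the first timestep of each sequence
for seq_idx in range(X.shape[0]):
    y_in = np.array([y[seq_idx]])
    for j in range(recall_len):  # one timestep per call, state carried over
        model.train_on_batch(np.array([[[X[seq_idx, j]]]], dtype='float32'), y_in)
    model.reset_states()  # forget the state only between sequences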
Esempio n. 31
0
callbacks = [early_stop, save_best]

# Train model
t0 = time.time()
hist = model.fit(train_set[0],
                 train_set[1],
                 validation_data=valid_set,
                 verbose=2,
                 callbacks=callbacks,
                 nb_epoch=1000,
                 batch_size=20)
time_elapsed = time.time() - t0

# Load best model
model.load_weights("models/%s.mdl" % MDL_NAME)

# Print time elapsed and loss on testing dataset
test_set_loss = model.test_on_batch(test_set[0], test_set[1])
print "\nTime elapsed: %f s" % time_elapsed
print "Testing set loss: %f" % test_set_loss

# Save results
qri.save_results("results/%s.out" % MDL_NAME, time_elapsed, test_set_loss)
qri.save_history("models/%s.hist" % MDL_NAME, hist.history)

# Plot training and validation loss
qri.plot_train_valid_loss(hist.history)

# Make predictions
qri.plot_test_predictions(model, train_set)
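The early_stop and save_best callbacks referenced at the top are defined earlier in that script; a plausible construction in the same era's Keras API (the monitor and patience values here are assumptions) looks like:

from keras.callbacks import EarlyStopping, ModelCheckpoint

early_stop = EarlyStopping(monitor='val_loss', patience=10, verbose=1)
save_best = ModelCheckpoint("models/%s.mdl" % MDL_NAME,  # MDL_NAME as in the snippet
                            monitor='val_loss', save_best_only=True, verbose=1)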
Esempio n. 32
0
        inputs /= 255
        loss, acc = model.train_on_batch(inputs, targets)
        train_loss += loss
        train_acc += acc
        train_batches += 1

    # And a full pass over the validation data:
    val_loss = 0
    val_acc = 0
    val_batches = 0
    test_str.reset() # re-start streaming
    for inputs, targets, pad in val_str:
        if pad: # not full batch
            break
        inputs /= 255
        loss, acc = model.test_on_batch(inputs, targets)
        val_loss += loss
        val_acc += acc
        val_batches += 1

    # Then we print the results for this epoch:
    print("Epoch {}/{} time {:.3f}s; train loss: {:.6f} acc:{:.6f}; val loss: {:.6f} acc:{:.2f}".format(
        epoch + 1, nb_epoch, time.time() - start_time,
        train_loss/train_batches, train_acc/train_batches,
        val_loss/val_batches, val_acc/val_batches))

# After training, we compute and print the test error:
test_loss = 0
test_acc = 0
test_batches = 0
for inputs, targets, pad in test_str:
Esempio n. 33
0
                if i % 10 == 0:
                    print('Batch : ', i, '/',
                          train_one_batch, ', loss in minibatch: ',
                          float(train_loss), ', acc in minibatch: ',
                          float(train_acc), 'current best: ', best_acc)

                avg_train_loss += float(train_loss)
                avg_train_acc += float(train_acc)

                if i % 100 == 0:
                    avg_dev_acc = 0.0
                    dev_data.shuffle()

                    for j, (data_, sentiments_) in enumerate(
                            _batch_loader(dev_data, config.batch)):
                        _, dev_acc = model.test_on_batch(data_, sentiments_)
                        avg_dev_acc += float(dev_acc)

                    cur_acc = avg_dev_acc / dev_one_batch

                    print('Epoch : ', epoch, 'Batch : ', i, '/',
                          train_one_batch, 'Validation ACC : ', cur_acc)

                    if cur_acc >= best_acc and config.savemodel == True:
                        best_acc = cur_acc

                        print(
                            '###################  Best Acc Found  #############'
                        )
                        model.save('./modelsave/{}epoch'.format(epoch) +
                                   config.savename)
Esempio n. 34
0
    print('Test score:', score)

else:
    print('Using real time data augmentation')
    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print('Training...')
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(num_images-test_size)
        for X_batch, Y_batch in flow(image_list[0:-test_size]):
            X_batch = X_batch.reshape(X_batch.shape[0], img_channels, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            loss = model.train_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('train loss', loss)])

        print('Testing...')
        # test time!
        progbar = generic_utils.Progbar(test_size)
        for X_batch, Y_batch in flow(image_list[-test_size:]):
            X_batch = X_batch.reshape(X_batch.shape[0], img_channels, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            score = model.test_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('test loss', score)])

    json_string = model.to_json()
    with open('cnn1_model_architecture.json', 'w') as f:
        f.write(json_string)
    model.save_weights('cnn1_model_weights.h5')

Esempio n. 35
0
'''


i=0
for file in fileList:
    print file
    print i
    x_train,y_train=readPklFile(file)
    #print x_train
    if x_train.ndim<3:
        continue
    if(len(x_train)==0):
        continue
    if i<1800:
        model.fit(x_train, y_train, nb_epoch=40, batch_size=32, verbose=1, show_accuracy=True)
    else:
        classes = model.predict(x_train, batch_size=32, verbose=1)
        y_test = Nomalization(classes)
        predictEntity.writelines(file+'\n'+"original:\n"+str(y_train)+"\npredict:\n"+str(y_test)+'\n')
        loss, acc = model.test_on_batch(x_train, y_train, accuracy=True)
        
        #print loss,acc
        #acc= CalculateAcc(y_train,y_test)
        FileEntity.writelines(file+'\n'+'loss: '+str(loss)+"\tacc: "+str(acc)+"\n")        
        
        #model.evaluate(x_train,y_train,batch_size=32,verbose=1,show_accuracy=True)
    i=i+1
FileEntity.close()
predictEntity.close()
print "finish"
Esempio n. 36
0
        loss, accuracy = model.train_on_batch(b, l)
        end_time = time.time()

        print('batch {}/{} loss: {} accuracy: {} time: {}ms'.format(
            int(current_index / batch_size), int(nice_n / batch_size), loss,
            accuracy, 1000 * (end_time - start_time)),
              flush=True)

    print('epoch {}/{}'.format(i, epochs))

current_index = 0
loss = 0.0
acc = 0.0

while current_index + batch_size < len(training_images):
    b, l = get_batch()

    score = model.test_on_batch(b, l)
    print('Test batch score:', score[0])
    print('Test batch accuracy:', score[1], flush=True)

    loss += score[0]
    acc += score[1]

loss = loss / int(nice_n / batch_size)
acc = acc / int(nice_n / batch_size)

print('Test score:', loss)
print('Test accuracy:', acc)
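Dividing by int(nice_n / batch_size) only matches the loop when nice_n and the while condition agree exactly; counting the batches as they are evaluated avoids that off-by-one. A sketch (get_batch() is assumed to advance current_index, as in the snippet):

current_index = 0
loss = acc = 0.0
n_batches = 0
while current_index + batch_size < len(training_images):
    b, l = get_batch()  # assumed to advance current_index
    batch_loss, batch_acc = model.test_on_batch(b, l)
    loss += batch_loss
    acc += batch_acc
    n_batches += 1
print('Test score:', loss / n_batches)
print('Test accuracy:', acc / n_batches)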
Esempio n. 37
0
        horizontal_flip=False,  # randomly flip images horizontally
        vertical_flip=False)  # randomly flip images vertically

    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)

    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print("Training...")

        progbar = generic_utils.Progbar(X_train.shape[0])
        for X_batch, Y_batch in datagen.flow(X_train, Y_train):
            loss, acc = model.train_on_batch(X_batch, Y_batch, accuracy=True)
            progbar.add(X_batch.shape[0], values=[("train accuracy", acc)])

        print("Testing...")

        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, Y_test):
            score, acc = model.test_on_batch(X_batch, Y_batch, accuracy=True)
            progbar.add(X_batch.shape[0], values=[("test accuracy", acc)])

        # test time!
        for X_batch, Y_batch in datagen.flow(X_te_orig, np.ones((1,X_te_orig.shape[0])), batch_size = X_te_orig.shape[0]):
            y_te = model.predict_classes(X_batch)

        save_out(y_te,labels_string,sorted_files_te,submission_fname)
Esempio n. 38
0
# model.add(Activation("relu"))
# model.add(Dense(3, kernel_initializer="normal"))

# model.compile(optimizer=Adam(lr=1e-6), loss="mse")

tensorboard = TensorBoard(log_dir="logs/{}".format(time.time()))
print(x_train)
print(y_train)
history = model.fit(x_train,
                    y_train,
                    epochs=100,
                    batch_size=100,
                    verbose=1,
                    callbacks=[tensorboard],
                    shuffle=True)
model.test_on_batch(x_test, y_test)

scores = model.evaluate(x_test, y_test)
print(model.summary())
import os

os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
plot_model(model, to_file="plot.png", show_layer_names=True, show_shapes=True)

print(history.history.keys())
#  "Accuracy"
plt.plot(history.history['acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
Esempio n. 39
0
    print('Loss = {}, Precision = {}, Recall = {}, F1 = {}'.format(avgLoss, con_dict['p'], con_dict['r'], con_dict['f1']))
    
    
    print("Validating =>")
    
    val_pred_label = []
    avgLoss = 0
    
    bar = progressbar.ProgressBar(max_value=len(val_x))
    for n_batch, sent in bar(enumerate(val_x)):
        label = val_label[n_batch]
        label = np.eye(n_classes)[label][np.newaxis,:]
        sent = sent[np.newaxis,:]
        
        if sent.shape[1] > 1: #some bug in keras
            loss = model.test_on_batch(sent, label)
            avgLoss += loss

        pred = model.predict_on_batch(sent)
        pred = np.argmax(pred,-1)[0]
        val_pred_label.append(pred)

    avgLoss = avgLoss/n_batch
    
    predword_val = [ list(map(lambda x: idx2la[x], y)) for y in val_pred_label]
    con_dict = conlleval(predword_val, groundtruth_val, words_val, 'r.txt')
    val_f_scores.append(con_dict['f1'])
    
    print('Loss = {}, Precision = {}, Recall = {}, F1 = {}'.format(avgLoss, con_dict['p'], con_dict['r'], con_dict['f1']))

    if con_dict['f1'] > best_val_f1:
Esempio n. 40
0
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print("Training...")
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(X_train.shape[0])
        for X_batch, Y_batch in datagen.flow(X_train, Y_train):
            
            loss = model.train_on_batch(X_batch, X_batch.reshape(X_batch.shape[0],X_train.shape[2]**2*3))
            progbar.add(X_batch.shape[0], values=[("train loss", loss)])

        print("Testing...")
        # test time!
        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, X_test):
            score = model.test_on_batch(X_batch, X_batch.reshape(X_batch.shape[0],X_train.shape[2]**2*3))
            progbar.add(X_batch.shape[0], values=[("test loss", score)])
            
    model2 = Sequential()
    model2.add(encoder)
    codes = []
    targets = []
    model2.compile(loss = "mean_squared_error", optimizer = "sgd")
    for X_batch, Y_batch in datagen.flow(X_train, Y_train):
        codes.append(model2.predict(X_batch))
        targets.append(np.argmax(Y_batch))
        
    print('stack it...')
    codes = np.vstack(codes)
    targets = np.vstack(targets)
    print(codes.shape,'code shape')
Esempio n. 41
0
    train_f_scores.append(con_dict['f1'])
    print('Loss = {}, Precision = {}, Recall = {}, F1 = {}'.format(
        avgLoss, con_dict['p'], con_dict['r'], con_dict['f1']))

    print("Validating =>")
    val_pred_label = []
    avgLoss = 0

    bar = progressbar.ProgressBar(max_value=len(val_x))
    for n_batch, sent in bar(enumerate(val_x)):
        label = val_label[n_batch]
        label = np.eye(n_classes)[label][np.newaxis, :]
        sent = sent[np.newaxis, :]

        if sent.shape[1] > 1:
            loss = model.test_on_batch(sent, label)
            avgLoss += loss

        pred = model.predict_on_batch(sent)
        pred = np.argmax(pred, -1)[0]
        val_pred_label.append(pred)

    avgLoss = avgLoss / n_batch

    predword_val = [list(map(lambda x: idx2la[x], y)) for y in val_pred_label]
    con_dict = conlleval(predword_val, groundtruth_val, words_val, 'r.txt')
    val_f_scores.append(con_dict['f1'])

    print('Loss = {}, Precision = {}, Recall = {}, F1 = {}'.format(
        avgLoss, con_dict['p'], con_dict['r'], con_dict['f1']))
Esempio n. 42
0
	#TESTING GOES HERE
	print('Testing...')
	accuracyArray = list()
	lossArray = list()
	progbar = generic_utils.Progbar(X_test.shape[0])
	j = 0
	while(j != sizeTest):
		if(j+batch_size >= sizeTest):
			batchToTest = model2.predict(X_test[j:sizeTest])
			labelOfBatchToTest = Y_test[j:sizeTest,:]
			j = sizeTest
		else:
			batchToTest = model2.predict(X_test[j:j+batch_size])
			labelOfBatchToTest = Y_test[j:j+batch_size,:]
			j += batch_size
		score = model3.test_on_batch(batchToTest, labelOfBatchToTest,accuracy=True)
		lossArray.append(score[0])
		accuracyArray.append(score[1])
		progbar.add(batchToTest.shape[0], values=[('test loss', score[0]),('test accuracy',score[1])])
	lossIteration2.append(np.mean(lossArray))
	accuracyIteration2.append(np.mean(accuracyArray)) 

weightsLayer2 = model3.layers[0].get_weights()

print('ITERATION 2 STARTING (FW)')

#PREVIOUS MODEL
model2 = Sequential()
model2.add(Convolution2D(96, nb_conv, nb_conv,
                        input_shape=(1, img_rows, img_cols),weights=weightsLayer1))
model2.add(Activation('relu'))
Esempio n. 43
0
class LanguageModel(RNNModel):
    def __init__(self, *args, **kwargs):
        '''
        field is 'sentence' by default, but can be e.g. pos_sentence.
        '''
        self.field = kwargs.pop('field', 'sentence')
        super(LanguageModel, self).__init__(*args, **kwargs)
        self.class_to_code = {'VBZ': 0, 'VBP': 1}
        self.inflect_verb, _ = gen_inflect_from_vocab(self.vocab_file)

    def process_single_dependency(self, dep):
        dep['label'] = dep['verb_pos']
        tokens = dep[self.field].split()
        return tokens

    def create_train_and_test(self, examples):
        random.seed(1)
        random.shuffle(examples)

        first = 1
        self.X_train = []
        self.Y_train = []
        self.X_test = self.Y_test = []  # not used; just for compatibility
        self.deps_train = []
        n_train = int(len(examples) * self.prop_train)
        for _, ints, dep in examples[:n_train]:
            self.deps_train.append(dep)
            for i in range(first, len(ints) - 1):
                self.X_train.append(ints[:i])
                self.Y_train.append(ints[i])

        self.Y_train = np.asarray(self.Y_train)
        self.deps_test = [x[2] for x in examples[n_train:]]

    def create_model(self):
        self.log('Creating model')
        self.model = Sequential()
        self.model.add(
            Embedding(len(self.vocab_to_ints) + 1,
                      self.embedding_size,
                      input_length=self.maxlen))
        self.model.add(
            self.rnn_class(output_dim=self.rnn_output_size,
                           input_length=self.maxlen))
        self.model.add(Dense(len(self.vocab_to_ints) + 1))
        self.model.add(Activation('softmax'))

    def compile_model(self):
        self.log('Compiling model')
        self.model.compile(loss='sparse_categorical_crossentropy',
                           optimizer='adam')

    def results(self):
        recs = []
        columns = ['gram_loss', 'ungram_loss', 'correct'] + dependency_fields
        self.model.model._make_test_function()
        progbar = Progbar(len(self.deps_test))
        for i, dep in enumerate(self.deps_test):
            inp = np.zeros((1, self.maxlen))
            v = int(dep['verb_index']) - 1
            tokens = dep[self.field].split()[:v + 1]
            ints = [self.vocab_to_ints[x] for x in tokens]
            try:
                ungram = self.vocab_to_ints[self.inflect_verb[tokens[v]]]
            except KeyError:  # reinflected form not in vocabulary: ignore
                continue
            n = len(ints) - 1
            inp[0, -n:] = ints[:-1]
            gram_loss = self.model.test_on_batch(inp, np.array([ints[v]]))
            ungram_loss = self.model.test_on_batch(inp, np.array([ungram]))
            recs.append((gram_loss, ungram_loss, gram_loss < ungram_loss) +
                        tuple(dep[x] for x in dependency_fields))
            if i % 16 == 0:
                progbar.update(i)

        self.test_results = pd.DataFrame(recs, columns=columns)

    def train(self, n_epochs=10):
        if not hasattr(self, 'model'):
            self.create_model()
            self.compile_model()

        self.serialize_class_data()
        self.serialize_model()

        validation_split = 0.1
        split_at = int(len(self.X_train) * (1. - validation_split))
        x, val_x = self.X_train[:split_at], self.X_train[split_at:]
        y, val_y = self.Y_train[:split_at], self.Y_train[split_at:]
        training_loss_history = []
        validation_loss_history = []

        for epoch in range(n_epochs):
            print('Epoch', epoch)
            training_loss = []
            end = int(float(len(x)) / self.batch_size)
            progbar = Progbar(end)
            for i in range(0, len(x), self.batch_size):
                inp = sequence.pad_sequences(x[i:i + self.batch_size],
                                             maxlen=self.maxlen)
                out = y[i:i + self.batch_size]
                loss = self.model.train_on_batch(inp, out)
                training_loss.append(loss)
                j = int(float(i) / self.batch_size)
                if j % 16 == 0:
                    progbar.update(j)
            progbar.update(end)

            # test on validation set
            validation_loss = []
            print()
            print('Evaluating on validation set:')
            end = int(float(len(val_x)) / self.batch_size)
            progbar = Progbar(end)
            for i in range(0, len(val_x), self.batch_size):
                inp = sequence.pad_sequences(val_x[i:i + self.batch_size],
                                             maxlen=self.maxlen)
                out = val_y[i:i + self.batch_size]
                output = self.model.test_on_batch(inp, out)
                validation_loss.append(output)
                j = int(float(i) / self.batch_size)
                if j % 16 == 0:
                    progbar.update(j)
            progbar.update(end)

            training_loss_history.append(np.mean(training_loss))
            validation_loss_history.append(np.mean(validation_loss))
            filename = op.join(self.serialization_dir,
                               'weights_epoch%d.h5' % epoch)
            self.model.save_weights(filename, overwrite=True)
            print()
            print(('Mean training loss: %5.3f; mean validation loss: %5.3f\n' %
                   (training_loss_history[-1], validation_loss_history[-1])))
            if (len(validation_loss_history) > 1
                    and validation_loss_history[-1] >=
                    validation_loss_history[-2]):
                break

        self.training_history = (list(map(float, training_loss_history)),
                                 list(map(float, validation_loss_history)))

    def evaluate(self, howmany=1000):
        self.model.model._make_test_function()
        random.seed(0)
        shuffled = self.deps_test[:]
        random.shuffle(shuffled)
        shuffled = shuffled[:howmany]
        X_test = []
        Y_test = []

        for dep in shuffled:
            tokens = self.process_single_dependency(dep)
            ints = []
            for token in tokens:
                if token not in self.vocab_to_ints:
                    # zero is for pad
                    x = self.vocab_to_ints[token] = len(self.vocab_to_ints) + 1
                    self.ints_to_vocab[x] = token
                ints.append(self.vocab_to_ints[token])

            first = 1
            for i in range(first, len(ints) - 1):
                X_test.append(ints[:i])
                Y_test.append(ints[i])

        test_loss = []
        end = int(float(len(X_test)) / self.batch_size)
        progbar = Progbar(end)
        for i in range(0, len(X_test), self.batch_size):
            inp = sequence.pad_sequences(X_test[i:i + self.batch_size],
                                         maxlen=self.maxlen)
            out = Y_test[i:i + self.batch_size]
            output = self.model.test_on_batch(inp, out)
            test_loss.append(output)
            j = int(float(i) / self.batch_size)
            if j % 16 == 0:
                progbar.update(j)
        progbar.update(end)

        return np.mean(test_loss)
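results() above can use test_on_batch as a grammaticality score because, with sparse_categorical_crossentropy and a single target token, the returned loss is exactly -log p(token | prefix); gram_loss < ungram_loss therefore means the model assigns the grammatical verb form the higher probability. A tiny numeric check with made-up probabilities:

import numpy as np

probs = np.array([0.7, 0.2, 0.1])  # a softmax over three candidate tokens
print(-np.log(probs[0]))           # loss if the target is token 0: ~0.357
print(-np.log(probs[1]))           # loss if the target is token 1: ~1.609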
Esempio n. 44
0
                X, y = get_data(files[n:n+batch_size], n)
            else:
                X, y = get_data(files[n:], n)

            X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)  # , random_state=0
            X_train = np.array(X_train)
            X_test = np.array(X_test)
            y_train = np.array(y_train)
            y_test = np.array(y_test)

            # convert class vectors to binary class matrices
            Y_train = np_utils.to_categorical(y_train, nb_classes)
            Y_test = np_utils.to_categorical(y_test, nb_classes)

            model.train_on_batch(X_train, Y_train)
            l, a = model.test_on_batch(X_test, Y_test)

            acc.append(a)
            loss.append(l)
        print "Epoch: %s <==> Val_loss: %s <> Val_acc: %s "% ( str(e), str(sum(loss) / len(loss)), str(sum(acc) / len(acc)) )

'''
fill between on open and close included:
Epoch: 0 <==> Val_loss: 1.19601917664 <> Val_acc: 0.491666666667
Epoch: 1 <==> Val_loss: 0.895532437166 <> Val_acc: 0.441666666667
Epoch: 2 <==> Val_loss: 0.89461884896 <> Val_acc: 0.408333333333
Epoch: 3 <==> Val_loss: 0.900162645181 <> Val_acc: 0.433333333333
Epoch: 4 <==> Val_loss: 0.908274920781 <> Val_acc: 0.4
Epoch: 5 <==> Val_loss: 0.906020263831 <> Val_acc: 0.45
Epoch: 6 <==> Val_loss: 0.910774775346 <> Val_acc: 0.475
Epoch: 7 <==> Val_loss: 0.922956418991 <> Val_acc: 0.45
Esempio n. 45
0
        b_x = X[b*batch_size:b*batch_size+batch_size] # (b, 50, 69)
        b_y = Y[b*batch_size:b*batch_size+batch_size] # (b, 50, 48)
        loss, acc = model.train_on_batch(b_x, b_y)
        train_loss += loss*b_x.shape[0]
        train_acc += acc*b_x.shape[0]
    train_loss /= train_len
    train_acc /= train_len
    
    valid_loss = 0.0
    valid_acc = 0.0
    #count = 0
    for b in range(valid_batch_num): 
        b_x = X_valid[b*batch_size:b*batch_size+batch_size] # (b, 50, 69)
        b_y = Y_valid[b*batch_size:b*batch_size+batch_size] # (b, 50, 48)

        loss, acc = model.test_on_batch(b_x, b_y)
        valid_loss += loss*b_x.shape[0]
        valid_acc += acc*b_x.shape[0]
    valid_loss /= valid_len
    valid_acc /= valid_len
        
    #y = model.predict_on_batch(b_x) # (b, 50, 48)
    #print(y.shape)
    #y = np.reshape(y, (-1, 48))
    #b_y = np.reshape(b_y, (-1, 48))
    #y = np.argmax(y, axis=1)
    #b_y = np.argmax(b_y, axis=1)
    #acc += sum(y==b_y)
    #count += len(y)
    #print("Accuracy:", str(acc/count))
Esempio n. 46
0
class Model() :

    # mode = 'encode' or 'train'
    def __init__(self, mode) :
        self.io_dim = Codec.n_chars
        with open('config/semantic.json') as file :
            semantic_config = json.load(file)
            self.feature_dim = semantic_config['vectorDim']
            self.seq_len = semantic_config['seqLength']
        self.mode = mode
        self.model = SeqModel()
        self.encoder = SeqContainer()
        self.encoder.add(TimeDistributedDense(
            input_dim = self.io_dim,
            input_length = self.seq_len,
            output_dim = self.feature_dim,
            activation = 'sigmoid'))
        self.encoder.add(GRU(
            input_dim = self.feature_dim,
            input_length = self.seq_len,
            output_dim = self.feature_dim,
            activation = 'sigmoid',
            inner_activation = 'hard_sigmoid',
            truncate_gradient = self.seq_len,
            return_sequences = True))
        self.encoder.add(GRU(
            input_dim = self.feature_dim,
            input_length = self.seq_len,
            output_dim = self.feature_dim,
            activation = 'sigmoid',
            inner_activation = 'hard_sigmoid',
            truncate_gradient = self.seq_len,
            return_sequences = False))
        self.model.add(self.encoder)
        if mode == 'train' :
            self.decoder = SeqContainer()
            self.decoder.add(SimpleRNN(
                input_dim = self.feature_dim,
                input_length = self.seq_len,
                output_dim = self.feature_dim,
                activation = 'sigmoid',
                truncate_gradient = self.seq_len,
                return_sequences = True))
            self.decoder.add(TimeDistributedDense(
                input_dim = self.feature_dim,
                input_length = self.seq_len,
                output_dim = self.io_dim,
                activation = 'sigmoid'))
            self.model.add(RepeatVector(self.seq_len, input_shape = (self.feature_dim,)))
            self.model.add(self.decoder)

    def _load_weights(self, path) :
        with h5py.File(path, 'r') as file :
            group = file['/weights']
            n_layers = group.attrs.get('n_layers')[0]
            weights = []
            for i in range(n_layers) :
                layer_weights = file['/weights/layer_' + str(i)][()]
                weights.append(layer_weights)
            return weights

    def load(self) :
        encoder_weights = self._load_weights('data/encoder.hdf5')
        self.encoder.set_weights(encoder_weights)
        if self.mode == 'train' :
            decoder_weights = self._load_weights('data/decoder.hdf5')
            self.decoder.set_weights(decoder_weights)

    def _save_weights(self, weights, path) :
        with h5py.File(path, 'w') as file :
            group = file.create_group('weights')
            n_layers = len(weights)
            group.attrs.create('n_layers', np.array([n_layers]))
            for i, layer_weights in enumerate(weights) :
                group.create_dataset('layer_' + str(i), data = layer_weights)

    def save(self) :
        if self.mode != 'train' :
            raise Exception('invalid mode')
        encoder_weights = self.encoder.get_weights()
        decoder_weights = self.decoder.get_weights()
        self._save_weights(encoder_weights, 'data/encoder.hdf5')
        self._save_weights(decoder_weights, 'data/decoder.hdf5')

    def compile(self) :
        self.model.compile(loss = 'categorical_crossentropy', optimizer = Adadelta(clipnorm = 1.))

    # in_data & out_data numpy bool array of shape (n_sample, seq_len, io_dim)
    # return train (loss, accuracy)
    def train(self, in_data, out_data) :
        if self.mode != 'train' :
            raise Exception('invalid mode')
        return self.model.train_on_batch(in_data, out_data, accuracy = True)

    # in_data & out_data numpy bool array of shape (n_sample, seq_len, io_dim)
    # return the evaluation (loss, accuracy)
    def evaluate(self, in_data, out_data) :
        if self.mode != 'train' :
            raise Exception('invalid mode')
        return self.model.test_on_batch(in_data, out_data, accuracy = True)

    # sequence : numpy bool array of shape (seq_len, io_dim)
    # return : numpy float32 array of shape (feature_dim)
    def encode(self, sequence) :
        if self.mode != 'encode' :
            raise Exception('invalid mode')
        input_sequences = np.ndarray((1, self.seq_len, self.io_dim), dtype = np.bool)
        input_sequences[0] = sequence
        return self.model.predict(input_sequences)[0]
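A hedged usage sketch for the wrapper above (Codec, config/semantic.json, and the data arrays in_batch, out_batch, one_sequence are assumed to exist as in the project): train the autoencoder end to end, then reuse the encoder half to embed sequences.

m = Model('train')
m.compile()
loss, acc = m.train(in_batch, out_batch)  # bool arrays of shape (n, seq_len, io_dim)
m.save()

enc = Model('encode')
enc.load()
vector = enc.encode(one_sequence)  # float array of shape (feature_dim,)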
Esempio n. 47
0
              metrics=['accuracy'])

# In[ ]:

nb_epoch = 1
nb_train_samples = 2048
nb_validation_samples = 832

# In[ ]:

while True:
    x_train, y_train = train_generator.next()
    print('TRAIN', model.train_on_batch(x_train, y_train))

    x_test, y_test = validation_generator.next()
    print('TEST', model.test_on_batch(x_test, y_test))

# model.fit_generator(
#     train_generator,
#     samples_per_epoch=nb_train_samples,
#     nb_epoch=nb_epoch,
#     validation_data=validation_generator,
#     nb_val_samples=nb_validation_samples)

print('hello done.')

# In[ ]:

model.save_weights('models/1000-samples--1-epochs.h5')

# In[ ]:
Esempio n. 48
0
    index += 1

model.save_weights("test_weights.hdf5", overwrite=True)
model.load_weights("test_weights.hdf5")

end = datetime.now()
diff = end - begin
avgSec = diff.total_seconds() / EPOCHS
avgMin = int(avgSec / 60)
avgHour = int(avgMin / 60)
avgDay = int(avgHour / 24)
avgSec -= 60 * avgMin
avgMin -= 60 * avgHour
avgHour -= 24 * avgDay

# loss, acc = model.evaluate(tX, tY, batch_size=BATCH_SIZE, show_accuracy=True)
progbar = generic_utils.Progbar(tX.shape[0])
index = 0
for i in range(len(tX) // BATCH_SIZE):
    loss, acc = model.test_on_batch(tX[index : index + BATCH_SIZE], tY[index : index + BATCH_SIZE], accuracy=True)
    progbar.add(BATCH_SIZE, values=[("test loss", loss), ("test acc", acc)])
    index += BATCH_SIZE

"""
fResult = open('test.txt', 'a+')
fResult.write('Test loss / test accuracy = %.4f / %.4f\n'%(loss, acc))
fResult.write('mode acc / test mode acc = %.4f / %.4f\n'%(accmode, taccmode))
fResult.write('Average learning time = %ddays %d:%d:%d\n\n'%(avgDay, avgHour, avgMin, avgSec))
fResult.close()
"""
Esempio n. 49
0
class LogRegressionMNIST(Optimizable):
    """
    Implementation of a one-layer neural network to perform logistic
    regression on the MNIST dataset.
    """

    def __init__(self, n_batch: int = 128):
        """
        Constructor.
        :param n_batch: number of elements in the batch used to compute the
        gradients with back-propagation.
        """
        self.n_batch = n_batch
        self.n_classes = 10
        self.img_rows, self.img_cols = 28, 28
        self.input_size = self.img_rows * self.img_cols
        self.x_train, self.y_train, self.n_train, \
            self.x_test, self.y_test, self.n_test =\
            None, None, None, None, None, None
        self.model = None
        self.weights = None
        self.weights_shapes = None
        self.gradients = None
        self.get_keras_gradients = None
        self.history = None
        self.test_history = None
        self.training_history = None
        self.dim_observed = 0
        self.build_dataset()
        return

    def build_dataset(self) -> None:
        """
        Downloads the MNIST dataset if needed and format it (normalization and
        formatting of the y vectors).
        """
        # Load the MNIST dataset and reshape it accordingly to the model
        (self.x_train, self.y_train), (self.x_test, self.y_test) =\
            mnist.load_data()
        self.n_train = self.x_train.shape[0]
        self.n_test = self.x_test.shape[0]
        self.x_train = self.x_train.reshape(self.x_train.shape[0],
                                            self.input_size)
        self.x_test = self.x_test.reshape(self.x_test.shape[0],
                                          self.input_size)
        # Normalize
        self.x_train = self.x_train.astype('float32') / 255.0
        self.x_test = self.x_test.astype('float32') / 255.0
        # Convert class vectors to binary class matrices
        self.y_train = to_categorical(self.y_train, self.n_classes)
        self.y_test = to_categorical(self.y_test, self.n_classes)
        return

    def build_network(self) -> None:
        """
        Builds the network architecture using Keras utilities and compiles it.
        One hidden layer implementing logistic regression.
        """
        self.model = Sequential()
        self.model.add(Dense(self.n_classes, activation='sigmoid',
                             input_dim=self.x_train.shape[1]))
        self.model.add(Dropout(0.7))
        self.model.compile(loss='categorical_crossentropy', optimizer='sgd',
                           metrics=['accuracy'])
        return

    def build_weight_buffers(self) -> None:
        """
        Builds some buffers used to access the weights of the network and the
        shapes of the layers.
        """
        self.weights = self.model.trainable_weights
        self.weights_shapes = []
        self.dim_observed = 0
        for w_vec in self.weights:
            self.weights_shapes.append(w_vec.shape)
            self.dim_observed += int(np.prod(np.array(w_vec.shape)))
        return

    def build_gradient_buffers(self) -> None:
        """
        Builds some buffers that will be used to compute the gradients of the
        loss function with respect to the weights in the network.
        """
        self.gradients =\
            self.model.optimizer.get_gradients(self.model.total_loss,
                                               self.weights)
        input_tensors = [self.model.inputs[0],                      # input data
                         self.model.sample_weights[0],  # weight for each sample
                         self.model.targets[0],                         # labels
                         k_back.learning_phase()]           # train or test mode
        self.get_keras_gradients = k_back.function(inputs=input_tensors,
                                                   outputs=self.gradients)
        return

    def build_model(self) -> None:
        """
        Builds the whole model by invoking the previous auxiliary functions.
        """
        self.build_network()
        self.build_weight_buffers()
        self.build_gradient_buffers()
        self.history = LossHistory()
        self.test_history = []
        self.training_history = []
        return

    def next_train_batch(self) -> [np.array, np.array]:
        """
        Extracts a new batch from the training set.
        :return: list with two elements: the x values of the batch and the
        corresponding targets.
        """
        index = sample(range(self.n_train), self.n_batch)
        x_sample = self.x_train[index, :]
        y_sample = self.y_train[index, :]
        return [x_sample, y_sample]

    def next_test_batch(self) -> [np.array, np.array]:
        """
        Extracts a new batch from the test set.
        :return: list with two elements: the x values of the batch and the
        corresponding targets.
        """
        index = sample(range(self.n_test), self.n_batch)
        x_sample = self.x_test[index, :]
        y_sample = self.y_test[index, :]
        return [x_sample, y_sample]

    def train(self, n_epochs: int = 5) -> None:
        """
        Trains the neural network for a fixed number of epochs using the
        internal optimizers of Keras.
        :param n_epochs: number of training epochs
        """
        self.model.fit(self.x_train, self.y_train, epochs=n_epochs,
                       batch_size=self.n_batch, callbacks=[self.history])
        return

    def predict(self, x_new: np.array) -> np.array:
        """
        Predicts the label for the input x_new using the current state of the
        network.
        :param x_new: numpy array with dimensions
        [n_items, self.img_rows, self.img_cols] containing the test points.
        :return: numpy array containing the predicted labels.
        """
        return self.model.predict(x_new, batch_size=self.n_batch)

    def compute_test_loss(self) -> None:
        """
        Computes the value of the loss function on a randomly chosen test batch.
        """
        [x_batch, y_batch] = self.next_test_batch()
        loss = self.model.test_on_batch(x_batch, y_batch)
        self.history.losses.append(loss)
        self.test_history.append(loss)
        return

    def compute_training_loss(self) -> None:
        """
        Computes the value of the loss function on a randomly chosen training
        batch.
        """
        [x_batch, y_batch] = self.next_train_batch()
        loss = self.model.test_on_batch(x_batch, y_batch)
        self.history.losses.append(loss)
        self.training_history.append(loss)
        return

    def get_weight_vector(self) -> np.array:
        """
        Gets the values of the weights of the network, re-formatted as a 1-D
        vector.
        :return: 1-D numpy array containing the value of the weights.
        """
        weights = self.model.get_weights()
        n_layers = len(weights)
        weights_vector = weights[0].flatten()
        for n in range(1, n_layers):
            weights_vector = np.append(weights_vector, weights[n].flatten())
        return weights_vector

    def set_weight_vector(self, weights: np.array) -> None:
        """
        Takes the weights vector passed as argument, converts it to a list of
        elements with the right shapes and sets the new weights of the network.
        :param weights: 1-D vector containing the weight values.
        """
        processed_size = 0
        new_weights_list = []
        for tensor_shape in self.weights_shapes:
            present_size = int(np.prod(np.array(tensor_shape)))
            vector_end = processed_size + present_size
            layer_weights = np.copy(weights[processed_size:vector_end])
            new_weights_list.append(np.reshape(layer_weights, tensor_shape))
            processed_size = processed_size + present_size
        self.model.set_weights(new_weights_list)
        return

    def get_gradient_vector(self) -> np.array:
        """
        Computes the gradient of the loss function with respect to the current
        weights and returns it as a 1-D numpy array.
        :return: 1-D numpy array containing the numerical value of the gradient.
        """
        [x_batch, y_batch] = self.next_train_batch()
        inputs = [x_batch, np.ones(self.n_batch), y_batch, 0]
        grads = self.get_keras_gradients(inputs)
        n_layers = len(grads)
        grad_vector = grads[0].flatten()
        for n in range(1, n_layers):
            grad_vector = np.append(grad_vector, grads[n].flatten())
        return grad_vector

    def f(self, x: np.array) -> float:
        """
        Returns the value of the training loss function of the network computed
        with the weights in x.
        :param x: numpy array containing the weights of the network.
        :return: value of the objective function at x.
        """
        self.set_weight_vector(x.flatten())
        self.compute_training_loss()
        return self.training_history[-1]

    def get_gradient(self, x: np.array) -> np.array:
        """
        Returns the gradients of the objective function we want to optimize
        computed at x.
        :param x: numpy array containing the desired coordinates.
        :return: function gradients at x.
        """
        self.set_weight_vector(x.flatten())
        gradient = self.get_gradient_vector()
        return gradient
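
A minimal usage sketch (not part of the original class): f and get_gradient
expose the network as a flat weight vector with its loss and gradient, which
is exactly the interface scipy.optimize expects. Here `net` is a hypothetical
instance of the class above with build_model() already called; note that each
call draws a fresh random batch, so the optimizer sees a stochastic loss.

from scipy.optimize import minimize

x0 = net.get_weight_vector()        # current weights as a 1-D vector
result = minimize(fun=net.f,        # loss on a random training batch
                  x0=x0,
                  jac=net.get_gradient,   # gradient at the given weights
                  method='L-BFGS-B',
                  options={'maxiter': 50})
net.set_weight_vector(result.x)     # install the optimized weights
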
Example n. 50
    hist = model.fit(origin,
                     target,
                     epochs=numepoch,
                     batch_size=batchsize,
                     validation_split=0.01,
                     sample_weight=data_weights,
                     shuffle=True,
                     callbacks=[history])  # starts training
    stop = timeit.default_timer()

    print('time elapsed => ', stop - start)
    model.save_weights('myweights.h5')
    #print(history.losses);
    np.savetxt('loss.txt', history.losses, delimiter=' ')

print("test loss ", model.test_on_batch(origintest, targettest))
# test error
#testLoss = model.test_on_batch(origintest,targettest);
#print("test loss ",model.evaluate(origintest,targettest,batch_size = None,verbose = 1));

filepath = 'type' + str(nodetypt) + '.txt'
with open(filepath, 'wb') as f:
    lent = len(model.layers)
    f.write((str(lent) + "\n").encode())
    for layer in model.layers:
        weights = layer.get_weights()
        parameter_len = len(weights)
        f.write((str(parameter_len) + "\n").encode())
        for weight in weights:
            header_ = ""
            for s in weight.shape:
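                # --- The excerpt is cut off here by the page break; the ---
                # --- lines below are a guessed completion (not original ---
                # --- code): write the shape header, then the flattened  ---
                # --- weight values as one whitespace-separated row.     ---
                header_ += str(s) + " "
            f.write((header_.strip() + "\n").encode())
            np.savetxt(f, weight.reshape(1, -1), delimiter=' ')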
Example n. 51
    print('Test score:', score)

else:
    print('Using real time data augmentation')
    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print('Training...')
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(num_images-test_size)
        for X_batch, Y_batch in flow(image_list[0:-test_size]):
            X_batch = X_batch.reshape(X_batch.shape[0], 3, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            loss = model.train_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('train loss', loss)])

        print('Testing...')
        # test time!
        progbar = generic_utils.Progbar(test_size)
        for X_batch, Y_batch in flow(image_list[-test_size:]):
            X_batch = X_batch.reshape(X_batch.shape[0], 3, img_rows, img_cols)
            Y_batch = np_utils.to_categorical(Y_batch, nb_classes)
            score = model.test_on_batch(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[('test loss', score)])

    json_string = model.to_json()
    open('cnn1_model_architecture.json', 'w').write(json_string)
    model.save_weights('cnn1_model_weights.h5')
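
    # Reload sketch (standard Keras API of this era; not in the original
    # script): rebuild the architecture from the saved JSON, then load the
    # trained weights back into it.
    from keras.models import model_from_json
    model = model_from_json(open('cnn1_model_architecture.json').read())
    model.load_weights('cnn1_model_weights.h5')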

Example n. 52
def run():
    datadir = "/reg/d/ana01/temp/davidsch/ImgMLearnFull"
    h5files = glob(os.path.join(datadir, "amo86815_mlearn-r070*.h5"))
    h5files.extend(glob(os.path.join(datadir, "amo86815_mlearn-r071*.h5")))
#    h5files = ["/reg/d/ana01/temp/davidsch/ImgMLearnFull/amo86815_mlearn-r071-c0000.h5",
#               "/reg/d/ana01/temp/davidsch/ImgMLearnFull/amo86815_mlearn-r071-c0001.h5",
#               "/reg/d/ana01/temp/davidsch/ImgMLearnFull/amo86815_mlearn-r071-c0002.h5"]
    assert len(h5files)>0
    
    datareader = H5MiniBatchReader(h5files=h5files,
                                   minibatch_size=32,
                                   validation_size=64,
                                   feature_dataset='xtcavimg',
                                   label_dataset='acq.peaksLabel',
                                   return_as_one_hot=True,
                                   feature_preprocess=['log','mean'],
                                   number_of_batches=None,
                                   class_labels_max_imbalance_ratio=1.0,
                                   add_channel_to_2D='channel_row_column',
                                   max_mb_to_preload_all=None,
                                   random_seed=None,
                                   verbose=True)  

    validation_features, validation_labels = datareader.get_validation_set()

    print("starting to build and compile keras/theano model...")
    sys.stdout.flush()
    t0 = time.time()
    model = Sequential()

    ## layer 1
    kern01_W_init = (0.06/2.0)*scipy.stats.truncnorm.rvs(-2.0, 2.0, size=(8,1,8,8)).astype(np.float32)
    kern01_B_init = np.zeros(8,dtype=np.float32)
    model.add(Convolution2D(8,8,8, border_mode='same', weights=[kern01_W_init, kern01_B_init],
                            input_shape=datareader.features_placeholder_shape()[1:]))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(10,10), strides=(13,13)))
    
    ## layer 2
    kern02_W_init = (0.06/2.0)*scipy.stats.truncnorm.rvs(-2.0, 2.0, size=(8,8,6,6)).astype(np.float32)
    kern02_B_init = np.zeros(8,dtype=np.float32)
    model.add(Convolution2D(8,6,6, border_mode='same', weights=[kern02_W_init, kern02_B_init]))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(10,10), strides=(13,13)))
    
    model.add(Flatten())
    
    ## layer 3
    H03_W_init =  (0.06/2.0)*scipy.stats.truncnorm.rvs(-2.0, 2.0, size=(96,16)).astype(np.float32)
    H03_B_init = np.zeros(16,dtype=np.float32)
    model.add(Dense(16, weights=[H03_W_init, H03_B_init]))
    model.add(Activation('relu'))
    
    ## layer 4
    H04_W_init =  (0.06/2.0)*scipy.stats.truncnorm.rvs(-2.0, 2.0, size=(16, datareader.num_outputs())).astype(np.float32)
    H04_B_init = np.zeros(datareader.num_outputs(),dtype=np.float32)
    model.add(Dense(datareader.num_outputs(), weights=[H04_W_init, H04_B_init]))
    model.add(Activation('softmax'))

    sgd = SGD(lr=0.01, decay=0.0004, momentum=0.96)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)
    print("building/compiling theano model took %.2f sec" % (time.time()-t0),)
    sys.stdout.flush()

    for step_number in range(3000):
        t0 = time.time()
        train_features, train_labels = datareader.get_next_minibatch()
        model.train_on_batch(train_features, train_labels)
        print("step %3d took %.2f sec." % (step_number, time.time()-t0))
        sys.stdout.flush()

    print("Starting evaluation.")
    t0 = time.time()
    loss, validation_accuracy = model.test_on_batch(validation_features, validation_labels, accuracy=True, sample_weight=None)
    print("validation accuracy: %.2f%%" % (100.0*validation_accuracy,))
    print("evaluation took %.2f sec" % (time.time()-t0,))
Example n. 53
model.compile(loss=qri.mae_clip, optimizer=sgd)

# Use early stopping and saving as callbacks
early_stop = EarlyStopping(monitor='val_loss', patience=10)
save_best = ModelCheckpoint("models/%s.mdl" % MDL_NAME, save_best_only=True)
callbacks = [early_stop, save_best]

# Train model
t0 = time.time()
hist = model.fit(train_set[0], train_set[1], validation_data=valid_set,
                 verbose=2, callbacks=callbacks, nb_epoch=1000, batch_size=20)
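# (nb_epoch is the pre-Keras-2 spelling; Keras 2 renamed this argument to epochs)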
time_elapsed = time.time() - t0

# Load best model
model.load_weights("models/%s.mdl" % MDL_NAME)

# Print time elapsed and loss on testing dataset
test_set_loss = model.test_on_batch(test_set[0], test_set[1])
print "\nTime elapsed: %f s" % time_elapsed
print "Testing set loss: %f" % test_set_loss

# Save results
qri.save_results("results/%s.out" % MDL_NAME, time_elapsed, test_set_loss)
qri.save_history("models/%s.hist" % MDL_NAME, hist.history)

# Plot training and validation loss
qri.plot_train_valid_loss(hist.history)

# Make predictions
qri.plot_test_predictions(model, train_set)
Example n. 54
def runCNN_Train(feature_path,
                 fold='config2_fold_1',
                 useType=['Spec', 'Ceps', 'GCoS'],
                 use_time_range=None):

    ############### Load and Preprocess Data ###############
    numVal = 30  # number of validation songs
    numTrain = 180  # number of training songs
    use_same_val = True  # whether to reuse the same validation set as the last run
    use_same_train = True  # whether to reuse the same training set as the last run
    numFrame = 5  # stack 5 consecutive frames (0.05 s) along the time dimension

    val_fold = 'val_' + fold + '.txt'
    train_fold = 'train_' + fold + '.txt'
    #val_fold = Hack4LoadbyFold(numVal, 'test_%s.txt' % fold, 'val.txt', use_same=use_same_val) #you can specify your own fold
    #train_fold = Hack4LoadbyFold(numTrain, 'train_%s.txt' % fold, 'train.txt', use_same=use_same_train)

    print('Loading training data')
    train_data, train_label, train_index, tData_shape = LoadbyFold(
        train_fold, feature_path, use_type=useType, t_range=use_time_range)
    t_samples = tData_shape[0]

    print('\nLoading validation data')
    val_data, val_label, val_index, vData_shape = LoadbyFold(val_fold,
                                                             feature_path,
                                                             use_type=useType)
    v_samples = vData_shape[0]

    data_shape = (numFrame, tData_shape[2], tData_shape[3])

    print('\nInput shape: %s' % str(data_shape))

    ############### Define Model ###############
    initLR = 0.001
    num_middle_node = 512
    k_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.05)
    model_name = 'CNN_model_(' + fold + ')_' + str(useType) + '_' + str(
        use_time_range)

    model = Sequential()
    model.add(
        Conv2D(32, (5, 3),
               activation='selu',
               input_shape=data_shape,
               kernel_initializer=k_init))
    model.add(Conv2D(32, (1, 3), activation='selu', kernel_initializer=k_init))
    model.add(MaxPooling2D(pool_size=(1, 2)))

    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(
        Dense(num_middle_node, activation='selu', kernel_initializer=k_init))
    model.add(Dropout(0.5))
    model.add(
        Dense(num_middle_node, activation='selu', kernel_initializer=k_init))
    model.add(Dropout(0.5))
    model.add(Dense(88, activation='sigmoid'))

    optim = keras.optimizers.Adam(lr=initLR)
    model.compile(loss=keras.losses.binary_crossentropy,
                  optimizer=optim,
                  metrics=[st.Precision, st.Recall, st.Fscore])

    ############### Training Field ###############
    epoch = 20  #max epochs for training
    batchSize = 100
    v_batchSize = 500
    break_patience = 4  # early-stopping patience: training stops if val_loss has not decreased for this many epochs
    t_idx = np.random.permutation(t_samples)
    v_idx = np.random.permutation(v_samples)
    train_batches = int(np.ceil(t_samples / batchSize))
    val_batches = int(np.ceil(v_samples / v_batchSize))
    history = {}
    history['loss'] = []
    history['Fscore'] = []
    history['val_loss'] = []
    history['val_Fscore'] = []

    patience = 0
    best_f = 0
    best_v = 100000
    best_epoch = 0

    print('Using ' + str(useType) + ' for training')
    print('\nTrain on %d samples - Validate on %d samples' %
          (t_samples, v_samples))
    for e in range(epoch):
        cur_ep = 'Epoch %d/%d - ' % (e + 1, epoch)
        loss = 0
        fscore = 0
        recall = 0
        precision = 0
        for i in range(train_batches):
            sel_t = t_idx[i * batchSize:(i + 1) * batchSize]
            if i == train_batches - 1:
                sel_t = t_idx[i * batchSize:]

            data, label = preCNN_processBatch(train_data, train_label,
                                              train_index, sel_t, numFrame)
            scalar = model.train_on_batch(data, label)

            loss += scalar[0]
            fscore += scalar[3]
            recall += scalar[2]
            precision += scalar[1]

            batch_info = cur_ep + '%d/%d - ' % (i + 1, train_batches)
            batch_info += 'loss: %.4f - Precision: %.4f - Recall: %.4f - Fscore: %.4f' % (
                loss / (i + 1), precision / (i + 1), recall / (i + 1), fscore /
                (i + 1))
            if i == (train_batches - 1):
                print(batch_info)
                history['loss'].append(loss / train_batches)
                history['Fscore'].append(fscore / train_batches)
            else:
                print(batch_info, end='\r')

        ### Validation ###
        v_loss = 0
        v_fscore = 0
        for i in range(val_batches):
            print('Validation progress: %d/%d' % (i + 1, val_batches),
                  end='\r')

            sel_v = v_idx[i * v_batchSize:(i + 1) * v_batchSize]
            if i == val_batches - 1:
                sel_v = v_idx[i * v_batchSize:]

            data, label = preCNN_processBatch(val_data, val_label, val_index,
                                              sel_v, numFrame)
            #data = preCNN(combineFrame_2(val_data, numFrame, sample_per_song=3000, sel_t), channel=3)[:,:,:,use_specific_channel]

            v_scalar = model.test_on_batch(data, label)

            v_loss += v_scalar[0]
            v_fscore += v_scalar[3]
        print('Validation - loss: %.4f - F-score: %.4f' %
              (v_loss / val_batches, v_fscore / val_batches))
        print(
            '-------------------------------------------------------------------------------------------'
        )

        history['val_loss'].append(v_loss / val_batches)
        history['val_Fscore'].append(v_fscore / val_batches)

        ### Early Stop Check (according to val_loss) ###
        if v_loss >= best_v:
            patience += 1
        else:
            patience = 0
            best_epoch = e
            best_v = v_loss
            best_f = v_fscore
            model.save('./Result/%s.hdf5' % model_name)
        if patience == break_patience:
            print('Early Stopped')
            break

    info = 'Best epoch: %d - Loss: %.4f - F-score: %.4f\n' % (best_epoch + 1, (
        best_v / val_batches), (best_f / val_batches))
    print(info)
    log = open('log.txt', 'a')
    log.write(fold + str(useType) + '\n')
    log.write(info + '\n\n')
    log.close()

    #Save training and validation history
    out_history = h5py.File(
        "History/CNN_history(" + fold + str(useType) + ").hd5", 'w')
    out_history.create_dataset('loss', data=history['loss'])
    out_history.create_dataset('Fscore', data=history['Fscore'])
    out_history.create_dataset('val_loss', data=history['val_loss'])
    out_history.create_dataset('val_Fscore', data=history['val_Fscore'])
    out_history.close()

    return info, model
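
st.Precision, st.Recall and st.Fscore above are user-supplied metric
functions, not part of Keras. A minimal sketch of what such a backend-style
metric can look like for the binary multi-label targets used here (an
assumption; the real st module is not shown in the excerpt):

import keras.backend as K

def Precision(y_true, y_pred):
    # true positives / predicted positives, with epsilon to avoid div-by-zero
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_pos / (pred_pos + K.epsilon())
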
Example n. 55
def CNN():
	input_frames=10
	batch_size = 32
	nb_classes = 20
	nb_epoch = 200
	img_rows, img_cols = 224,224
	img_channels = 2*input_frames

	print 'X_sample: '+str(X_sample.shape)
	print 'X_test: '+str(X_test.shape)
	print 'Y_test: '+str(Y_test.shape)


	print 'Preparing architecture...'

	model = Sequential()

	model.add(Convolution2D(96, 7, 7, border_mode='same',input_shape=(img_channels, img_rows, img_cols)))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))

	model.add(Convolution2D(256, 5, 5, border_mode='same'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))

	model.add(Convolution2D(512, 3, 3, border_mode='same'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))

	model.add(Convolution2D(512, 3, 3, border_mode='same'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))

	model.add(Convolution2D(512, 3, 3, border_mode='same'))
	model.add(BatchNormalization())
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2, 2)))

	model.add(Flatten())
	model.add(Dense(2048))
	fc_output=Activation('relu')
	model.add(fc_output)
	model.add(Dropout(0.5))
	model.add(Dense(2048))
	model.add(Activation('relu'))
	model.add(Dropout(0.5))

	model.add(Dense(nb_classes))
	softmax_output=Activation('softmax')
	model.add(softmax_output)



	print 'Starting with training...'
	gc.collect()
	sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
	model.compile(loss='categorical_crossentropy', optimizer=sgd)  # Sequential models take a plain loss, not an output-name dict

	print("Using real time data augmentation")

	datagen = ImageDataGenerator(
		featurewise_center=True,  # set input mean to 0 over the dataset
		samplewise_center=False,  # set each sample mean to 0
		featurewise_std_normalization=True,  # divide inputs by std of the dataset
		samplewise_std_normalization=False,  # divide each input by its std
		zca_whitening=False,  # apply ZCA whitening
		rotation_range=20,  # randomly rotate images in the range (degrees, 0 to 180)
		width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
		height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
		horizontal_flip=True,  # randomly flip images
		vertical_flip=True)  # randomly flip images

	# compute quantities required for featurewise normalization
	# (std, mean, and principal components if ZCA whitening is applied)
	datagen.fit(X_sample)

	for e in range(nb_epoch):
		print('-'*40)
		print('Epoch', e)
		print('-'*40)
		print("Training...")
		# batch train with realtime data augmentation
		progbar = generic_utils.Progbar(X_train.shape[0])
		for X_train, Y_train in getTrainData():
			for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size=batch_size):
				loss = model.train_on_batch(X_batch, Y_batch, accuracy=True)
				progbar.add(X_batch.shape[0], values=[("train loss", loss[0]),("train accuracy", loss[1])])

		print('Saving layer representation and saving weights...')

		# fc_output and softmax_output are Layer objects, not arrays; evaluate
		# their output tensors on the last training batch before saving them
		# (this fix assumes `from keras import backend as K` in the imports).
		get_outputs = K.function([model.layers[0].input, K.learning_phase()],
		                         [fc_output.output, softmax_output.output])
		fc_values, softmax_values = get_outputs([X_batch, 0])

		with h5py.File('fc_output.h5', 'w') as hf:
			hf.create_dataset('fc_output', data=fc_values)

		with h5py.File('softmax_output.h5', 'w') as hf:
			hf.create_dataset('softmax_output', data=softmax_values)

		model.save_weights('temporal_stream_model.h5')

		print("Testing...")
		# test time!
		progbar = generic_utils.Progbar(X_test.shape[0])
		for X_test, Y_test in getTestData():
			for X_batch, Y_batch in datagen.flow(X_test, Y_test, batch_size=batch_size):
				score = model.test_on_batch(X_batch, Y_batch, accuracy=True)
				progbar.add(X_batch.shape[0], values=[("test loss", score[0]),("test accuracy", score[1])])
Example n. 56
            height_shift_range=0,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=False,  # randomly flip images
            vertical_flip=False)  # randomly flip images

        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(X_train)

        for e in range(nb_epoch):
            print('-'*40)
            print('Epoch', e)
            print('-'*40)
            print("Training...")
            # batch train with realtime data augmentation
            progbar = generic_utils.Progbar(X_train.shape[0])
            for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size=batch_size):
                score, trainAccu = model.train_on_batch(X_batch, Y_batch, accuracy=True)
                progbar.add(X_batch.shape[0], values=[("train accuracy", trainAccu)])

            print("Testing...")
            # test time!
            progbar = generic_utils.Progbar(X_test.shape[0])
            for X_batch, Y_batch in datagen.flow(X_test, Y_test, batch_size=batch_size):
                score, testAccu = model.test_on_batch(X_batch, Y_batch, accuracy=True)
                progbar.add(X_batch.shape[0], values=[("test accuracy", testAccu)])
    trainScores.append(trainAccu)
    testScores.append(testAccu)
scipy.io.savemat('cnn_results', {'trainAccu': trainScores, 'testAccu': testScores})
print ('Average train accuracies: {0}'.format(np.mean(trainScores)))
print ('Average test accuracies: {0}'.format(np.mean(testScores)))
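
A quick check of the saved results (scipy.io.savemat appends the .mat
extension automatically, so the file written above is cnn_results.mat):

from scipy.io import loadmat

results = loadmat('cnn_results.mat')
print('train accuracies:', results['trainAccu'])
print('test accuracies:', results['testAccu'])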