Example #1
def main():
    # Assumes the framework's Sequential, Dense, ReLU, Softmax, GradientDescent,
    # categorical_cross_entropy, and to_categorical, plus an mnist loader,
    # are already imported.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    print('Imported MNIST data: training input %s and training labels %s.' % (
        x_train.shape, y_train.shape))
    print('Imported MNIST data: test input %s and test labels %s.' % (
        x_test.shape, y_test.shape))

    # Flatten the 28x28 images into 784-dim float vectors in [0, 1]
    # and one-hot encode the digit labels.
    N, H, W = x_train.shape
    x = x_train.reshape((N, H * W)).astype('float') / 255
    y = to_categorical(y_train, num_classes=10)

    # Three fully connected layers (784 -> 300 -> 100 -> 10) with ReLU
    # activations and a softmax output.
    model = Sequential()
    model.add(Dense(), ReLU(), layer_dim=(28 * 28, 300), weight_scale=1e-2)
    model.add(Dense(), ReLU(), layer_dim=(300, 100), weight_scale=1e-2)
    model.add(Dense(), Softmax(), layer_dim=(100, 10), weight_scale=1e-2)

    # Plain gradient descent with cross-entropy loss, 10 epochs of
    # mini-batches of 50.
    model.compile(optimizer=GradientDescent(learning_rate=1e-2),
                  loss_func=categorical_cross_entropy)
    model.fit(x, y, epochs=10, batch_size=50, verbose=False)

    # Preprocess the test set the same way and report the final metrics.
    N, H, W = x_test.shape
    x = x_test.reshape((N, H * W)).astype('float') / 255
    y = to_categorical(y_test, num_classes=10)

    model.evaluate(x, y)
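
The to_categorical helper above mirrors the usual one-hot encoder; if the framework does not ship one, a minimal NumPy sketch (assuming integer class labels in [0, num_classes)) looks like this:

import numpy as np

def to_categorical(labels, num_classes):
    # Map N integer labels to an (N, num_classes) one-hot matrix.
    labels = np.asarray(labels, dtype=int).ravel()
    one_hot = np.zeros((labels.size, num_classes), dtype=float)
    one_hot[np.arange(labels.size), labels] = 1.0
    return one_hot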
Example #2
def linear_classification(a=1.0, b=0.0, graph=False):

    # prepare data: noisy 2-D points, labeled 1 if they fall below the
    # line y = a*x + b, one-hot encoded for the softmax output
    x = np.linspace(-100, 100, 200)
    y = a * x + b
    X = np.array(list(zip(x, y))) + np.random.randn(200, 2) * 100
    Y = to_one_hot(np.where(a * X[:, 0] + b > X[:, 1], 1, 0))
    (train_x, train_y), (test_x, test_y) = split_data(X,
                                                      Y,
                                                      ratio=0.8,
                                                      random=True)

    # build a simple FNN: 2 inputs feeding a 2-unit softmax layer,
    # i.e. a linear classifier
    i = Input(2)
    x = Dense(2, activation='softmax')(i)

    # define trainer
    trainer = Trainer(loss='cross_entropy',
                      optimizer=Adam(learning_rate=0.05),
                      batch_size=50,
                      epochs=50,
                      metrics=['accuracy'])

    # create model
    model = Sequential(i, x, trainer)

    model.summary()

    # training process
    model.fit(train_x, train_y)
    print(model.evaluate(test_x, test_y))

    if graph:
        plt.plot(model.history['loss'])
        plt.show()

        # predict
        y_hat = model.predict(test_x)
        y_hat = np.argmax(y_hat, axis=1)
        simple_plot(test_x, y_hat, a, b)
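
The example relies on a split_data helper that is not shown. A plausible sketch, assuming it shuffles the rows and splits by ratio into (train, test) pairs as the call above suggests:

import numpy as np

def split_data(X, Y, ratio=0.8, random=True):
    # Partition paired arrays into (train, test) by `ratio`,
    # optionally shuffling the rows first.
    n = len(X)
    idx = np.random.permutation(n) if random else np.arange(n)
    cut = int(n * ratio)
    return ((X[idx[:cut]], Y[idx[:cut]]), (X[idx[cut:]], Y[idx[cut:]]))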
Example #3
# (train_x is assumed to have been reshaped to (N, 1, img_rows, img_cols)
# earlier, mirroring the test_x reshape below)
train_y = convert_to_one_hot(train_y, num_classes)
test_x = np.reshape(test_x, (len(test_x), 1, img_rows, img_cols)).astype(skml_config.config.i_type)
test_y = convert_to_one_hot(test_y, num_classes)

train_x, valid_x, train_y, valid_y = train_test_split(train_x, train_y)


# Two conv blocks (Convolution -> BatchNormalization -> ReLU) with max pooling
# after the first, then global average pooling and an affine layer over the classes.
filters = 64
model = Sequential()
model.add(Convolution(filters, 3, input_shape=input_shape))
model.add(BatchNormalization())
model.add(ReLU())
model.add(MaxPooling(2))
model.add(Convolution(filters, 3))
model.add(BatchNormalization())
model.add(ReLU())
model.add(GlobalAveragePooling())
model.add(Affine(num_classes))
model.compile(SoftmaxCrossEntropy(), Adam())

train_batch_size = 100
valid_batch_size = 1
print("訓練開始: {}".format(datetime.now().strftime("%Y/%m/%d %H:%M")))
model.fit(train_x, train_y, train_batch_size, 20, validation_data=(valid_batch_size, valid_x, valid_y), validation_steps=1)
print("訓練終了: {}".format(datetime.now().strftime("%Y/%m/%d %H:%M")))

model.save(save_path)

loss, acc = model.evaluate(test_x, test_y)
print("Test loss: {}".format(loss))
print("Test acc: {}".format(acc))