def run():
    file_path = os.path.dirname(
        os.path.realpath(__file__)) + "/dlmb_mnist_example.json"

    # If a file with the neural-net model's architecture already exists,
    # then there is no need to build a new model.
    if os.path.isfile(file_path):

        # load the model and get its predictions based on x_test
        nn_model = Sequential()
        nn_model.load(file_path)

        predictions = nn_model.predict(x_test)

        # Compare the predictions to the correct labels.
        # (Assuming validate_model returns the number of correct predictions
        # out of the 10,000 test samples, dividing by 100 gives a percentage.)
        print(
            f"This model got a {validate_model(predictions, y_test)/100}% accuracy"
        )

    # If the file doesn't exist then we need to build a neural-net model and train it.
    else:

        # Build the neural-net model
        nn_model = Sequential([
            Dense(
                128, 784, activation="ReLU"
            ),  # 128 outputs, 784 inputs (one per pixel of a 28x28 image)
            Batchnorm(128),
            Dense(128, 128, activation="ReLU"),
            Batchnorm(128),
            Dense(32, 128, activation="ReLU"),
            Batchnorm(32),
            Dense(10, 32, activation="Softmax"
                  )  # We have 10 nodes in the layer for each number from 0 - 9
        ])

        nn_model.build(loss="crossentropy", optimizer="adam")
        # Cross-entropy is a good loss function for classification problems
        # (a minimal sketch of the computation follows this example);
        # Adam is one of the most popular optimizers

        nn_model.train(x_train, y_train, epochs=10, batch_size=1000)
        # Train the model: 10 passes over the data, with the 60,000 samples
        # split into batches of 1,000, i.e. 60 batches per epoch

        # Now we save the model so we can use it again without re-training
        nn_model.save(file_path)  # When saving, files must end in .json
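As promised above, here is a minimal NumPy sketch of the cross-entropy computation, written only for intuition; it is not this library's actual implementation, and the clipping constant and batch averaging are assumptions.

import numpy as np

def cross_entropy(probs, one_hot_labels, eps=1e-12):
    """Mean cross-entropy between softmax outputs and one-hot labels."""
    # Clip to avoid log(0), then average -sum(y * log(p)) over the batch.
    probs = np.clip(probs, eps, 1.0)
    return -np.mean(np.sum(one_hot_labels * np.log(probs), axis=1))

# A confident correct prediction yields a low loss.
p = np.array([[0.05, 0.90, 0.05]])
y = np.array([[0.0, 1.0, 0.0]])
print(cross_entropy(p, y))  # ~0.105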
Example 2
# Reshape images to (N, 1, rows, cols); train_x is assumed to need the
# same treatment as test_x below.
train_x = np.reshape(train_x, (len(train_x), 1, img_rows, img_cols)).astype(skml_config.config.i_type)
train_y = convert_to_one_hot(train_y, num_classes)
test_x = np.reshape(test_x, (len(test_x), 1, img_rows, img_cols)).astype(skml_config.config.i_type)
test_y = convert_to_one_hot(test_y, num_classes)

train_x, valid_x, train_y, valid_y = train_test_split(train_x, train_y)


filters = 64
model = Sequential()
model.add(Convolution(filters, 3, input_shape=input_shape))
model.add(BatchNormalization())
model.add(ReLU())
model.add(MaxPooling(2))
model.add(Convolution(filters, 3))
model.add(BatchNormalization())
model.add(ReLU())
model.add(GlobalAveragePooling())
model.add(Affine(num_classes))
model.compile(SoftmaxCrossEntropy(), Adam())

train_batch_size = 100
valid_batch_size = 1
print("訓練開始: {}".format(datetime.now().strftime("%Y/%m/%d %H:%M")))
model.fit(train_x, train_y, train_batch_size, 20, validation_data=(valid_batch_size, valid_x, valid_y), validation_steps=1)
print("訓練終了: {}".format(datetime.now().strftime("%Y/%m/%d %H:%M")))

model.save(save_path)

loss, acc = model.evaluate(test_x, test_y)
print("Test loss: {}".format(loss))
print("Test acc: {}".format(acc))
Example 3
model = Sequential()
model.add(
    # First hidden layer: 784 pixel inputs -> 16 units; sizes inferred
    # from the Dense(10, 16) output layer below.
    Dense(16, 784, kernel_initializer=truncated_normal,
          bias_initializer=zeros))
model.add(Sigmoid())
model.add(
    Dense(10, 16, kernel_initializer=truncated_normal, bias_initializer=zeros))
model.add(Sigmoid())

loss = SquaredError()

loss_history = model.fit(train_imgs,
                         train_labels_one_hot,
                         batch_size=32,
                         epochs=10,
                         loss=loss,
                         halt=False)
pred = model.predict(test_imgs)
pred_labels = pred.argmax(1)
print("MSE", loss.evaluate(pred, test_labels_one_hot).mean(0))
print("Percentage correct", np.mean(pred_labels == test_labels) * 100)
print("Prediction for first 5 images")
print(pred[0:5, :].argmax(1))
print("True labels")
print(test_labels[0:5])

plt.plot(np.arange(0, 10), loss_history.mean(1))
plt.title("Graph of mean loss over all one-hot outputs")
plt.xlabel("Epoch")
plt.ylabel("Mean loss")
plt.show()

print(model.save("mnist_model.pkl"))
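The truncated_normal and zeros initializers passed to Dense above are not defined in the excerpt; here is a plausible NumPy sketch of such helpers (hypothetical implementations, shown for illustration only).

import numpy as np

def truncated_normal(shape, stddev=0.1):
    # Hypothetical: sample a normal distribution and resample any value
    # that falls more than two standard deviations from the mean.
    vals = np.random.normal(0.0, stddev, size=shape)
    mask = np.abs(vals) > 2 * stddev
    while mask.any():
        vals[mask] = np.random.normal(0.0, stddev, size=mask.sum())
        mask = np.abs(vals) > 2 * stddev
    return vals

def zeros(shape):
    # Hypothetical: all-zero bias initialization.
    return np.zeros(shape)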