Example 1
0
def read_cifar_10(n_train=None, n_val=None, n_test=None):
    """Load CIFAR-10 and return standardized train/val/test splits.

    Training batches 1-5 are concatenated sample-wise (axis 1); the last
    1000 training samples are held out as the validation set. All splits
    are standardized with the training mean/std and reshaped to
    (32, 32, 3, n_samples) image tensors.

    Args:
        n_train: optional cap on the number of training samples kept.
        n_val: optional cap on the number of validation samples kept.
        n_test: optional cap on the number of test samples kept.

    Returns:
        Tuple (x_train, y_train, x_val, y_val, x_test, y_test), all as
        float arrays; x arrays have shape (32, 32, 3, n).
    """
    import sys, pathlib
    # Local import so this example is self-contained: the original body
    # referenced `np` without importing numpy anywhere in the snippet.
    import numpy as np
    sys.path.append(
        str(pathlib.Path(__file__).resolve().parents[1]) +
        "/Toy-DeepLearning-Framework/")
    from mlp.utils import LoadXY

    # Load and concatenate all five training batches (samples on axis 1)
    x_train, y_train = LoadXY("data_batch_1")
    for i in [2, 3, 4, 5]:
        x, y = LoadXY("data_batch_" + str(i))
        x_train = np.concatenate((x_train, x), axis=1)
        y_train = np.concatenate((y_train, y), axis=1)
    # Hold out the last 1000 samples for validation
    x_val = x_train[:, -1000:]
    y_val = y_train[:, -1000:]
    x_train = x_train[:, :-1000]
    y_train = y_train[:, :-1000]
    x_test, y_test = LoadXY("test_batch")

    # Optionally truncate each split to the requested sample count
    if n_train is not None:
        x_train = x_train[..., 0:n_train]
        y_train = y_train[..., 0:n_train]
    if n_val is not None:
        x_val = x_val[..., 0:n_val]
        y_val = y_val[..., 0:n_val]
    if n_test is not None:
        x_test = x_test[..., 0:n_test]
        y_test = y_test[..., 0:n_test]

    # Standardize with training-set statistics only (no leakage from
    # validation or test data into the normalization constants)
    mean_x = np.mean(x_train)
    std_x = np.std(x_train)
    x_train = (x_train - mean_x) / std_x
    x_val = (x_val - mean_x) / std_x
    x_test = (x_test - mean_x) / std_x

    # Reshape flat sample columns into image tensors.
    # NOTE(review): order='F' assumes LoadXY returns column-major
    # (3072, n) pixel data — confirm against mlp.utils.LoadXY.
    x_train = np.reshape(np.array(x_train), (32, 32, 3, x_train.shape[-1]),
                         order='F')
    x_val = np.reshape(np.array(x_val), (32, 32, 3, x_val.shape[-1]),
                       order='F')
    x_test = np.reshape(np.array(x_test), (32, 32, 3, x_test.shape[-1]),
                        order='F')

    return (x_train.astype(float), y_train.astype(float),
            x_val.astype(float), y_val.astype(float),
            x_test.astype(float), y_test.astype(float))
Example 2
0
    str(pathlib.Path(__file__).resolve().parents[1]) +
    "/Toy-DeepLearning-Framework/")

import numpy as np
from mlp.callbacks import MetricTracker, BestModelSaver, LearningRateScheduler
from mlp.layers import Dense, Softmax, Relu, Dropout
from mlp.losses import CrossEntropy
from mlp.models import Sequential
from mlp.metrics import Accuracy
from mlp.utils import LoadXY

np.random.seed(1)

if __name__ == "__main__":
    # Load data
    x_train, y_train = LoadXY("data_batch_1")
    x_val, y_val = LoadXY("data_batch_2")
    x_test, y_test = LoadXY("test_batch")

    # Preprocessing
    mean_x = np.mean(x_train)
    std_x = np.std(x_train)
    x_train = (x_train - mean_x) / std_x
    x_val = (x_val - mean_x) / std_x
    x_test = (x_test - mean_x) / std_x

    # Define model
    model = Sequential(loss=CrossEntropy(), metric=Accuracy())
    model.add(Dense(nodes=800, input_dim=x_train.shape[0]))
    model.add(Relu())
    model.add(Dropout(ones_ratio=0.50))
    subtitle = "l2_reg: " + str(
        kwargs["l2_reg"]) + ", Test Acc: " + str(test_acc)
    mt.plot_training_progress(show=False,
                              save=True,
                              name=figure_file,
                              subtitle=subtitle)

    # Maximizing value: validation accuracy
    # val_metric = bms.best_metric
    val_metric = model.get_metric_loss(x_val, y_val)[0]
    return val_metric


if __name__ == "__main__":
    # Load data
    x_train, y_train = LoadXY("data_batch_1")
    for i in [2, 3, 4, 5]:
        x, y = LoadXY("data_batch_" + str(i))
        x_train = np.concatenate((x_train, x), axis=1)
        y_train = np.concatenate((y_train, y), axis=1)
    x_val = x_train[:, -5000:]
    y_val = y_train[:, -5000:]
    x_train = x_train[:, :-5000]
    y_train = y_train[:, :-5000]
    x_test, y_test = LoadXY("test_batch")

    # Preprocessing
    mean_x = np.mean(x_train)
    std_x = np.std(x_train)
    x_train = (x_train - mean_x) / std_x
    x_val = (x_val - mean_x) / std_x