コード例 #1
0
 def setUpClass(cls):
     """Load a tiny CIFAR-10 slice as shared fixtures for the test class.

     Sets X_mat (feature rows x sample columns), Y_mat (one-hot labels),
     dtype, and the derived in_dim / out_dim class attributes.
     """
     num_samples = 10
     feat_dim = 20
     batch = load_batch("cifar-10-batches-py/data_batch_1")
     # keep only the first feat_dim pixel rows and num_samples columns
     cls.X_mat = batch["pixel_data"][:feat_dim, :num_samples]
     cls.Y_mat = conv_y_to_onehot_mat(batch["labels"])[:, :num_samples]
     cls.dtype = np.float64
     # dimensions are derived from the sliced data rather than hard-coded
     cls.in_dim = cls.X_mat.shape[0]
     cls.out_dim = cls.Y_mat.shape[0]
コード例 #2
0
    def setUpClass(cls):
        """Prepare a 100-sample CIFAR-10 training slice and shared hyper-parameters."""
        sample_count = 100
        batch = load_batch("cifar-10-batches-py/data_batch_1")
        # transpose so that rows correspond to samples
        cls.train_img = batch["pixel_data"][:, :sample_count].T
        cls.train_labels = batch["labels"][:sample_count]

        # cyclic learning-rate schedule used by all tests in this class
        schedule = {
            "scheme": "cyclic",
            "eta_lim": [1e-5, 1e-1],
            "step_size": 500
        }
        cls.params = {
            "n_epochs": 500,
            "lambda_": 0,
            "verbose": False,
            "lrate_scheme": schedule
        }
コード例 #3
0
    def setUpClass(cls):
        """Build random two-layer network parameters and a tiny CIFAR-10 slice.

        Creates W_mat1/W_mat2 (weights scaled by 1/sqrt(fan-in)),
        b_vec1/b_vec2 (bias column vectors), a normalized input X_mat,
        and one-hot targets Y_mat, all as float64 class attributes.
        """
        in_dim, hid_dim, out_dim, n_samples = 20, 15, 10, 5
        dtype = 'float64'
        # NOTE: the four randn calls are kept in this order so the RNG
        # stream (and thus the fixture values under a fixed seed) is stable.
        cls.W_mat1 = np.random.randn(hid_dim, in_dim).astype(dtype) * np.sqrt(1.0 / in_dim)
        cls.W_mat2 = np.random.randn(out_dim, hid_dim).astype(dtype) * np.sqrt(1.0 / hid_dim)
        cls.b_vec1 = np.random.randn(hid_dim, 1).astype(dtype)
        cls.b_vec2 = np.random.randn(out_dim, 1).astype(dtype)

        batch = load_batch("cifar-10-batches-py/data_batch_1")
        raw_pixels = batch["pixel_data"][:in_dim, :n_samples]
        cls.X_mat = normalize_data(raw_pixels)['normalized'].astype(dtype)

        cls.Y_mat = get_label_to_one_hot(batch["labels"])[:out_dim, :n_samples].astype(dtype)
コード例 #4
0
"""
For Assignment 1:
"""
from utils.load_batch import load_batch
from clsr.one_layer_network import OneLayerNetwork
from time import time

if __name__ == '__main__':
    # Load the CIFAR-10 training / validation / test batches.
    train_data = load_batch("cifar-10-batches-py/data_batch_1")
    valid_data = load_batch("cifar-10-batches-py/data_batch_2")
    test_data = load_batch("cifar-10-batches-py/test_batch")

    ann = OneLayerNetwork()
    ann.set_valid_data(valid_data["pixel_data"].T, valid_data["labels"])

    # Time the training run only (data loading excluded).
    start_time = time()
    ann.fit(train_data["pixel_data"].T, train_data["labels"])
    elapsed = time() - start_time
    print("Total used time = ", elapsed)

    # Report accuracy on the held-out test batch.
    score = ann.score(test_data["pixel_data"].T, test_data["labels"])
    print("Accuracy: {}".format(score))
コード例 #5
0
2. The starting loss is around 2.24, which is reasonable
3. Use the simplest scheduler i.e. StepLR
4. with / without batchnorm should behave the same
"""
import numpy as np
from utils.load_batch import cifar10_DataLoader
from utils.load_batch import load_batch
from utils.handle_data import data_split
from utils import train
from utils.lrate import StepLR
from utils.lrate import CyclicLR
from clsr.nn_kl import KLayerNeuralNetwork
from utils.preprocess import StandardScaler

if __name__ == "__main__":
    train_data = load_batch("cifar-10-batches-py/data_batch_1")
    ntrain = train_data['labels'].shape[0]

    # Two hidden layers of 50 units; dropout and batch norm disabled for
    # this sanity-check run (momentum is still passed for completeness).
    net = KLayerNeuralNetwork(p_dropout=0.0,
                              n_hidden_nodes=[50, 50],
                              batch_norm_momentum=.9,
                              batch_norm=False)

    # Standardize pixels using statistics fitted on the training batch.
    scaler = StandardScaler()
    scaler.fit(train_data['pixel_data'])
    scaled = scaler.transform(train_data['pixel_data'])
    train_data['pixel_data'] = scaled

    batch_size = 100
    # shuffle=False keeps batch order deterministic across runs
    train_loader = cifar10_DataLoader(train_data,
                                      shuffle=False,
                                      batch_size=batch_size)
コード例 #6
0
from clsr.two_layer_network import TwoLayerNetwork
from utils.load_batch import load_batch
from utils.handle_data import data_split

if __name__ == "__main__":
    # settings
    n_cycle = 3  # will overshot when too much cycles
    k = 3
    # lambda_ = 0.000142
    # lambda_ = 0.005
    lambda_ = 0.004454
    # ===============================================================
    # READ DATA
    merged_data = get_all_train_data("cifar-10-batches-py")
    train_data, valid_data = data_split(merged_data, n_valid=1000)
    test_data = load_batch("cifar-10-batches-py/test_batch")
    # ===============================================================
    n_train_data = len(train_data['labels'])
    n_batch = 100
    n_s = int(k * np.floor(n_train_data / n_batch))
    n_iters = 2 * n_s * n_cycle
    n_epochs = int(n_iters * n_batch / n_train_data)
    # ======================================================
    params = {
        "n_epochs": n_epochs,
        "n_batch": n_batch,
        "verbose": True,
        "lambda_": lambda_,
        "record_training": True,
        "lrate_scheme": {
            "scheme": "cyclic",