Example #1
def adam(nnet, X_train, y_train, minibatch_size, epoch, learning_rate, verbose=True,
         X_test=None, y_test=None):
    beta1 = 0.9
    beta2 = 0.999
    minibatches = get_minibatches(X_train, y_train, minibatch_size)
    for i in range(epoch):
        loss = 0
        velocity, cache = [], []
        for param_layer in nnet.params:
            # allocate separate zero buffers per layer: `cache` accumulates the
            # first moment and `velocity` the second moment; appending the same
            # list to both would make the two updates overwrite each other
            velocity.append([np.zeros_like(param) for param in param_layer])
            cache.append([np.zeros_like(param) for param in param_layer])
        if verbose:
            print("Epoch {0}".format(i + 1))
        t = 1  # Adam time step; reset each epoch together with the moment buffers
        for X_mini, y_mini in minibatches:
            loss, grads = nnet.train_step(X_mini, y_mini)
            for c, v, param, grad in zip(cache, velocity, nnet.params, reversed(grads)):
                for j in range(len(grad)):  # use j so the epoch counter i is not shadowed
                    c[j] = beta1 * c[j] + (1. - beta1) * grad[j]           # first moment
                    v[j] = beta2 * v[j] + (1. - beta2) * (grad[j] ** 2)    # second moment
                    mt = c[j] / (1. - beta1 ** t)   # bias-corrected first moment
                    vt = v[j] / (1. - beta2 ** t)   # bias-corrected second moment
                    param[j] += -learning_rate * mt / (np.sqrt(vt) + 1e-4)
            t += 1

        if verbose:
            train_acc = accuracy(y_train, nnet.predict(X_train))
            test_acc = accuracy(y_test, nnet.predict(X_test))
            print("Loss = {0} | Training Accuracy = {1} | Test Accuracy = {2}".format(
                loss, train_acc, test_acc))
    return nnet
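These snippets rely on helpers such as get_minibatches and accuracy that are defined elsewhere in the project (along with numpy imported as np). A minimal sketch of what they might look like, assuming get_minibatches returns a list of shuffled (X, y) pairs and accuracy returns the fraction of matching labels; the actual implementations may differ:

import numpy as np

def get_minibatches(X, y, minibatch_size, shuffle=True):
    # Hypothetical helper: split (X, y) into fixed-size minibatch pairs.
    m = X.shape[0]
    idx = np.random.permutation(m) if shuffle else np.arange(m)
    minibatches = []
    for start in range(0, m, minibatch_size):
        batch = idx[start:start + minibatch_size]
        minibatches.append((X[batch], y[batch]))
    return minibatches

def accuracy(y_true, y_pred):
    # Hypothetical helper: fraction of predictions that match the labels.
    return np.mean(np.asarray(y_true) == np.asarray(y_pred))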
Example #2
def sgd_momentum(nnet,
                 X_train,
                 y_train,
                 minibatch_size,
                 epoch,
                 learning_rate,
                 mu=0.9,
                 verbose=True,
                 X_test=None,
                 y_test=None,
                 nesterov=True):

    minibatches = get_minibatches(X_train, y_train, minibatch_size)

    for i in range(epoch):
        loss = 0
        velocity = []
        for param_layer in nnet.params:
            p = [np.zeros_like(param) for param in list(param_layer)]
            velocity.append(p)

        if verbose:
            print("Epoch {0}".format(i + 1))

        for X_mini, y_mini in minibatches:

            if nesterov:
                # Nesterov look-ahead: move in the direction of the accumulated
                # velocity before computing the gradients on this minibatch
                for param, ve in zip(nnet.params, velocity):
                    for j in range(len(param)):  # use j so the epoch counter i is not shadowed
                        param[j] += mu * ve[j]

            loss, grads = nnet.train_step(X_mini, y_mini)
            momentum_update(velocity,
                            nnet.params,
                            grads,
                            learning_rate=learning_rate,
                            mu=mu)

        if verbose:
            # predict in minibatches so the full train/test sets never have to
            # go through the network in a single forward pass
            m_train = X_train.shape[0]
            m_test = X_test.shape[0]
            y_train_pred = np.array([], dtype="int64")
            y_test_pred = np.array([], dtype="int64")
            for j in range(0, m_train, minibatch_size):
                X_tr = X_train[j:j + minibatch_size, :, :, :]
                y_train_pred = np.append(y_train_pred, nnet.predict(X_tr))
            for j in range(0, m_test, minibatch_size):
                X_te = X_test[j:j + minibatch_size, :, :, :]
                y_test_pred = np.append(y_test_pred, nnet.predict(X_te))

            train_acc = accuracy(y_train, y_train_pred)
            test_acc = accuracy(y_test, y_test_pred)
            print("Loss = {0} | Training Accuracy = {1} | Test Accuracy = {2}".format(
                loss, train_acc, test_acc))
    return nnet
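The momentum_update helper called above is not included in the snippet. A minimal sketch of a classic momentum step, assuming the same per-layer parameter/gradient layout (with gradients in reverse layer order) as in the Adam example; it is an illustration, not necessarily the project's actual helper:

def momentum_update(velocity, params, grads, learning_rate=0.01, mu=0.9):
    # Hypothetical sketch: classic momentum, applied per layer and per
    # parameter array, with gradients assumed to arrive in reverse layer order.
    for v, param, grad in zip(velocity, params, reversed(grads)):
        for j in range(len(grad)):
            v[j] = mu * v[j] - learning_rate * grad[j]  # accumulate velocity
            param[j] += v[j]                            # take the step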
Example #3
def sgd(nnet, X_train, y_train, minibatch_size, epoch, learning_rate, verbose=True,
        X_test=None, y_test=None):
    minibatches = get_minibatches(X_train, y_train, minibatch_size)
    for i in range(epoch):
        loss = 0
        if verbose:
            print("Epoch {0}".format(i + 1))
        for X_mini, y_mini in minibatches:
            loss, grads = nnet.train_step(X_mini, y_mini)
            vanilla_update(nnet.params, grads, learning_rate=learning_rate)
        if verbose:
            train_acc = accuracy(y_train, nnet.predict(X_train))
            test_acc = accuracy(y_test, nnet.predict(X_test))
            print("Loss = {0} | Training Accuracy = {1} | Test Accuracy = {2}".format(
                loss, train_acc, test_acc))
    return nnet
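vanilla_update is likewise defined elsewhere. A minimal sketch, assuming it performs a plain SGD step (parameter minus learning_rate times gradient) with the same reverse-ordered gradients as above:

def vanilla_update(params, grads, learning_rate=0.01):
    # Hypothetical sketch: plain gradient-descent step per layer and array.
    for param, grad in zip(params, reversed(grads)):
        for j in range(len(grad)):
            param[j] += -learning_rate * grad[j]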
Example #4
        # test_set = list(test_set)
        # test_set = random.sample(test_set, len(test_set) // 2)
        X_test, y_test = test_set

        # ------------------------- input image
        # X_test = cv2.imread('1.jpeg',mode='RGB')
        # print(y_test)
        # ------------------------- END

        mnist_dims = (1, 28, 28)
        cnn = CNN(make_mnist_cnn(mnist_dims, num_class=10))
        # cnn = sgd_momentum(cnn, X, y, minibatch_size=35, epoch=1,
        # learning_rate=0.01, X_test=X_test, y_test=y_test)
        hasil = cnn.predict(X_test)        # hasil: raw class scores for the test set
        hasil = np.argmax(hasil, axis=1)   # convert scores to predicted class labels
        ndas = accuracy(y_test, hasil)     # ndas: test-set accuracy
        print("Test Accuracy = {0}".format(ndas))

    if sys.argv[1] == "cifar10":
        training_set, test_set = load_cifar10('data/cifar-10',
                                              num_training=1000,
                                              num_test=100)
        X, y = training_set
        X_test, y_test = test_set
        cifar10_dims = (3, 32, 32)
        cnn = CNN(make_cifar10_cnn(cifar10_dims, num_class=10))
        cnn = sgd_momentum(cnn,
                           X,
                           y,
                           minibatch_size=10,
                           epoch=200,