Example 1
    def test_backward(self):
        """Backward through the net must match chaining the layers' backwards."""
        x = np.random.randn(6, 10)

        net = NeuralNet(model=self.model3)

        # Mirror the net's two layers with standalone Linear copies that
        # share the exact same parameters.
        ref_layers = [Linear(10, 6), Linear(6, 5)]
        for ref, layer in zip(ref_layers, net.layers):
            ref.weights = layer.weights
            ref.bias = layer.bias

        # Forward pass through the reference layers first.
        out = x
        for ref in ref_layers:
            out = ref.forward(out)

        grad_output = np.random.randn(6, 5)

        # Backpropagate through the reference layers in reverse order.
        expected = grad_output
        for ref in reversed(ref_layers):
            expected, _, _ = ref.backward(expected)

        net.forward(x)
        grad = net.backward(grad_output)

        self.assertTrue(
            np.array_equal(grad, expected),
            msg=
            "Net backward pass is the same as backward of the layers combined")
Example 2
    def test_save_and_load_weights(self):
        """Weights and biases saved to disk must round-trip into a fresh net."""
        # Create a net with random weights and remember its parameters.
        net = NeuralNet(model=self.model1)
        w = net.layers[0].weights
        b = net.layers[0].bias

        # Save weights to a temporary file.
        net.save_weights(self.weights_fname)

        # Create a new net with different random weights.
        net = NeuralNet(model=self.model1)

        # Load the saved weights into it.
        net.load_weights(self.weights_fname)
        new_w = net.layers[0].weights
        new_b = net.layers[0].bias

        # Check that parameters survived the round trip.
        # np.alltrue was deprecated and removed in NumPy 2.0; np.all is the
        # supported equivalent.
        self.assertTrue(np.all(w == new_w),
                        msg="Saving and loading weights works")
        self.assertTrue(np.all(b == new_b),
                        msg="Saving and loading biases works")

        # Remove the temporarily saved weights.
        os.remove(self.weights_fname)
Example 3
    def test_forward(self):
        """Forward through the net must match chaining the layers' forwards."""
        x = np.random.randn(6, 10)

        net = NeuralNet(model=self.model3)

        # Standalone Linear layers that share the net's parameters.
        ref1 = Linear(10, 6)
        ref1.weights, ref1.bias = net.layers[0].weights, net.layers[0].bias

        ref2 = Linear(6, 5)
        ref2.weights, ref2.bias = net.layers[1].weights, net.layers[1].bias

        # Expected output: feed the input through the two layers by hand.
        expected = ref2.forward(ref1.forward(x))

        actual = net.forward(x)

        self.assertTrue(
            np.array_equal(actual, expected),
            msg="Net forward pass is the same as forward of the layers combined"
        )
Example 4
    def test_save_and_load_weights_fails(self):
        """Loading weights saved for one model into a different model must raise."""
        # Net built from model1; its parameters get saved to disk.
        # (The original also captured layers[0].weights/bias into unused
        # locals; those have been removed.)
        net = NeuralNet(model=self.model1)
        net.save_weights(self.weights_fname)

        # A net with a different architecture cannot accept those weights.
        net = NeuralNet(model=self.model2)
        with self.assertRaises(RuntimeError):
            net.load_weights(self.weights_fname)

        # Remove the temporarily saved weights.
        os.remove(self.weights_fname)
Example 5
if __name__ == '__main__':
    # Entry point: load previously trained weights and report set accuracies.
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights_fname',
                        default='../data/saved_weights.dat',
                        help='Path to the file containing the weights to be used (../data/saved_weights.dat)')

    args = parser.parse_args()
    weights_fname = args.weights_fname

    # Bail out early if the weights file is missing; train.py --save creates it.
    if not os.path.isfile(weights_fname):
        print("Error: File '{}' does not exist. Did you run `./train.py --save`?".format(weights_fname))
        sys.exit()

    # create the model
    model = TwoLayerModel()
    net = NeuralNet(model=model)
    net.load_weights(weights_fname)

    # we are not training, but predicting
    net.train = False

    # load the data
    ((x_train, y_train), (x_valid, y_valid), (x_test, y_test)) = MNISTDataLoader().load_data()
    # NOTE(review): assumes flattened 784-feature MNIST rows — reshaped back
    # to 28x28 for display; confirm against MNISTDataLoader.
    test_images = np.reshape(x_test, (-1, 28, 28))

    # print accuracies
    print_set_accuracy(net, x_train, y_train, "Training set accuracy:")
    print_set_accuracy(net, x_valid, y_valid, "Validation set accuracy:")
    print_set_accuracy(net, x_test, y_test, "Test set accuracy:")

    # loop forever to display random images from the set
Example 6
                        help='Save weights to file after training')

    # CLI option for where weights are saved to / loaded from.
    parser.add_argument(
        '--weights_fname',
        default='../data/saved_weights_linear.dat',
        help=
        'Path and filename for saving and loading the weights (../data/saved_weights_linear.dat)'
    )

    args = parser.parse_args()

    weights_fname = args.weights_fname

    # create the model
    model = LinearModel()
    net = NeuralNet(model=model)

    # Optionally resume training from previously saved weights.
    if args.load_weights:
        print('- Loading weights from:', weights_fname)
        net.load_weights(weights_fname)

    # create the optimizer (plain SGD over the regression dataset)
    optimizer = SGD(net=net,
                    dataloader=RegressionDataLoader(),
                    batch_size=args.batch_size)

    # fit the model
    print('- Training model for', args.epochs, 'epoch, with learning rate',
          args.lr)
    optimizer.fit(n_epochs=args.epochs, learning_rate=args.lr)
Example 7
 def setUp(self):
     """Build the small net, data loader and optimizer shared by the tests."""
     sample_count = 100
     self.net = NeuralNet(model=TwoLayerModel())
     self.loader = MNISTDataLoader(n_samples=sample_count)
     # One batch covers the whole subset.
     self.optimizer = SGD(net=self.net,
                          dataloader=self.loader,
                          batch_size=sample_count)
Example 8
class TestSGD(unittest.TestCase):
    """
    Test SGD functionality using TwoLayerModel

    Just tests that the loss goes down after fitting the model.
    Uses small subset of the actual MNIST data for testing.
    """

    def setUp(self):
        # Small data subset keeps the tests fast; one batch covers it all.
        n_samples = 100
        model = TwoLayerModel()
        self.net = NeuralNet(model=model)
        self.loader = MNISTDataLoader(n_samples=n_samples)
        self.optimizer = SGD(net=self.net, dataloader=self.loader, batch_size=n_samples)

    def _train_loss(self, x, y):
        """Return the net's current loss on the training pair (x, y)."""
        # Labels are cast to int consistently; the original code only cast
        # them for the "before" measurement, not the "after" one.
        return self.net.loss(self.net.forward(x), y.astype(int))

    def _assert_loss_decreases(self, warmup_epochs, msg):
        """Fit `warmup_epochs`, snapshot the loss, fit one more epoch,
        and assert the training loss decreased."""
        ((x_train, y_train), _, _) = self.loader.load_data()

        if warmup_epochs:
            self.optimizer.fit(n_epochs=warmup_epochs, learning_rate=1e-1,
                               suppress_output=True)

        loss_before = self._train_loss(x_train, y_train)
        self.optimizer.fit(n_epochs=1, learning_rate=1e-1, suppress_output=True)
        loss_after = self._train_loss(x_train, y_train)

        # assertLess gives a clearer failure report than assertTrue(np.less(...)).
        self.assertLess(loss_after, loss_before, msg=msg)

    def test_fit_one_epoch(self):
        self._assert_loss_decreases(0, "Loss is getting smaller in the beginning")

    def test_fit_two_epochs(self):
        # Fixed grammar of the original message ("after 1 epochs").
        self._assert_loss_decreases(1, "Loss is getting smaller after 1 epoch")

    def test_fit_five_epochs(self):
        self._assert_loss_decreases(4, "Loss is getting smaller after 4 epochs")