Esempio n. 1
0
    def test(self):
        """Train a 1-input, 2-way softmax net so values below 0.8 land in
        class 0 and values at or above 0.8 land in class 1, then check
        classification on either side of the boundary."""
        net = nn.NeuralNetwork("test_net", 1)

        net.add_layer(ld.hdict["fc"](2))
        net.add_layer(ld.odict["softmax"](2))

        # Fixed seed keeps the training data (and hence the test) deterministic.
        np.random.seed(1)

        net.set_weight_update_function(
            wup.AdamParams(wup.LearningRate(alpha=0.1)))
        net.initialize_parameters()

        net.set_l2_loss_coeff(.001)

        for _ in range(10000):
            # 32 samples drawn from [0.75, 0.85) — tightly around the boundary.
            x = np.random.rand(1, 32) * 0.1 + 0.75
            # Row 0: indicator for class 0 (x < 0.8); row 1: class 1 (x >= 0.8).
            y = np.concatenate([x < 0.8, x >= 0.8], axis=0).astype(float)
            net.train(x, y)

        self.assertTrue(net.predict_classify(0.79) == 0)
        self.assertTrue(net.predict_classify(0.81) == 1)
Esempio n. 2
0
    def test(self):
        """Train a two-hidden-layer net with a sigmoid output to learn the
        threshold predicate x > 0.8, then probe either side of it."""
        net = nn.NeuralNetwork("test_net", 1)

        # Construct and attach each layer in sequence: fc(10) -> fc(1) -> sigmoid(1).
        for factory, size in ((ld.hdict["fc"], 10),
                              (ld.hdict["fc"], 1),
                              (ld.odict["sigmoid"], 1)):
            net.add_layer(factory(size))

        net.set_l2_loss_coeff(.001)

        # Fixed seed keeps the training data deterministic.
        np.random.seed(1)

        net.set_weight_update_function(
            wup.GradientDescentParams(wup.LearningRate(alpha=0.1)))
        net.initialize_parameters()

        threshold = 0.8

        for _ in range(10000):
            # 32 samples in [0.75, 0.85), straddling the threshold.
            x = np.random.rand(1, 32) * 0.1 + 0.75
            net.train(x, x > threshold)

        self.assertTrue(net.predict(0.79) < 0.5)
        self.assertTrue(net.predict(0.81) > 0.5)
Esempio n. 3
0
    def test(self):
        """Fit the element-wise affine map y = a*x + b on 4-dim inputs with
        two hidden fc layers and a linear MSE loss, then check the relative
        prediction error at x = 10 is under 10% per component."""
        net = nn.NeuralNetwork("test_net", 4)

        # Hidden stack fc(10) -> fc(40) -> fc(4), then the MSE loss output.
        for width in (10, 40, 4):
            net.add_layer(ld.hdict["fc"](width))
        net.add_layer(ld.odict["loss"]("linear_mean_squared_loss"))

        net.set_l2_loss_coeff(.001)

        # Fixed seed keeps the training data deterministic.
        np.random.seed(1)

        net.set_weight_update_function(
            wup.GradientDescentParams(wup.LearningRate(alpha=0.01)))
        net.initialize_parameters()

        slope = np.array([[1], [2], [3], [4]])
        intercept = np.array([[4], [5], [6], [7]])

        for _ in range(1000):
            # 32 samples per step, each component drawn from [-5, 5).
            x = (np.random.rand(4, 32) - 0.5) * 10
            net.train(x, slope * x + intercept)

        x = np.array([[10], [10], [10], [10]])
        expected = slope * x + intercept
        relative_error = np.absolute(net.predict(x) - expected) / expected
        self.assertTrue((relative_error < 0.1).all())
Esempio n. 4
0
    def test(self):
        """Fit the linear map y = a*x with a single fc layer and a linear
        MSE loss, then check the relative prediction error at x = 10 is
        under 10%.

        Fixes an operator-precedence bug in the final assertion: the
        original wrote `np.absolute(...)/a*x`, which divides by `a` and
        then MULTIPLIES by `x` (left-to-right), inflating the "relative
        error" by x**2. The intended denominator is the true value `a*x`,
        matching the analogous check in the 4-dim regression test.
        """
        net = nn.NeuralNetwork("test_net", 1)

        layer = ld.hdict["fc"](1)
        net.add_layer(layer)

        layer = ld.odict["loss"]("linear_mean_squared_loss")
        net.add_layer(layer)

        # Fixed seed keeps the training data deterministic.
        np.random.seed(1)

        learning_rate = wup.LearningRate(alpha=0.1)
        params = wup.GradientDescentParams(learning_rate)
        net.set_weight_update_function(params)
        net.initialize_parameters()

        net.set_l2_loss_coeff(.001)

        a = 4.5  # true slope to recover (stray semicolon removed)

        for i in range(100):
            # 32 samples per step, drawn from [-5, 5).
            x = (np.random.rand(1, 32) - 0.5) * 10
            y = a * x
            net.train(x, y)

        x = 10
        # Relative error |prediction - a*x| / (a*x) must be below 10%.
        self.assertTrue((np.absolute(net.predict(x) - a * x) / (a * x)) < 0.1)
Esempio n. 5
0
    def define_nn(self):
        """Build self.net as a 3-input network: fc(4) -> softmax(4)."""
        self.net = nn.NeuralNetwork("test_net", 3)

        hidden = ld.hdict["fc"](4)
        self.layer = hidden
        self.net.add_layer(hidden)

        output = ld.odict["softmax"](4)
        self.layer = output
        self.net.add_layer(output)
    def define_nn(self):
        """Build self.net as a 1-input network: fc(1) -> sigmoid(1)."""
        self.net = nn.NeuralNetwork("test_net", 1)

        hidden = ld.hdict["fc"](1)
        self.layer = hidden
        self.net.add_layer(hidden)

        output = ld.odict["sigmoid"](1)
        self.layer = output
        self.net.add_layer(output)
    def define_nn(self):
        """Build self.net as a 1-input network: fc(1) -> linear MSE loss."""
        self.net = nn.NeuralNetwork("test_net", 1)

        hidden = ld.hdict["fc"](1)
        self.layer = hidden
        self.net.add_layer(hidden)

        output = ld.odict["loss"]("linear_mean_squared_loss")
        self.layer = output
        self.net.add_layer(output)
    def define_nn(self):
        """Build self.net as a 1-input network:
        fc(1) -> hidden sigmoid(1) -> sigmoid cross-entropy loss."""
        self.net = nn.NeuralNetwork("test_net", 1)

        hidden_fc = ld.hdict["fc"](1)
        self.layer = hidden_fc
        self.net.add_layer(hidden_fc)

        # Hidden-layer sigmoid (hdict), distinct from the loss output below.
        hidden_act = ld.hdict["sigmoid"](1)
        self.layer = hidden_act
        self.net.add_layer(hidden_act)

        output = ld.odict["loss"]("sigmoid_cross_entropy_loss")
        self.layer = output
        self.net.add_layer(output)
Esempio n. 9
0
    def test(self):
        """Train a 10-way softmax classifier to bin scalars from [0, 1)
        into 0.1-wide buckets (class j covers [j*0.1, (j+1)*0.1)), then
        probe points near both edges of every bucket."""
        net = nn.NeuralNetwork("test_net", 1)

        net.add_layer(ld.hdict["fc"](30))
        net.add_layer(ld.hdict["fc"](10))
        net.add_layer(ld.odict["softmax"](10))

        # Fixed seed keeps the training data deterministic.
        np.random.seed(1)

        net.set_weight_update_function(
            wup.AdamParams(wup.LearningRate(alpha=0.1)))
        net.initialize_parameters()

        net.set_l2_loss_coeff(.001)

        for _ in range(100000):
            x = np.random.rand(1, 32)
            # Row j is the indicator for bucket [j*0.1, (j+1)*0.1);
            # row 0 is simply x < 0.1.
            rows = [x < 0.1]
            for j in range(1, 10):
                rows.append((x >= j * 0.1) * (x < (j + 1) * 0.1))
            y = np.concatenate(rows, axis=0).astype(float)
            net.train(x, y)

        # Check points well inside each bucket, near both edges.
        for j in range(10):
            self.assertTrue(net.predict_classify(j * 0.1 + .02) == j)
            self.assertTrue(net.predict_classify(j * 0.1 + .08) == j)
Esempio n. 10
0
# Evaluation helpers.
# NOTE(review): `validation_data` and `test_data` are defined outside this
# chunk. Columns 0:784 look like the 28x28 pixel values; the class label is
# read from column 794 — presumably columns 784:794 hold a one-hot encoding
# and 794 is the integer class index. TODO: confirm against the data loader.
validation_x = np.transpose(validation_data[:, 0:784])
validation_y_class = np.transpose(validation_data[:, 794])
# Deferred: `net` is bound below, after these lambdas are defined — they only
# work once called after the network exists (late binding).
val_acc = lambda: net.classification_accuracy(validation_x, validation_y_class)

test_x = np.transpose(test_data[:, 0:784])
test_y_class = np.transpose(test_data[:, 794])
test_acc = lambda: net.classification_accuracy(test_x, test_y_class)

##

# Creating Neural Network
print("Creating neural network")
print()
# Step 1: Create Network - specify input layer neurons (28x28=784)
net = nn.NeuralNetwork("test_net", 784)

# Step 2: Add hidden layers in sequence

# Fully connected layer
layer = ld.hdict["fc"](800)
net.add_layer(layer)

# Relu activation layer
layer = ld.hdict["relu"](800)
net.add_layer(layer)

# Final fully connected layer down to the 10 output classes.
layer = ld.hdict["fc"](10)
net.add_layer(layer)

# Add output layer