Example #1
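All of the examples below reference np, nn, ld, and wup without showing imports. Only numpy is certain; the three library module paths in this sketch are assumptions inferred from the aliases:

import numpy as np
# Module names below are guesses; substitute the library's actual paths.
import neural_network as nn         # provides nn.NeuralNetwork
import layer_dict as ld             # provides the ld.hdict / ld.odict layer factories
import weight_update_params as wup  # provides wup.LearningRate, wup.GradientDescentParams, ...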
    def test(self):
        # 1-input network fc(10) -> fc(1) -> sigmoid, trained as a binary
        # classifier for the threshold x > 0.8.
        net = nn.NeuralNetwork("test_net", 1)

        layer = ld.hdict["fc"](10)
        net.add_layer(layer)

        layer = ld.hdict["fc"](1)
        net.add_layer(layer)

        layer = ld.odict["sigmoid"](1)
        net.add_layer(layer)

        net.set_l2_loss_coeff(.001)

        np.random.seed(1)

        learning_rate = wup.LearningRate(alpha=0.1)
        params = wup.GradientDescentParams(learning_rate)
        net.set_weight_update_function(params)
        net.initialize_parameters()

        a = 0.8  # classification threshold the network should learn

        for i in range(10000):
            # Each batch: 32 samples drawn from [0.75, 0.85), around the threshold.
            x = np.random.rand(1, 32) * 0.1 + 0.75
            y = x > a
            net.train(x, y)

        # Probe just below and just above the threshold.
        x = 0.79
        # print(net.predict(x))
        self.assertTrue(net.predict(x) < 0.5)
        x = 0.81
        # print(net.predict(x))
        self.assertTrue(net.predict(x) > 0.5)
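For comparison, the same threshold task can be reproduced in plain NumPy with a single sigmoid unit trained by gradient descent on cross-entropy loss. This is a minimal standalone sketch, not the library's implementation; the learning rate and iteration count are assumptions:

import numpy as np

np.random.seed(1)
w, b = 0.0, 0.0  # parameters of a single sigmoid unit
alpha = 0.5      # learning rate (assumption)

for _ in range(10000):
    x = np.random.rand(32) * 0.1 + 0.75     # samples around the 0.8 boundary
    y = (x > 0.8).astype(float)
    p = 1.0 / (1.0 + np.exp(-(w * x + b)))  # sigmoid prediction
    g = p - y                               # dLoss/dz for cross-entropy
    w -= alpha * np.mean(g * x)
    b -= alpha * np.mean(g)

# The decision boundary -b/w should settle near 0.8.
print(1.0 / (1.0 + np.exp(-(w * 0.79 + b))))  # typically below 0.5
print(1.0 / (1.0 + np.exp(-(w * 0.81 + b))))  # typically above 0.5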
Example #2
    def test(self):
        # Single fc(1) unit with a linear mean-squared loss, fitting y = a * x.
        net = nn.NeuralNetwork("test_net", 1)

        layer = ld.hdict["fc"](1)
        net.add_layer(layer)

        layer = ld.odict["loss"]("linear_mean_squared_loss")
        net.add_layer(layer)

        np.random.seed(1)

        learning_rate = wup.LearningRate(alpha=0.1)
        params = wup.GradientDescentParams(learning_rate)
        net.set_weight_update_function(params)
        net.initialize_parameters()

        net.set_l2_loss_coeff(.001)        

        a = 4.5

        for i in range(100):
            x = (np.random.rand(1, 32) - 0.5) * 10  # inputs drawn from [-5, 5)
            y = a * x
            net.train(x, y)
        
        x = 10
        # Relative error of the prediction should be under 10%.
        self.assertTrue((np.absolute(net.predict(x) - a * x) / (a * x)) < 0.1)
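As a cross-check, the same fit can be reproduced with a hand-rolled NumPy gradient-descent loop. This sketch mirrors the test's data distribution and learning rate but is an independent reimplementation, not the library's code:

import numpy as np

np.random.seed(1)
w, c = 0.0, 0.0  # slope and intercept of the fitted line
alpha = 0.1
a = 4.5

for _ in range(100):
    x = (np.random.rand(32) - 0.5) * 10  # inputs drawn from [-5, 5)
    y = a * x
    err = (w * x + c) - y                # residuals of the current fit
    w -= alpha * np.mean(err * x)        # gradient of 0.5 * mean squared error
    c -= alpha * np.mean(err)

print(w, c)  # w should approach 4.5 and c should approach 0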
Example #3
    def test(self):
        # 4-input network fc(10) -> fc(40) -> fc(4) with a linear mean-squared
        # loss, fitting the element-wise affine map y = a * x + b.
        net = nn.NeuralNetwork("test_net", 4)

        layer = ld.hdict["fc"](10)
        net.add_layer(layer)

        layer = ld.hdict["fc"](40)
        net.add_layer(layer)

        layer = ld.hdict["fc"](4)
        net.add_layer(layer)

        layer = ld.odict["loss"]("linear_mean_squared_loss")
        net.add_layer(layer)

        net.set_l2_loss_coeff(.001)        

        np.random.seed(1)

        learning_rate = wup.LearningRate(alpha=0.01)
        params = wup.GradientDescentParams(learning_rate)
        net.set_weight_update_function(params)
        net.initialize_parameters()

        a = np.array([[1], [2], [3], [4]])
        b = np.array([[4], [5], [6], [7]])

        for i in range(1000):
            x = (np.random.rand(4, 32) - 0.5) * 10
            y = a * x + b
            net.train(x, y)
        
        x = np.array([[10], [10], [10], [10]])
        y_exp = a * x + b
        # Element-wise relative error must be under 10% for every output.
        self.assertTrue(((np.absolute(net.predict(x) - y_exp) / y_exp) < 0.1).all())
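Note that a * x + b relies on NumPy broadcasting: a and b have shape (4, 1) while x has shape (4, 32), so each of the 4 rows gets its own slope and intercept applied across all 32 examples in the mini-batch. A quick shape check:

import numpy as np

a = np.array([[1], [2], [3], [4]])  # shape (4, 1)
b = np.array([[4], [5], [6], [7]])  # shape (4, 1)
x = np.ones((4, 32))                # 4 features x 32 examples
y = a * x + b                       # broadcasts row-wise to shape (4, 32)
print(y.shape)   # (4, 32)
print(y[:, 0])   # [ 5.  7.  9. 11.]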
Example #4
    def test(self):
        # Template test: subclasses supply define_nn, initialize,
        # set_training_example, and extra_checks.
        self.define_nn()

        self.learning_rate = wup.LearningRate(alpha=0.1)
        self.params = wup.GradientDescentParams(self.learning_rate)
        self.net.set_weight_update_function(self.params)
        np.random.seed(1)
        self.net.initialize_parameters()
        self.initialize()

        self.set_training_example()
        self.assertTrue(self.net.check_gradient(self.x, self.y))
        self.assertTrue(self.extra_checks())
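The test leans on the library's check_gradient, which presumably validates analytic gradients against centered finite differences. A minimal standalone sketch of that technique (the function, tolerance, and example loss below are illustrative, not the library's API):

import numpy as np

def check_gradient(f, grad_f, theta, eps=1e-6, tol=1e-7):
    # Compare grad_f against centered finite differences of f.
    numeric = np.zeros_like(theta)
    for i in range(theta.size):
        step = np.zeros_like(theta)
        step[i] = eps
        numeric[i] = (f(theta + step) - f(theta - step)) / (2 * eps)
    return np.max(np.abs(numeric - grad_f(theta))) < tol

# Example: quadratic loss f(theta) = 0.5 * ||theta||^2 has gradient theta.
theta = np.array([1.0, -2.0, 3.0])
print(check_gradient(lambda t: 0.5 * np.sum(t * t), lambda t: t, theta))  # True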
Example #5
# Add a fully connected hidden layer with 10 units
layer = ld.hdict["fc"](10)
net.add_layer(layer)

# Add output layer
layer = ld.odict["softmax"](10)
net.add_layer(layer)

# Neural network definition done
net.check_arch()

# Specify the L2 loss (regularization) coefficient
net.set_l2_loss_coeff(.001)

# Define weight update method
learning_rate = wup.LearningRate(alpha=.3)
params = wup.GradientDescentParams(learning_rate)
# params = wup.MomentumParams(learning_rate)
# learning_rate = wup.LearningRate(alpha=.01)
# params = wup.AdamParams(learning_rate)
net.set_weight_update_function(params)

# For repeatability during testing
# np.random.seed(1)
# Initialize the network
net.initialize_parameters()

# Set training related parameters
mini_batch_size = 32
epochs = 20
verbose = 0
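The snippet stops at the hyperparameters; the training loop itself is not shown. A minimal sketch of how these values would typically drive mini-batch training, using only the net.train(x, y) call seen in the tests above (x_train and y_train are hypothetical column-major arrays):

# x_train: (num_features, num_examples), y_train: (num_outputs, num_examples)
num_examples = x_train.shape[1]
for epoch in range(epochs):
    order = np.random.permutation(num_examples)  # reshuffle every epoch
    for start in range(0, num_examples, mini_batch_size):
        idx = order[start:start + mini_batch_size]
        net.train(x_train[:, idx], y_train[:, idx])
    if verbose:
        print("epoch", epoch + 1, "of", epochs, "done")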