Example #1
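This test, like every example below, is a method of a unittest.TestCase subclass. A minimal sketch of the shared scaffolding the snippets assume (the qnetwork module path and the test-class name are guesses; HiddenLayer and QNetwork are the classes under test):

 import unittest

 import numpy as np
 import theano

 from qnetwork import HiddenLayer, QNetwork  # assumed module path

 class QNetworkTest(unittest.TestCase):  # assumed class name
     # each example's test method is defined on this class
     ...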
 def test_fprop_double_layer_one_weights_positive_output_values_relu(self):
     n_vis = 8
     n_hid = 2
     # two stacked ReLU layers; // keeps the hidden size an int under Python 3
     hidden_layer_1 = HiddenLayer(n_vis=n_vis,
                                  n_hid=n_vis // 2,
                                  layer_name='h1',
                                  activation='relu',
                                  param_init_range=0,
                                  alpha=0)
     hidden_layer_2 = HiddenLayer(n_vis=n_vis // 2,
                                  n_hid=n_hid,
                                  layer_name='h2',
                                  activation='relu',
                                  param_init_range=0,
                                  alpha=0)
     # replace the zero-initialized weights with all-ones matrices
     W = theano.shared(value=np.ones((n_vis, n_vis // 2)),
                       name='h1_W',
                       borrow=True)
     hidden_layer_1.W = W
     W = theano.shared(value=np.ones((n_vis // 2, n_hid)),
                       name='h2_W',
                       borrow=True)
     hidden_layer_2.W = W
     mlp = QNetwork([hidden_layer_1, hidden_layer_2],
                    discount=1,
                    learning_rate=1)
     features = np.ones(n_vis)
     actual = list(mlp.fprop(features).eval())
     # each h1 unit outputs 8; each h2 unit sums four 8s -> 32
     expected = [32., 32.]
     self.assertSequenceEqual(actual, expected)
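A quick sanity check of the expected value in plain numpy (illustrative only; the bias stays zero because param_init_range=0, and relu(x) = max(x, 0)):

 x = np.ones(8)
 h1 = np.maximum(x.dot(np.ones((8, 4))), 0)   # each of the 4 h1 units sums eight ones -> 8.
 h2 = np.maximum(h1.dot(np.ones((4, 2))), 0)  # each h2 unit sums four eights -> 32.
 print(h2)  # [32. 32.]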
Example #2
 def test_fprop_single_layer_zero_weights_positive_input_values_relu(self):
     hidden_layer = HiddenLayer(n_vis=4, n_hid=2, layer_name='h', activation='relu', param_init_range=0, alpha=0)
     mlp = QNetwork([hidden_layer], discount=1, learning_rate=1)
     features = [1, 2, 3, 4]
     actual = list(mlp.fprop(features).eval())
     expected = [0., 0.]
     self.assertSequenceEqual(actual, expected)
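The expectation here follows from the initialization alone: param_init_range=0 leaves every weight at zero, so the output is zero no matter what the input is. The same computation in plain numpy:

 x = np.array([1, 2, 3, 4])
 W = np.zeros((4, 2))  # what param_init_range=0 produces
 print(np.maximum(x.dot(W), 0))  # [0. 0.]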
Example #3
 def test_fprop_single_layer_one_weights_negative_output_values_relu(self):
     n_vis = 4
     n_hid = 2
     hidden_layer = HiddenLayer(n_vis=n_vis, n_hid=n_hid, layer_name='h', activation='relu', param_init_range=0, alpha=0)
     W = theano.shared(value=np.ones((n_vis, n_hid)), name='h_W', borrow=True)
     hidden_layer.W = W
     mlp = QNetwork([hidden_layer], discount=1, learning_rate=1)
     features = [-1, -2, -3, 4]
     actual = list(mlp.fprop(features).eval())
     expected = [0., 0.]
     self.assertSequenceEqual(actual, expected)
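With all-ones weights each pre-activation is just the feature sum, -1 - 2 - 3 + 4 = -2, and ReLU clamps the negative value to zero. In plain numpy:

 x = np.array([-1, -2, -3, 4])
 print(np.maximum(x.dot(np.ones((4, 2))), 0))  # [0. 0.]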
Example #4
 def test_fprop_double_layer_one_weights_negative_output_values_relu(self):
     n_vis = 8
     n_hid = 2
     # // keeps the hidden size an int under Python 3
     hidden_layer_1 = HiddenLayer(n_vis=n_vis, n_hid=n_vis // 2, layer_name='h1', activation='relu', param_init_range=0, alpha=0)
     hidden_layer_2 = HiddenLayer(n_vis=n_vis // 2, n_hid=n_hid, layer_name='h2', activation='relu', param_init_range=0, alpha=0)
     # replace the zero-initialized weights with all-ones matrices
     W = theano.shared(value=np.ones((n_vis, n_vis // 2)), name='h1_W', borrow=True)
     hidden_layer_1.W = W
     W = theano.shared(value=np.ones((n_vis // 2, n_hid)), name='h2_W', borrow=True)
     hidden_layer_2.W = W
     mlp = QNetwork([hidden_layer_1, hidden_layer_2], discount=1, learning_rate=1)
     features = [-5, -4, -3, -2, -1, 0, 1, 2]
     actual = list(mlp.fprop(features).eval())
     expected = [0., 0.]
     self.assertSequenceEqual(actual, expected)
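Here the feature sum is -12, so the first ReLU already zeroes every h1 unit and the zeros propagate through h2. In plain numpy:

 x = np.array([-5, -4, -3, -2, -1, 0, 1, 2])
 h1 = np.maximum(x.dot(np.ones((8, 4))), 0)      # feature sum is -12 -> all zeros
 print(np.maximum(h1.dot(np.ones((4, 2))), 0))   # [0. 0.]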