Example no. 1
  def create_layers(self, state, **kwargs):
    # env (the RL environment) comes from the enclosing test's scope.
    action = Variable(np.ones(env.n_actions))
    output = SoftMax(
        in_layers=[Reshape(in_layers=[action], shape=(-1, env.n_actions))])
    value = Variable([0.0])
    return {'action_prob': output, 'value': value}
Example no. 2
  def test_variable(self):
    """Test that Variable can be invoked."""
    value = np.random.uniform(size=(2, 3)).astype(np.float32)
    with self.session() as sess:
      out_tensor = Variable(value)()
      sess.run(tf.global_variables_initializer())
      assert np.array_equal(value, out_tensor.eval())
Example no. 3
 def test_variable(self):
   """Test that Variable can be invoked."""
   value = np.random.uniform(size=(2, 3)).astype(np.float32)
   with self.session() as sess:
     out_tensor = Variable(value)()
     sess.run(tf.global_variables_initializer())
     assert np.array_equal(value, out_tensor.eval())
Example no. 4
  def test_convert_to_tensor(self):
    """Test implicit conversion of Layers to Tensors."""
    v = Variable(np.array([1.5]))
    v.create_tensor()
    with self.session() as sess:
      sess.run(tf.global_variables_initializer())
      result = sess.run(v)
      assert result == 1.5
      result = sess.run(tf.gradients(v, v))
      assert result[0] == 1.0
Example no. 5
 def test_convert_to_tensor(self):
   """Test implicit conversion of Layers to Tensors."""
   v = Variable(np.array([1.5]))
   v.create_tensor()
   with self.session() as sess:
     sess.run(tf.global_variables_initializer())
     result = sess.run(v)
     assert result == 1.5
     result = sess.run(tf.gradients(v, v))
     assert result[0] == 1.0
Example no. 6
  def test_initialize_variable(self):
    """Test methods for initializing a variable."""
    tg = dc.models.TensorGraph(use_queue=False)
    features = Feature(shape=(None, 1))
    tg.set_loss(Dense(1, in_layers=features))
    var = Variable([10.0])
    tg.add_output(var)
    tg.fit_generator([])
    assert tg.predict_on_batch(np.zeros((1, 1))) == [10.0]
    var.set_variable_initial_values([[15.0]])
    tg.fit_generator([])
    assert tg.predict_on_batch(np.zeros((1, 1))) == [15.0]
Example no. 7
  def test_submodels(self):
    """Test optimizing submodels."""
    tg = dc.models.TensorGraph(learning_rate=0.1, batch_size=1, use_queue=False)
    features = Feature(shape=(None, 1))
    var1 = Variable([2.0])
    var2 = Variable([2.0])
    tg.add_output(var1)
    tg.add_output(var2)
    loss = (var1 - 1) * (var1 - 1) + (var2 - 1) * (var2 - 1) + features
    tg.set_loss(loss)
    subloss1 = var1 * var1 + features
    subloss2 = var1 * var1 + var2 * var2 + features
    submodel1 = tg.create_submodel(loss=subloss1)
    # Passing layers=[var2] restricts this submodel to training var2 only.
    submodel2 = tg.create_submodel(layers=[var2], loss=subloss2)
    data = np.zeros((1, 1))
    generator = [{features: data}] * 500

    # Optimize submodel 1.  This should send var1 to 0 while leaving var2 unchanged.
    tg.fit_generator(generator, submodel=submodel1)
    self.assertAlmostEqual(
        0.0, tg.predict_on_batch(data, outputs=var1)[0], places=4)
    self.assertAlmostEqual(
        2.0, tg.predict_on_batch(data, outputs=var2)[0], places=4)

    # Optimize the main loss.  This should send both variables toward 1.
    tg.fit_generator(generator)
    self.assertAlmostEqual(
        1.0, tg.predict_on_batch(data, outputs=var1)[0], places=4)
    self.assertAlmostEqual(
        1.0, tg.predict_on_batch(data, outputs=var2)[0], places=4)

    # Optimize submodel 2.  This should send var2 to 0 while leaving var1 unchanged.
    tg.fit_generator(generator, submodel=submodel2)
    self.assertAlmostEqual(
        1.0, tg.predict_on_batch(data, outputs=var1)[0], places=4)
    self.assertAlmostEqual(
        0.0, tg.predict_on_batch(data, outputs=var2)[0], places=4)
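
The layers= argument above is the hook for partial training: a submodel restricted to a subset of layers updates only those layers' variables, even when its loss depends on other layers. The sketch below applies the same calls to train only the output head of a small two-layer model; the import path, layer sizes, and toy loss are assumptions for illustration, not part of the test.

  import numpy as np
  import deepchem as dc
  from deepchem.models.tensorgraph.layers import Feature, Dense  # assumed import path

  tg = dc.models.TensorGraph(learning_rate=0.1, batch_size=1, use_queue=False)
  features = Feature(shape=(None, 1))
  hidden = Dense(4, in_layers=features)  # layer we want to keep frozen
  output = Dense(1, in_layers=hidden)    # head layer we want to train
  tg.add_output(output)
  tg.set_loss(output * output)           # toy loss built with layer arithmetic
  subloss = output * output              # separate loss layer for the submodel
  # layers=[output] restricts training to the head's variables only.
  head_only = tg.create_submodel(layers=[output], loss=subloss)
  data = np.zeros((1, 1))
  tg.fit_generator([{features: data}] * 100, submodel=head_only)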
Example no. 8
  def create_layers(self, state, **kwargs):
    # env is the RL environment supplied by the enclosing test.
    reshaped = Reshape(shape=(1, -1, 10), in_layers=state)
    gru = GRU(n_hidden=10, batch_size=1, in_layers=reshaped)
    output = SoftMax(
        in_layers=[Reshape(in_layers=[gru], shape=(-1, env.n_actions))])
    value = Variable([0.0])
    return {'action_prob': output, 'value': value}
Example no. 9
def test_Variable_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 1))
  layer = Variable(np.array([15.0]))
  output = Multiply(in_layers=[feature, layer])
  tg.add_output(output)
  tg.set_loss(output)
  tg.build()
  tg.save()
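
The test only builds and saves the graph; restoring it is not exercised. A hypothetical round trip might look like the sketch below, where TensorGraph.load_from_dir and the model_dir attribute are assumptions about the legacy DeepChem API rather than calls taken from the test.

  # Assumed restore step (load_from_dir / model_dir are not shown in the test above).
  restored = TensorGraph.load_from_dir(tg.model_dir)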
Example no. 10
  def test_initialize_variable(self):
    """Test methods for initializing a variable."""
    # Set by variable constructor.

    tg = dc.models.TensorGraph(use_queue=False)
    features = Feature(shape=(None, 1))
    tg.set_loss(Dense(1, in_layers=features))
    var = Variable([10.0])
    tg.add_output(var)
    assert tg.predict_on_batch(np.zeros((1, 1))) == [10.0]

    # Set by set_variable_initial_values().

    tg = dc.models.TensorGraph(use_queue=False)
    features = Feature(shape=(None, 1))
    tg.set_loss(Dense(1, in_layers=features))
    var = Variable([10.0])
    var.set_variable_initial_values([[15.0]])
    tg.add_output(var)
    assert tg.predict_on_batch(np.zeros((1, 1))) == [15.0]
Example no. 11
  def test_get_layer_variable_values(self):
    """Test to get the variable values associated with a layer"""
    # Test for correct value return (normal mode)
    tg = dc.models.TensorGraph()
    var = Variable([10.0, 12.0])
    tg.add_output(var)
    expected = [10.0, 12.0]
    obtained = tg.get_layer_variable_values(var)[0]
    np.testing.assert_array_equal(expected, obtained)

    # Test for shapes (normal mode)
    tg = dc.models.TensorGraph()
    input_tensor = Input(shape=(10, 100))
    output = Dense(out_channels=20, in_layers=[input_tensor])
    tg.add_output(output)
    expected_shape = (100, 20)
    obtained_shape = tg.get_layer_variable_values(output)[0].shape
    assert expected_shape == obtained_shape
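
Taken together, the examples follow one pattern: a Variable is an ordinary trainable layer, so it can be wired into a TensorGraph, driven by a loss built with layer arithmetic, and read back with predict_on_batch. The self-contained sketch below mirrors that pattern end to end; the import path, learning rate, and step count are assumptions chosen for illustration.

  import numpy as np
  import deepchem as dc
  from deepchem.models.tensorgraph.layers import Feature, Variable  # assumed import path

  tg = dc.models.TensorGraph(learning_rate=0.1, batch_size=1, use_queue=False)
  features = Feature(shape=(None, 1))
  target = Variable([0.0])
  tg.add_output(target)
  # Minimizing (target - 3)^2 should drive the variable toward 3.
  tg.set_loss((target - 3.0) * (target - 3.0) + features)
  data = np.zeros((1, 1))
  tg.fit_generator([{features: data}] * 500)
  print(tg.predict_on_batch(data, outputs=target))  # expected to approach [3.0]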