Example #1
 def testProjection(self):
   """Create a partial monotone linear layer and check the projection."""
   input_dim = 10
   is_monotone = [True, False] * 5
   input_placeholder = array_ops.placeholder(
       dtype=dtypes.float32, shape=[None, input_dim])
    # We set init_weight_mean to -10.0. After projection, we expect the
    # elements that correspond to monotonic inputs to become 0.
   packed_results = monotone_linear_layers.monotone_linear_layer(
       input_placeholder,
       input_dim=input_dim,
       output_dim=2,
       is_monotone=is_monotone,
       init_weight_mean=-10.0,
       init_weight_stddev=0.0)
   (_, weight_tensor, projection_op, _) = packed_results
   # The weight is in shape (output_dim, input_dim).
   expected_pre_projection_weight = [[-10.0] * 10] * 2
   expected_projected_weight = [[0.0, -10.0] * 5] * 2
   with self.test_session() as sess:
     sess.run(variables.global_variables_initializer())
     pre_projection_weight = sess.run(weight_tensor)
     sess.run(projection_op)
     projected_weight = sess.run(weight_tensor)
   self.assertAllClose(expected_pre_projection_weight, pre_projection_weight)
   self.assertAllClose(expected_projected_weight, projected_weight)
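The projected values asserted above can be reproduced outside TensorFlow. Below is a minimal NumPy sketch of the assumed behaviour (clip the weights of inputs flagged as monotonic at zero, leave the rest untouched); project_monotone is a hypothetical helper, not the library's actual projection op.

import numpy as np

def project_monotone(weight, is_monotone):
  # Assumed behaviour: force weights of monotonic inputs to be non-negative,
  # leave unconstrained inputs as they are.
  weight = np.array(weight, dtype=np.float32)
  mask = np.array(is_monotone)
  weight[:, mask] = np.maximum(weight[:, mask], 0.0)
  return weight

weight = np.full((2, 10), -10.0)
print(project_monotone(weight, [True, False] * 5))
# Each row alternates 0.0 (monotonic, clipped) and -10.0 (unconstrained),
# matching expected_projected_weight above.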
Example #2
  def testNormalizationProjection(self):
   """Test projection when l1 normalization is requested."""
   input_dim = 10
   is_monotone = [True, False] * 5
   input_placeholder = tf.compat.v1.placeholder(
       dtype=tf.float32, shape=[None, input_dim])
    # We set init_weight_mean to -10.0. After projection, we expect the
    # elements that correspond to monotonic inputs to become 0.
   packed_results = monotone_linear_layers.monotone_linear_layer(
       input_placeholder,
       input_dim=input_dim,
       output_dim=2,
       is_monotone=is_monotone,
       init_weight_mean=-10.0,
       init_weight_stddev=0.0,
       add_bias=False,
       normalization_order=1,
   )
   (_, weight_tensor, projection_op, _) = packed_results
   # The weight is in shape (output_dim, input_dim).
   expected_pre_projection_weight = [[-10.0] * 10] * 2
   expected_projected_weight = [[0.0, -0.2] * 5] * 2
   with self.session() as sess:
     sess.run(tf.compat.v1.global_variables_initializer())
     pre_projection_weight = sess.run(weight_tensor)
     sess.run(projection_op)
     projected_weight = sess.run(weight_tensor)
   self.assertAllClose(expected_pre_projection_weight, pre_projection_weight)
   self.assertAllClose(expected_projected_weight, projected_weight)
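The expected value of -0.2 follows from the arithmetic of the l1 normalization, assuming it is applied after the monotonicity projection: each row keeps five -10.0 entries, so its l1 norm is 50.0 and each surviving weight becomes -10.0 / 50.0. A quick NumPy check of that arithmetic:

import numpy as np

row = np.array([0.0, -10.0] * 5)   # one row after the monotonicity projection
l1_norm = np.abs(row).sum()        # 5 * 10.0 = 50.0
print(row / l1_norm)               # alternating 0.0 and -0.2, as asserted above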
Example #3
 def testNoRegularizationExpectsNone(self):
   """Create a monotone linear layer and check no regularization."""
   input_dim = 10
   input_placeholder = array_ops.placeholder(
       dtype=dtypes.float32, shape=[None, input_dim])
    # We set init_weight_mean to -10.0.
   (_, _, _, regularization) = monotone_linear_layers.monotone_linear_layer(
       input_placeholder,
       input_dim=input_dim,
       output_dim=2,
       init_weight_mean=-10.0,
       init_weight_stddev=0.0)
   self.assertIsNone(regularization)
Example #4
 def testRegularization(self):
   """Create a monotone linear layer and check regularization."""
   input_dim = 10
   input_placeholder = array_ops.placeholder(
       dtype=dtypes.float32, shape=[None, input_dim])
    # We set init_weight_mean to -10.0.
   (_, _, _, regularization) = monotone_linear_layers.monotone_linear_layer(
       input_placeholder,
       input_dim=input_dim,
       output_dim=2,
       init_weight_mean=-10.0,
       init_weight_stddev=0.0,
       l1_reg=0.1,
       l2_reg=0.1)
   with self.test_session() as sess:
     sess.run(variables.global_variables_initializer())
     self.assertAlmostEqual(220.0, sess.run(regularization), delta=1e-5)
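The asserted value of 220.0 is consistent with an elementwise L1 plus L2 penalty over the (2, 10) weight matrix alone; whether the bias enters the regularizer is not shown here, so the sketch below only verifies the weight term:

import numpy as np

weight = np.full((2, 10), -10.0)
l1 = 0.1 * np.abs(weight).sum()     # 0.1 * 200  = 20.0
l2 = 0.1 * np.square(weight).sum()  # 0.1 * 2000 = 200.0
print(l1 + l2)                      # 220.0, matching the assertion above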
Example #5
 def testEvaluationWithZeroBias(self):
   """Create a partial monotone linear layer and check evaluation."""
   input_placeholder = array_ops.placeholder(
       dtype=dtypes.float32, shape=[None, 3])
   input_tensor = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
   sum_input_tensor = [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]
   packed_results = monotone_linear_layers.monotone_linear_layer(
       input_placeholder, input_dim=3, output_dim=5, init_bias=0.0)
   (output_tensor, _, _, _) = packed_results
   with self.test_session() as sess:
     sess.run(variables.global_variables_initializer())
     # Check linearity of the output tensor.
     # f(input_tensor + input_tensor) = 2 * f(input_tensor)
     # since the bias is 0.
     output_val = sess.run(
         output_tensor, feed_dict={input_placeholder: input_tensor})
     sum_output_val = sess.run(
         output_tensor, feed_dict={input_placeholder: sum_input_tensor})
     expected_sum_output_val = 2 * output_val
   self.assertAllClose(expected_sum_output_val, sum_output_val)
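The linearity being tested is the usual property of a bias-free linear map, f(2x) = 2 f(x). A self-contained NumPy sketch with a stand-in weight matrix (not the layer's actual weights) performs the same check:

import numpy as np

rng = np.random.default_rng(0)
weight = rng.normal(size=(5, 3))    # stand-in for the (output_dim, input_dim) weights

def linear(x):
  # Bias-free linear layer: (batch, input_dim) -> (batch, output_dim).
  return np.dot(x, weight.T)

x = np.eye(3)
sum_x = 2.0 * x
np.testing.assert_allclose(linear(sum_x), 2.0 * linear(x), rtol=1e-6)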
Example #6
 def testEvaluationWithDefaultBias(self):
   """Create a partial monotone linear layer and check the bias."""
   input_dim = 10
   input_placeholder = array_ops.placeholder(
       dtype=dtypes.float32, shape=[None, input_dim])
    # Monotone linear layers contain random weights, and for this input_tensor
    # we expect 0 as an output on "average". To remove that randomness, we set
    # the standard deviation to exactly zero.
   input_tensor = [[0.5] * input_dim]
   expected_output_val = [[0.0]]
   packed_results = monotone_linear_layers.monotone_linear_layer(
       input_placeholder,
       input_dim=input_dim,
       output_dim=1,
       init_weight_stddev=0.0)
   (output_tensor, _, _, _) = packed_results
   with self.test_session() as sess:
     sess.run(variables.global_variables_initializer())
      # With init_weight_stddev=0.0 the weights are deterministic, so the
      # default bias should center the output at exactly 0 for this
      # all-0.5 input.
     output_val = sess.run(
         output_tensor, feed_dict={input_placeholder: input_tensor})
   self.assertAllClose(expected_output_val, output_val)
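With init_weight_stddev=0.0 every weight equals the initialization mean, so f([0.5] * input_dim) = 0.5 * input_dim * weight_mean + bias. The assertion above holds exactly when the default bias equals -0.5 * input_dim * weight_mean, i.e. when it centers the layer at the midpoint input; that bias form is an assumption here, and the arithmetic below only checks the identity under it:

input_dim = 10
weight_mean = 2.0                        # illustrative; any value works since stddev is 0
bias = -0.5 * input_dim * weight_mean    # assumed form of the default bias
output = sum([0.5] * input_dim) * weight_mean + bias
print(output)                            # 0.0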