Example #1
    def testAddSimpleGPU(self):
        with self.test_session(use_gpu=True):
            # a test case for Add operation
            size = (2, 3)
            x1 = tf.constant(2.0, shape=size, name="x1")
            x2 = tf.constant(3.0, shape=size, name="x2")
            y = tf.add(x1, x2, name="y")

            # checking gradients for x1
            error = ComputeGradientError(x1, size, y, size)
        tf.logging.info("x1 error = %f", error)
        assert error < 1e-4
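
A note on the helper: ComputeGradientError is TensorFlow's gradient checker (in later releases exposed as tf.test.compute_gradient_error). It builds the analytic Jacobian of y with respect to the given input, estimates the same Jacobian numerically by perturbing the input, and returns the maximum absolute difference between the two. The snippets on this page assume import numpy as np and import tensorflow as tf. A minimal sketch of the numeric side in plain NumPy (numeric_jacobian is a name introduced here for illustration, not part of TensorFlow):

import numpy as np

def numeric_jacobian(f, x, delta=1e-3):
    """Estimate the Jacobian of f: R^n -> R^m at x by central differences."""
    x = np.asarray(x, dtype=np.float64)
    y0 = np.asarray(f(x))
    jac = np.zeros((x.size, y0.size))
    for i in range(x.size):
        bumped = x.ravel().copy()
        bumped[i] += delta
        y_plus = np.asarray(f(bumped.reshape(x.shape))).ravel()
        bumped[i] -= 2 * delta
        y_minus = np.asarray(f(bumped.reshape(x.shape))).ravel()
        jac[i] = (y_plus - y_minus) / (2 * delta)  # central difference
    return jac

# For y = x1 + x2 the Jacobian w.r.t. x1 is the identity, so the
# difference from the analytic Jacobian should be far below 1e-4:
jac = numeric_jacobian(lambda x1: x1 + 3.0, np.full((2, 3), 2.0))
assert np.abs(jac - np.eye(6)).max() < 1e-4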
Example #2
  def testGather(self):
    with self.test_session():
      p_shape = (4, 2)
      p_size = 8
      index_values = [1, 3]
      y_shape = [2, 2]
      params = tf.constant(np.arange(p_size).astype(np.float64),
                           shape=p_shape, name="p")
      indices = tf.constant(index_values, name="i")
      y = tf.gather(params, indices, name="y")

      error = ComputeGradientError(params, p_shape, y, y_shape)
    tf.logging.info("gather error = %f", error)
    assert error < 1e-4
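
Why this passes: the gradient of tf.gather with respect to params is a scatter-add, i.e. the upstream gradient of each gathered row is added back into the row it came from, and rows that were never gathered get zero gradient. A plain NumPy sketch of that backward rule (gather_grad is a name introduced here for illustration):

import numpy as np

def gather_grad(params_shape, indices, upstream):
    """Backward rule for rows = params[indices]: scatter-add upstream
    gradients into the gathered rows; untouched rows stay zero."""
    grad = np.zeros(params_shape)
    for row, g in zip(indices, upstream):
        grad[row] += g  # += accumulates correctly for repeated indices
    return grad

# With index_values = [1, 3], only rows 1 and 3 of params receive gradient:
g = gather_grad((4, 2), [1, 3], np.ones((2, 2)))
assert (g[[1, 3]] == 1.0).all() and (g[[0, 2]] == 0.0).all()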
Example #3
  def testAddCustomized(self):
    with self.test_session():
      # a test case for Add operation
      size = (2, 3)
      x1 = tf.constant(2.0, shape=size, dtype=tf.float64, name="x1")
      x2 = tf.constant(3.0, shape=size, dtype=tf.float64, name="x2")
      y = tf.add(x1, x2, name="y")

      # checking gradients for x2 using a special init_value and delta
      x_init_value = np.asarray(np.arange(6, dtype=np.float64).reshape(2, 3))
      error = ComputeGradientError(x2, size, y, size, x_init_value=x_init_value,
                                   delta=1e-2)
    tf.logging.info("x2 error = %f", error)
    assert error < 1e-10
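
The much tighter 1e-10 tolerance is possible because Add is linear: the central difference (f(x + d) - f(x - d)) / (2d) is exact for a linear function, so with float64 inputs the only remaining error is floating-point rounding, and a larger delta such as the 1e-2 passed above actually reduces that rounding noise. A quick NumPy illustration of the effect:

import numpy as np

x = np.arange(6, dtype=np.float64)
for delta in (1e-2, 1e-5):
    estimate = ((x + delta + 3.0) - (x - delta + 3.0)) / (2 * delta)
    # The true derivative is exactly 1; only rounding error remains,
    # and it grows as delta shrinks (roughly 1e-13 at 1e-2, 1e-10 at 1e-5).
    print(delta, np.abs(estimate - 1.0).max())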
Example #4
  def testNestedGather(self):
    with self.test_session():
      p_shape = (8, 2)
      p_size = 16
      index_values = [1, 3, 5, 6]
      index_values2 = [0, 2]
      y2_shape = [2, 2]

      params = tf.constant(np.arange(p_size).astype(np.float64),
                           shape=p_shape, name="p")
      indices = tf.constant(index_values, name="i")
      y = tf.gather(params, indices, name="y")
      indices2 = tf.constant(index_values2, name="i2")
      y2 = tf.gather(y, indices2, name="y2")

      error = ComputeGradientError(params, p_shape, y2, y2_shape)
    tf.logging.info("nested gather error = %f", error)
    assert error < 1e-4
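
Composing two gathers is the same as one gather with composed indices (params[i1][i2] == params[i1[i2]]), so the gradient here flows back through both index maps and lands only on rows 1 and 5 of params. A one-line NumPy check of the identity:

import numpy as np

params = np.arange(16, dtype=np.float64).reshape(8, 2)
i1 = np.array([1, 3, 5, 6])
i2 = np.array([0, 2])
assert (params[i1][i2] == params[i1[i2]]).all()  # both select rows 1 and 5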
Example #5
  def _RunAndVerifyGradients(self):
    with self.test_session():
      # Random 4-D shape; make depth at least 2 so the normalization
      # window is meaningful.
      shape = np.random.randint(1, 5, size=4)
      shape[3] += 1
      # Random depth_radius, bias, alpha, beta.
      lrn_depth_radius = np.random.randint(1, shape[3])
      bias = 1.0 + np.random.rand()
      alpha = 1.0 * np.random.rand()
      beta = 1.0 * np.random.rand()
      inp_array = np.random.rand(*shape).astype("f")
      inp = tf.constant(list(inp_array.ravel(order="C")), shape=shape)
      lrn_op = tf.nn.local_response_normalization(
          inp,
          name="lrn",
          depth_radius=lrn_depth_radius,
          bias=bias,
          alpha=alpha,
          beta=beta)
      err = ComputeGradientError(inp, shape, lrn_op, shape)
    print("LRN Gradient error ", err)
    self.assertLess(err, 1e-4)
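
For reference, the operation being differentiated divides each value by a power of a sliding window of squares along the depth axis; TensorFlow's documentation gives it as output = input / (bias + alpha * sqr_sum) ** beta. A naive NumPy sketch of the forward pass (lrn_reference is a name introduced here for illustration):

import numpy as np

def lrn_reference(x, depth_radius, bias, alpha, beta):
    """Naive local response normalization over the last (depth) axis."""
    out = np.empty_like(x)
    depth = x.shape[3]
    for d in range(depth):
        lo, hi = max(0, d - depth_radius), min(depth, d + depth_radius + 1)
        sqr_sum = (x[:, :, :, lo:hi] ** 2).sum(axis=3)
        out[:, :, :, d] = x[:, :, :, d] / (bias + alpha * sqr_sum) ** beta
    return out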
Example #6
def BuildAndTestMiniMNIST(param_index, tag):
    # Hyperparameters
    batch = 3
    inputs = 16
    features = 32
    classes = 10

    # Define the parameters
    inp_data = np.random.random_sample(inputs * batch)
    hidden_weight_data = np.random.randn(inputs * features) / np.sqrt(inputs)
    hidden_bias_data = np.random.random_sample(features)
    sm_weight_data = np.random.randn(features * classes) / np.sqrt(features)
    sm_bias_data = np.random.random_sample(classes)

    # special care for labels since they need to be normalized per batch
    label_data = np.random.random(batch * classes).reshape((batch, classes))
    s = label_data.sum(axis=1)
    label_data /= s[:, None]

    with tf.Session():
        # We treat the inputs as "parameters" here
        inp = tf.constant(inp_data.tolist(),
                          shape=[batch, inputs],
                          dtype=tf.float64,
                          name="inp")
        hidden_weight = tf.constant(hidden_weight_data.tolist(),
                                    shape=[inputs, features],
                                    dtype=tf.float64,
                                    name="hidden_weight")
        hidden_bias = tf.constant(hidden_bias_data.tolist(),
                                  shape=[features],
                                  dtype=tf.float64,
                                  name="hidden_bias")
        softmax_weight = tf.constant(sm_weight_data.tolist(),
                                     shape=[features, classes],
                                     dtype=tf.float64,
                                     name="softmax_weight")
        softmax_bias = tf.constant(sm_bias_data.tolist(),
                                   shape=[classes],
                                   dtype=tf.float64,
                                   name="softmax_bias")

        # List all the parameters so that we can test them one at a time
        all_params = [
            inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias
        ]
        param_sizes = [
            [batch, inputs],      # inp
            [inputs, features],   # hidden_weight
            [features],           # hidden_bias
            [features, classes],  # softmax_weight
            [classes],            # softmax_bias
        ]

        # Build the mini MNIST network
        hidden = tf.nn.relu(tf.nn.xw_plus_b(inp, hidden_weight, hidden_bias),
                            name="features")
        logits = tf.nn.xw_plus_b(hidden,
                                 softmax_weight,
                                 softmax_bias,
                                 name="logits")
        labels = tf.constant(label_data.tolist(),
                             shape=[batch, classes],
                             dtype=tf.float64,
                             name="labels")
        cost = tf.nn.softmax_cross_entropy_with_logits(logits,
                                                       labels,
                                                       name="cost")

        # Test the gradients.
        err = ComputeGradientError(all_params[param_index],
                                   param_sizes[param_index],
                                   cost, [batch],
                                   delta=1e-5)

    tf.logging.info("Mini MNIST: %s gradient error = %g", tag, err)
    return err
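
A plausible way to drive this helper is one test method per parameter, passing the index into all_params along with a tag for the log line. The method names and the threshold below are illustrative, not taken from the original test file:

  def testInputGradient(self):
    self.assertLess(BuildAndTestMiniMNIST(0, "input"), 1e-8)

  def testHiddenWeightGradient(self):
    self.assertLess(BuildAndTestMiniMNIST(1, "hidden_weight"), 1e-8)

  def testSoftmaxWeightGradient(self):
    self.assertLess(BuildAndTestMiniMNIST(3, "softmax_weight"), 1e-8)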