def _multiple_ops_in_middle():
  inputs = keras.Input(shape=(10,))
  x = keras.layers.Dense(10)(inputs)
  x = gen_nn_ops.relu(x, name='hey')
  x = gen_nn_ops.relu(x, name='hey2')
  outputs = keras.layers.Dense(10)(x)
  return inputs, outputs
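# A minimal usage sketch (not from the original tests): the (inputs, outputs)
# pair returned by helpers like the one above can be wrapped into a functional
# Model, so the raw gen_nn_ops.relu calls become part of the Keras graph.
inputs, outputs = _multiple_ops_in_middle()
model = keras.Model(inputs, outputs)
model.compile('sgd', 'mse')
model.summary()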
  def test_numerical_correctness_simple(self):
    x = ops.convert_to_tensor([[-1., 0., -2., 1.]])
    inputs = keras.Input(shape=(4,))
    outputs = gen_nn_ops.relu(inputs)
    model = keras.Model(inputs, outputs)
    y = self.evaluate(model(x))
    self.assertAllClose(y, [[0., 0., 0., 1.]])

  def test_built(self):
    inputs = keras.Input(shape=(10,))
    outputs = gen_nn_ops.relu(inputs)
    model = keras.Model(inputs, outputs)
    model.compile('sgd', 'mse')
    for layer in model.layers:
      self.assertTrue(layer.built)
    # Test something that requires Layers to be built.
    model.summary()
def _reuse_op():
  inputs = keras.Input(shape=(10,))
  # This op needs to be checked multiple times.
  x = gen_nn_ops.relu(inputs)
  y = keras.layers.Dense(10)(x)
  x2 = x * 2
  y2 = keras.layers.Dense(10)(x2)
  outputs = y + y2
  return inputs, outputs
  def test_serialization(self):
    x = ops.convert_to_tensor([-1., 0., -2., 1.])
    inputs = keras.Input(shape=(4,))
    outputs = gen_nn_ops.relu(inputs)
    model1 = keras.Model(inputs, outputs)
    y1 = self.evaluate(model1(x))
    model2 = model1.from_config(model1.get_config())
    y2 = self.evaluate(model2(x))
    self.assertAllClose(y1, y2)
    def _construct_graph_of_size(size):
      start = time.time()
      x = keras.backend.placeholder(shape=(10, 4))

      for _ in range(size):
        x = keras.layers.Dense(4)(x)
        x = gen_nn_ops.relu(x)

      end = time.time()
      return end - start
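    # Hypothetical usage of the helper above (an assumption, not from the
    # original test): it builds a chain of Dense + relu blocks on a placeholder
    # and returns how long construction took; build time should grow roughly
    # linearly with `size`.
    time_small = _construct_graph_of_size(25)
    time_large = _construct_graph_of_size(50)
    print(time_small, time_large)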
Example #8
def per_image_whitening(image):
  """Linearly scales `image` to have zero mean and unit norm.

  This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average
  of all values in image, and
  `adjusted_stddev = max(stddev, 1.0/sqrt(image.NumElements()))`.

  `stddev` is the standard deviation of all values in `image`. It is capped
  away from zero to protect against division by 0 when handling uniform images.

  Note that this implementation is limited:
  *  It only whitens based on the statistics of an individual image.
  *  It does not take into account the covariance structure.

  Args:
    image: 3-D tensor of shape `[height, width, channels]`.

  Returns:
    The whitened image with same shape as `image`.

  Raises:
    ValueError: if the shape of 'image' is incompatible with this function.
  """
  image = ops.convert_to_tensor(image, name='image')
  _Check3DImage(image, require_static=False)
  num_pixels = math_ops.reduce_prod(array_ops.shape(image))

  image = math_ops.cast(image, dtype=dtypes.float32)
  image_mean = math_ops.reduce_mean(image)

  variance = (math_ops.reduce_mean(math_ops.square(image)) -
              math_ops.square(image_mean))
  variance = gen_nn_ops.relu(variance)
  stddev = math_ops.sqrt(variance)

  # Apply a minimum normalization that protects us against uniform images.
  min_stddev = math_ops.inv(
      math_ops.sqrt(math_ops.cast(num_pixels, dtypes.float32)))
  pixel_value_scale = math_ops.maximum(stddev, min_stddev)
  pixel_value_offset = image_mean

  image = math_ops.sub(image, pixel_value_offset)
  image = math_ops.div(image, pixel_value_scale)
  return image
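# Reference sketch of the formula in the docstring above, written in plain
# NumPy for illustration; the helper name and sample values are assumptions,
# not part of the original code.
import numpy as np

def _whiten_reference(image):
  # whitened = (x - mean) / max(stddev, 1 / sqrt(num_elements))
  image = image.astype(np.float32)
  stddev = image.std()
  min_stddev = 1.0 / np.sqrt(image.size)
  return (image - image.mean()) / max(stddev, min_stddev)

sample = np.random.randint(0, 256, size=(8, 8, 3))
whitened = _whiten_reference(sample)
print(whitened.mean(), whitened.std())  # mean ~0, stddev ~1 for non-uniform images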
  def test_gradient_tape_in_function(self):
    z = keras.Input((1,))
    x = math_ops.matmul(z, constant_op.constant(2.0, shape=(1, 1)))
    x = math_ops.reduce_mean(x, axis=0, keepdims=True)
    h = gen_nn_ops.relu(x)
    m = keras.Model(z, h)

    @def_function.function()
    def f(x):
      with backprop.GradientTape() as t:
        t.watch(x)
        z = m(x ** 2)
      grads = t.gradient(z, x)
      return grads

    # The model computes relu(reduce_mean(2 * z)); with z = x ** 2 this is
    # 2 * x ** 2 for positive x, so the expected gradient at x = 10 is 4 * x = 40.
    self.assertAllEqual(f(constant_op.constant(10.0, shape=(1, 1))),
                        constant_op.constant(40.0, shape=(1, 1)))

    f = def_function.function(f)

    self.assertAllEqual(f(constant_op.constant(10.0, shape=(1, 1))),
                        constant_op.constant(40.0, shape=(1, 1)))
def per_image_standardization(image):
    """Linearly scales `image` to have zero mean and unit variance.

    This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average
    of all values in image, and
    `adjusted_stddev = max(stddev, 1.0/sqrt(image.NumElements()))`.

    `stddev` is the standard deviation of all values in `image`. It is capped
    away from zero to protect against division by 0 when handling uniform images.

    Args:
      image: An n-D Tensor where the last 3 dimensions are
        `[height, width, channels]`.

    Returns:
      The standardized image with same shape as `image`.

    Raises:
      ValueError: if the shape of 'image' is incompatible with this function.
    """
    with ops.name_scope(None, 'per_image_standardization', [image]) as scope:
        image = ops.convert_to_tensor(image, name='image')
        num_pixels = math_ops.reduce_prod(array_ops.shape(image)[1:4])
        image = math_ops.cast(image, dtype=dtypes.float32)
        image_mean = math_ops.reduce_mean(image,
                                          axis=[-1, -2, -3],
                                          keepdims=True)
        variance = (math_ops.reduce_mean(
            math_ops.square(image), axis=[-1, -2, -3], keepdims=True) -
                    math_ops.square(image_mean))
        variance = gen_nn_ops.relu(variance)
        stddev = math_ops.sqrt(variance)

        # Apply a minimum normalization that protects us against uniform images.
        min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, dtypes.float32))
        pixel_value_scale = math_ops.maximum(stddev, min_stddev)
        pixel_value_offset = image_mean

        image = math_ops.subtract(image, pixel_value_offset)
        image = math_ops.div(image, pixel_value_scale, name=scope)
        return image
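# Sketch of the batched behaviour implied by axis=[-1, -2, -3] above: for a
# [batch, height, width, channels] input, each image is standardized
# independently. NumPy is used here only for illustration (an assumption).
import numpy as np

batch = np.random.rand(2, 4, 4, 3).astype(np.float32)
mean = batch.mean(axis=(-1, -2, -3), keepdims=True)
stddev = batch.std(axis=(-1, -2, -3), keepdims=True)
min_stddev = 1.0 / np.sqrt(batch[0].size)
standardized = (batch - mean) / np.maximum(stddev, min_stddev)
print(standardized.reshape(2, -1).mean(axis=1))  # each entry is ~0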
def _single_op_in_middle():
  inputs = keras.Input(shape=(10,))
  x = keras.layers.Dense(10)(inputs)
  x = gen_nn_ops.relu(x)
  outputs = keras.layers.Dense(10)(x)
  return keras.Model(inputs, outputs)
def _multiple_ops_at_end():
  inputs = keras.Input(shape=(10,))
  x = keras.layers.Dense(10)(inputs)
  x = gen_nn_ops.relu(x)
  outputs = gen_nn_ops.relu(x)
  return keras.Model(inputs, outputs)
Example #14
  def predict(self, inputTensor, _):
    result = gen_nn_ops.relu(inputTensor)
    return result
Example #15
def _single_op_at_end():
  inputs = keras.Input(shape=(10,))
  x = keras.layers.Dense(10)(inputs)
  outputs = gen_nn_ops.relu(x)
  return inputs, outputs
Example #16
def _single_op_at_end():
  inputs = keras.Input(shape=(10,))
  x = keras.layers.Dense(10)(inputs)
  outputs = gen_nn_ops.relu(x, name='hey')
  return inputs, outputs
def _multiple_ops_at_end():
  inputs = keras.Input(shape=(10,))
  x = keras.layers.Dense(10)(inputs)
  x = gen_nn_ops.relu(x)
  outputs = gen_nn_ops.relu(x)
  return inputs, outputs
def _single_op_in_middle():
  inputs = keras.Input(shape=(10,))
  x = keras.layers.Dense(10)(inputs)
  x = gen_nn_ops.relu(x)
  outputs = keras.layers.Dense(10)(x)
  return inputs, outputs