Example #1
def _compute_gradient(x,
                      x_shape,
                      dx,
                      y,
                      y_shape,
                      dy,
                      x_init_value=None,
                      delta=1e-3,
                      extra_feed_dict=None):
  """Computes the theoretical and numerical jacobian."""
  t = dtypes.as_dtype(x.dtype)
  allowed_types = [dtypes.float16, dtypes.bfloat16, dtypes.float32,
                   dtypes.float64, dtypes.complex64, dtypes.complex128]
  assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
  t2 = dtypes.as_dtype(y.dtype)
  assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name

  if x_init_value is not None:
    i_shape = list(x_init_value.shape)
    assert(list(x_shape) == i_shape), "x_shape = %s, init_data shape = %s" % (
        x_shape, i_shape)
    x_data = x_init_value
  else:
    x_data = np.random.random_sample(x_shape).astype(t.as_numpy_dtype)
    if t.is_complex:
      x_data.imag = np.random.random_sample(x_shape)

  jacob_t = _compute_theoretical_jacobian(
      x, x_shape, x_data, dy, y_shape, dx, extra_feed_dict=extra_feed_dict)
  jacob_n = _compute_numeric_jacobian(
      x, x_shape, x_data, y, y_shape, delta, extra_feed_dict=extra_feed_dict)
  return jacob_t, jacob_n
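This private helper predates the public gradient-checking API. For reference, a minimal sketch of an equivalent check with `tf.test.compute_gradient` in TF 2.x (the function `f` below is an illustrative assumption, not part of the original code):

import tensorflow as tf

def f(x):
  # Any differentiable function of the input works for a gradient check.
  return tf.math.square(x)

# Returns matching tuples of theoretical and numerically estimated Jacobians.
theoretical, numerical = tf.test.compute_gradient(f, [tf.constant([1.0, 2.0])])
print(theoretical[0])
print(numerical[0])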
Example #2
def _compute_gradient(x,
                      x_shape,
                      dx,
                      y,
                      y_shape,
                      dy,
                      x_init_value=None,
                      delta=1e-3):
  """Computes the theoretical and numerical jacobian."""
  t = dtypes.as_dtype(x.dtype)
  allowed_types = [dtypes.float32, dtypes.float64, dtypes.complex64]
  assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
  t2 = dtypes.as_dtype(y.dtype)
  assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name

  if x_init_value is not None:
    i_shape = list(x_init_value.shape)
    assert(list(x_shape) == i_shape), "x_shape = %s, init_data shape = %s" % (
        x_shape, i_shape)
    x_data = x_init_value
  else:
    if t == dtypes.float32:
      dtype = np.float32
    else:
      dtype = np.float64
    x_data = np.asfarray(np.random.random_sample(x_shape), dtype=dtype)

  jacob_t = _compute_theoretical_jacobian(x, x_shape, x_data, dy, y_shape, dx)
  jacob_n = _compute_numeric_jacobian(x, x_shape, x_data, y, y_shape, delta)
  return jacob_t, jacob_n
Example #3
 def _verifySolve(self, x, y, batch_dims=None):
   for np_type in [np.float32, np.float64, np.complex64, np.complex128]:
     if np_type == np.float32 or np_type == np.complex64:
       tol = 1e-5
     else:
       tol = 1e-12
     for adjoint in False, True:
       if np_type in (np.float32, np.float64):
         a = x.real.astype(np_type)
         b = y.real.astype(np_type)
       else:
         a = x.astype(np_type)
         b = y.astype(np_type)
       a_np = np.conj(np.transpose(a)) if adjoint else a
       if batch_dims is not None:
         a = np.tile(a, batch_dims + [1, 1])
         a_np = np.tile(a_np, batch_dims + [1, 1])
         b = np.tile(b, batch_dims + [1, 1])
       np_ans = np.linalg.solve(a_np, b)
       for use_placeholder in False, True:
         with self.test_session(use_gpu=True) as sess:
           if use_placeholder:
             a_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
             b_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
             tf_ans = linalg_ops.matrix_solve(a_ph, b_ph, adjoint=adjoint)
             out = sess.run(tf_ans, {a_ph: a, b_ph: b})
           else:
             tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
             out = tf_ans.eval()
             self.assertEqual(tf_ans.get_shape(), out.shape)
           self.assertEqual(np_ans.shape, out.shape)
           self.assertAllClose(np_ans, out, atol=tol, rtol=tol)
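Outside the test harness, the same numpy-vs-TensorFlow comparison can be sketched in eager mode with the public `tf.linalg.solve` (the concrete matrices below are illustrative):

import numpy as np
import tensorflow as tf

a = np.array([[3.0, 1.0], [1.0, 2.0]], dtype=np.float32)
b = np.array([[9.0], [8.0]], dtype=np.float32)
np_ans = np.linalg.solve(a, b)
tf_ans = tf.linalg.solve(a, b)
# The two solvers should agree to within float32 tolerance.
np.testing.assert_allclose(np_ans, tf_ans.numpy(), rtol=1e-5, atol=1e-5)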
Example #4
  def testUnsortedSegmentOps1DIndices1DDataNegativeIndices(self):
    """Tests for min, max, and prod ops.

    These share most of their implementation with sum, so we only test basic
    functionality.
    """
    for dtype in self.numeric_types:
      self.assertAllClose(
          np.array([8, 3, 1, 0], dtype=dtype),
          self._unsortedSegmentProd(
              np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
              np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))

    for dtype in self.int_types | self.float_types:
      minval = dtypes.as_dtype(dtype).min
      maxval = dtypes.as_dtype(dtype).max

      self.assertAllClose(
          np.array([2, 3, maxval, 0], dtype=dtype),
          self._unsortedSegmentMin(
              np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
              np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
      self.assertAllClose(
          np.array([4, 3, minval, 6], dtype=dtype),
          self._unsortedSegmentMax(
              np.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype),
              np.array([3, -1, 0, 1, 0, -1, 3], dtype=np.int32), 4))
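The expected arrays in this test follow from how the unsorted segment ops treat ids: negative ids are ignored, and an empty segment receives the op's identity (1 for prod, the dtype max for min, the dtype min for max). A small standalone sketch with the public API:

import tensorflow as tf

data = tf.constant([0, 1, 2, 3, 4, 5, 6])
ids = tf.constant([3, -1, 0, 1, 0, -1, 3])
# Segment 2 is empty, so it gets prod's identity value 1.
print(tf.math.unsorted_segment_prod(data, ids, num_segments=4))  # [8 3 1 0]
print(tf.math.unsorted_segment_min(data, ids, num_segments=4))
print(tf.math.unsorted_segment_max(data, ids, num_segments=4))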
Example #5
def remote_fused_graph_execute(inputs,
                               output_types,
                               graph_def,
                               graph_input_node_names,
                               graph_output_node_names,
                               executor_name,
                               serialized_executor_parameters,
                               default_graph_input_tensor_type_shapes=None,
                               default_graph_output_tensor_type_shapes=None):
  """A wrapper for remote_fused_graph_execute."""
  info_proto = info_pb2.RemoteFusedGraphExecuteInfo()
  info_proto.remote_graph.CopyFrom(graph_def)
  info_proto.graph_input_node_name.extend(graph_input_node_names)
  info_proto.graph_output_node_name.extend(graph_output_node_names)
  info_proto.executor_name = executor_name
  info_proto.serialized_executor_parameters = serialized_executor_parameters
  if default_graph_input_tensor_type_shapes:
    for type_shape in default_graph_input_tensor_type_shapes:
      type_shape_proto = info_proto.default_graph_input_tensor_shape.add()
      type_shape_proto.dtype = dtypes.as_dtype(type_shape[0]).as_datatype_enum
      for dim in type_shape[1]:
        type_shape_proto.shape.dim.add().size = dim
  if default_graph_output_tensor_type_shapes:
    for type_shape in default_graph_output_tensor_type_shapes:
      type_shape_proto = info_proto.default_graph_output_tensor_shape.add()
      type_shape_proto.dtype = dtypes.as_dtype(type_shape[0]).as_datatype_enum
      for dim in type_shape[1]:
        type_shape_proto.shape.dim.add().size = dim

  serialized_info = info_proto.SerializeToString()

  return gen_remote_fused_graph_ops.remote_fused_graph_execute(
      inputs, output_types, serialized_info)
Example #6
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
  """Fill in default values for grad_ys.

  Args:
    grad_ys: List of gradients, can contain None.
    ys: List of tensors.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.

  Returns:
    A list of gradients to use, without None.

  Raises:
    ValueError: If one of the grad_ys is invalid.
  """
  if len(grad_ys) != len(ys):
    raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
  grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
  for i in xrange(len(grad_ys)):
    grad_y = grad_ys[i]
    y = ys[i]
    if grad_y is None:
      with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
        grad_ys[i] = array_ops.fill(
            array_ops.shape(y), constant_op.constant(
                1, dtype=y.dtype))
    else:
      if grad_y.dtype != y.dtype:
        raise ValueError("Y and ys_grad must be of the same type, "
                         "not y: %s, ys_grad: %s " %
                         (dtypes.as_dtype(y.dtype).name,
                          dtypes.as_dtype(grad_y.dtype).name))
  return grad_ys
Example #7
def _SatisfiesTypeConstraint(dtype, attr_def):
  if attr_def.HasField("allowed_values"):
    allowed_list = attr_def.allowed_values.list.type
    if dtype not in allowed_list:
      raise TypeError(
          "DataType %s for attr '%s' not in list of allowed values: %s" %
          (dtypes.as_dtype(dtype).name, attr_def.name,
           ", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
Example #8
 def input_builder(self):
     """Builds inputs in the graph."""
     input_shape = [None] + self.input_shape[1:]
     output_shape = [None] + self.output_shape[1:]
     self._input_placeholder = array_ops.placeholder(
         dtypes.as_dtype(self.input_dtype), input_shape, name="input")
     self._output_placeholder = array_ops.placeholder(
         dtypes.as_dtype(self.output_dtype), output_shape, name="output")
     return self._input_placeholder, self._output_placeholder
Example #9
 def _testTernary(self, op, a, b, c, expected):
   with self.test_session() as session:
     with self.test_scope():
       pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
       pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
       pc = array_ops.placeholder(dtypes.as_dtype(c.dtype), c.shape, name="c")
       output = op(pa, pb, pc)
     result = session.run(output, {pa: a, pb: b, pc: c})
     self.assertAllClose(result, expected, rtol=1e-3)
Example #10
  def __init__(self, key_dtype, value_dtype):
    """Construct a table initializer object.

    Args:
      key_dtype: Type of the table keys.
      value_dtype: Type of the table values.
    """
    self._key_dtype = dtypes.as_dtype(key_dtype)
    self._value_dtype = dtypes.as_dtype(value_dtype)
Example #11
def _SatisfiesTypeConstraint(dtype, attr_def, param_name):
  if attr_def.HasField("allowed_values"):
    allowed_list = attr_def.allowed_values.list.type
    if dtype not in allowed_list:
      raise TypeError(
          "Value passed to parameter '%s' has DataType %s not in list of "
          "allowed values: %s" %
          (param_name, dtypes.as_dtype(dtype).name,
           ", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
Example #12
  def _verifySolve(self,
                   x,
                   y,
                   dtype,
                   use_placeholder,
                   fast,
                   l2_regularizer,
                   batch_shape=()):
    if not fast and l2_regularizer != 0:
      # The slow path does not support regularization.
      return
    maxdim = np.max(x.shape)
    if dtype == np.float32 or dtype == np.complex64:
      tol = maxdim * 5e-4
    else:
      tol = maxdim * 5e-7
    a = x.astype(dtype)
    b = y.astype(dtype)
    if dtype in [np.complex64, np.complex128]:
      a.imag = a.real
      b.imag = b.real
    # numpy.linalg.lstsq does not support batching, so we just solve a single
    # system and replicate the solution and residual norm.
    np_ans = _SolveWithNumpy(x, y, l2_regularizer=l2_regularizer)
    np_r = np.dot(np.conj(a.T), b - np.dot(a, np_ans))
    np_r_norm = np.sqrt(np.sum(np.conj(np_r) * np_r))
    if batch_shape:
      a = np.tile(a, batch_shape + (1, 1))
      b = np.tile(b, batch_shape + (1, 1))
      np_ans = np.tile(np_ans, batch_shape + (1, 1))
      np_r_norm = np.tile(np_r_norm, batch_shape)
    with self.cached_session(use_gpu=fast) as sess:
      if use_placeholder:
        a_ph = array_ops.placeholder(dtypes.as_dtype(dtype))
        b_ph = array_ops.placeholder(dtypes.as_dtype(dtype))
        feed_dict = {a_ph: a, b_ph: b}
        tf_ans = linalg_ops.matrix_solve_ls(
            a_ph, b_ph, fast=fast, l2_regularizer=l2_regularizer)
      else:
        tf_ans = linalg_ops.matrix_solve_ls(
            a, b, fast=fast, l2_regularizer=l2_regularizer)
        feed_dict = {}
        self.assertEqual(np_ans.shape, tf_ans.get_shape())
      if l2_regularizer == 0:
        # The least squares solution should satisfy A^H * (b - A*x) = 0.
        tf_r = b - math_ops.matmul(a, tf_ans)
        tf_r = math_ops.matmul(a, tf_r, adjoint_a=True)
        tf_r_norm = linalg_ops.norm(tf_r, ord="fro", axis=[-2, -1])
        tf_ans_val, tf_r_norm_val = sess.run(
            [tf_ans, tf_r_norm], feed_dict=feed_dict)
        self.assertAllClose(np_r_norm, tf_r_norm_val, atol=tol, rtol=tol)
      else:
        tf_ans_val = sess.run(tf_ans, feed_dict=feed_dict)

    self.assertEqual(np_ans.shape, tf_ans_val.shape)
    self.assertAllClose(np_ans, tf_ans_val, atol=2 * tol, rtol=2 * tol)
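For a quick standalone check of the same least-squares behaviour, the public `tf.linalg.lstsq` can be compared against `numpy.linalg.lstsq` (the small system below is an illustrative assumption):

import numpy as np
import tensorflow as tf

# Overdetermined, well-conditioned system.
a = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]], dtype=np.float32)
b = np.array([[1.0], [2.0], [3.0]], dtype=np.float32)
x_np, _, _, _ = np.linalg.lstsq(a, b, rcond=None)
x_tf = tf.linalg.lstsq(a, b, fast=True)
np.testing.assert_allclose(x_np, x_tf.numpy(), rtol=1e-4, atol=1e-4)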
Example #13
def make_attr(attr_type, value):
  if attr_type == pywrap_tensorflow.TF_ATTR_TYPE:
    return dtypes.as_dtype(value)
  elif attr_type == [pywrap_tensorflow.TF_ATTR_TYPE]:
    return [dtypes.as_dtype(v) for v in value]
  elif attr_type == pywrap_tensorflow.TF_ATTR_SHAPE:
    return tensor_shape.as_shape(value).as_proto()
  elif attr_type == [pywrap_tensorflow.TF_ATTR_SHAPE]:
    return [tensor_shape.as_shape(v).as_proto() for v in value]
  return value
Example #14
  def __init__(self, key_dtype, value_dtype):
    """Construct a lookup table interface.

    Args:
      key_dtype: The table key type.
      value_dtype: The table value type.
    """
    self._key_dtype = dtypes.as_dtype(key_dtype)
    self._value_dtype = dtypes.as_dtype(value_dtype)
    super(LookupInterface, self).__init__()
Example #15
 def _testBinary(self, op, a, b, expected, equality_test=None):
   with self.test_session() as session:
     with self.test_scope():
       pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
       pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
       output = op(pa, pb)
     result = session.run(output, {pa: a, pb: b})
     if equality_test is None:
       equality_test = self.assertAllCloseAccordingToType
     equality_test(result, expected, rtol=1e-3)
Example #16
  def __init__(self, key_dtype, value_dtype, name):
    """Construct a lookup table interface.

    Args:
      key_dtype: The table key type.
      value_dtype: The table value type.
      name: A name for the operation (optional).
    """
    self._key_dtype = dtypes.as_dtype(key_dtype)
    self._value_dtype = dtypes.as_dtype(value_dtype)
    self._name = name
Example #17
 def testAllTypesConvertibleToNumpyDtype(self):
   for datatype_enum in types_pb2.DataType.values():
     if not _is_numeric_dtype_enum(datatype_enum):
       continue
     dtype = dtypes.as_dtype(datatype_enum)
     numpy_dtype = dtype.as_numpy_dtype
     _ = np.empty((1, 1, 1, 1), dtype=numpy_dtype)
     if dtype.base_dtype != dtypes.bfloat16:
       # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
       self.assertEqual(
           dtypes.as_dtype(datatype_enum).base_dtype,
           dtypes.as_dtype(numpy_dtype))
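The round trip this test relies on also holds for the other inputs `as_dtype` accepts; a short sketch using the same module:

import numpy as np
from tensorflow.python.framework import dtypes

# as_dtype accepts strings, numpy dtypes, and DataType enum values.
assert dtypes.as_dtype("float32") == dtypes.float32
assert dtypes.as_dtype(np.float32) == dtypes.float32
assert dtypes.float32.as_numpy_dtype == np.float32
assert dtypes.as_dtype(dtypes.float32.as_datatype_enum) == dtypes.float32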
Example #18
 def get_placeholder(shape, dtype, name_prepend):
   if shape is None:
     return None
   if isinstance(shape, dict):
     placeholder = {}
     for key in list(shape.keys()):
       placeholder[key] = array_ops.placeholder(
           dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
           name=name_prepend + '_' + key)
   else:
     placeholder = array_ops.placeholder(
         dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
   return placeholder
Example #19
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
  """Fill in default values for grad_ys.

  Args:
    grad_ys: List of gradients, can contain None.
    ys: List of tensors.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.

  Returns:
    A list of gradients to use, without None.

  Raises:
    ValueError: If sizes of gradients and inputs don't match
    TypeError: If type of any gradient is not valid for its input.
  """
  if len(grad_ys) != len(ys):
    raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
  grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
  for i in xrange(len(grad_ys)):
    grad_y = grad_ys[i]
    y = ys[i]
    if grad_y is None:
      if y.dtype.is_complex:
        raise TypeError(
            "Gradients of complex tensors must set grad_ys (y.dtype = %r)" %
            y.dtype)
      with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
        grad_ys[i] = array_ops.fill(
            array_ops.shape(y), constant_op.constant(
                1, dtype=y.dtype))
      continue
    if y.dtype.is_floating or y.dtype.is_integer:
      if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:
        raise TypeError("Gradient type %s generated for real or "
                         "integer-valued tensor %s with type %s must be "
                         "real or integer" %
                         (dtypes.as_dtype(grad_y.dtype).name, y,
                          dtypes.as_dtype(y.dtype).name))
    elif y.dtype.is_complex:
      if not grad_y.dtype.is_complex:
        raise TypeError("Gradient type %s generated for complex-valued "
                         "tensor %s with type %s must be real" %
                         (dtypes.as_dtype(grad_y.dtype).name, y,
                          dtypes.as_dtype(y.dtype).name))
    else:
      raise TypeError("Tensor %s with type %s must be numeric "
                      "to obtain a default gradient" %
                      (y, dtypes.as_dtype(y.dtype).name))
  return grad_ys
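In user code this default shows up in `tf.gradients`: omitting `grad_ys` is equivalent to seeding every `y` with a ones tensor of the same shape and dtype. A minimal sketch (run inside a `tf.function`, since `tf.gradients` requires graph mode):

import tensorflow as tf

@tf.function
def grads(x):
  y = 2.0 * x
  default = tf.gradients(y, x)  # same as grad_ys=[tf.ones_like(y)]
  custom = tf.gradients(y, x, grad_ys=[tf.constant([1.0, 0.0, -1.0])])
  return default, custom

print(grads(tf.constant([1.0, 2.0, 3.0])))
# default -> [2., 2., 2.], custom -> [2., 0., -2.]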
Example #20
def _ones(shape, dtype):
  if dtypes.as_dtype(dtype) == dtypes.string:
    return None

  if not context.context().executing_eagerly():
    return array_ops.ones(shape, dtype)

  if dtypes.as_dtype(dtype).is_bool:
    value = True
  else:
    value = 1

  if shape == ():  # pylint: disable=g-explicit-bool-comparison
    return constant_op.constant(value, dtype=dtype)
  return _fast_fill(value, shape, dtype)
Example #21
def ones(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor with all elements set to 1.

  This operation returns a tensor of type `dtype` with shape `shape` and all
  elements set to 1.

  For example:

  ```python
  tf.ones([2, 3], tf.int32) ==> [[1, 1, 1], [1, 1, 1]]
  ```

  Args:
    shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
    dtype: The type of an element in the resulting `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to 1.
  """
  with ops.op_scope([shape], name, "ones") as name:
    if isinstance(shape, list):
      output = constant(1, shape=shape, dtype=dtype, name=name)
    else:
      shape = ops.convert_to_tensor(shape, name="shape")
      output = fill(shape, constant(1, dtype=dtype), name=name)
  assert output.dtype.base_dtype == dtypes.as_dtype(dtype).base_dtype
  return output
Example #22
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without applying any scaling.  If
  there is a danger that values would over or underflow in the cast, this op
  applies the appropriate clamping before the cast.

  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  # When casting to a type with smaller representable range, clamp.
  # Note that this covers casting to unsigned types as well.
  with ops.op_scope([value], name, "saturate_cast") as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    if value.dtype.min < dtype.min:
      value = maximum(value, ops.convert_to_tensor(
          dtype.min, dtype=value.dtype, name="min"))
    if value.dtype.max > dtype.max:
      value = minimum(value, ops.convert_to_tensor(
          dtype.max, dtype=value.dtype, name="max"))
    return cast(value, dtype, name=name)
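A quick illustration of the clamping described in the docstring, using the public `tf.saturate_cast` in eager mode:

import tensorflow as tf

x = tf.constant([-10.0, 0.5, 300.0])
# Values outside uint8's representable range are clamped instead of wrapping.
print(tf.saturate_cast(x, tf.uint8))  # [0, 0, 255]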
Example #23
def _default_getter(name, shape, dtype, initializer=None,
                    partition_info=None, **kwargs):
  """A pared-down version of get_variable which does not reuse variables."""
  dtype = dtypes.as_dtype(dtype)
  shape_object = tensor_shape.as_shape(shape)
  with ops.init_scope():
    if initializer is None:
      initializer, initializing_from_value = (
          variable_scope._get_default_variable_store()._get_default_initializer(  # pylint: disable=protected-access
              name=name, shape=shape_object, dtype=dtype))
    else:
      initializing_from_value = not callable(initializer)
    # Same logic as get_variable
    variable_dtype = dtype.base_dtype
    if initializing_from_value:
      if shape is not None:
        raise ValueError("If initializer is a constant, do not specify shape.")
      initial_value = initializer
    else:
      # Instantiate initializer if provided initializer is a type object.
      if isinstance(initializer, type(init_ops.Initializer)):
        initializer = initializer(dtype=dtype)
      def initial_value():
        return initializer(
            shape_object.as_list(), dtype=dtype, partition_info=partition_info)
    return resource_variable_ops.ResourceVariable(
        initial_value=initial_value,
        name=name,
        dtype=variable_dtype,
        **kwargs
    )
Example #24
  def _compare(self, fn, args, require_kernel_launch=True, noinline=None):
    with session_lib.Session(config=NoRewriteSessionConfig()) as sess:
      placeholders = []
      feeds = {}
      for arg in args:
        placeholder = array_ops.placeholder(
            dtypes.as_dtype(arg.dtype), list(arg.shape))
        placeholders.append(placeholder)
        feeds[placeholder] = arg

      compiled_op = CompiledKernel(fn, *placeholders, noinline=noinline)
      direct_op = fn(*placeholders)

      run_metadata = config_pb2.RunMetadata()
      compiled = test_utils.RunWithWarmup(
          sess, compiled_op, feeds,
          config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE),
          run_metadata)
      print("Compiled Result {}".format(compiled))

      if require_kernel_launch:
        self.assert_(MetadataHasXlaRunOp(run_metadata))

        direct = sess.run(direct_op, feeds)
        print("Direct Result {}".format(direct))

        if (isinstance(compiled, (tuple, list)) and
            (isinstance(direct, (tuple, list)))):
          for (x, y) in zip(compiled, direct):
            self.assertAllClose(x, y, rtol=1e-1)
        else:
          self.assertAllClose(compiled, direct, rtol=1e-2)
Example #25
      def fn():
        ta = tensor_array_ops.TensorArray(
            dtype=dtypes.as_dtype(dtype),
            tensor_array_name="foo",
            size=3,
            infer_shape=False)

        value_0 = constant_op.constant(c([[4.0, 5.0]]))
        value_1 = constant_op.constant(c([[3.0, 3.5]]))

        w0 = ta.write(0, value_0)
        w1 = w0.write(1, value_1)
        r0 = w1.read(0)
        r1 = w1.read(1)
        r0_2 = w1.read(0)

        # Test individual components' gradients
        grad_just_r0 = gradients_impl.gradients(
            ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
        grad_r0_r0_2 = gradients_impl.gradients(
            ys=[r0, r0_2],
            xs=[value_0],
            grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
        grad_just_r1 = gradients_impl.gradients(
            ys=[r1], xs=[value_1], grad_ys=[c([[-2.0, -4.0]])])
        # Test combined gradients
        grad = gradients_impl.gradients(
            ys=[r0, r0_2, r1],
            xs=[value_0, value_1],
            grad_ys=[c([[2.0, 3.0]]),
                     c([[1.0, -1.0]]),
                     c([[-2.0, -10.0]])])

        return [grad_just_r0, grad_r0_r0_2, grad_just_r1, grad]
Example #26
  def testContribSignalSTFT(self):
    ws = 512
    hs = 128
    dims = (ws * 20,)
    shape = BATCH_DIMS + dims
    data = np.arange(np.prod(shape)) / np.prod(dims)
    np.random.seed(123)
    np.random.shuffle(data)
    data = np.reshape(data.astype(np.float32), shape)
    window = sps.get_window("hann", ws)
    expected = sps.stft(
        data, nperseg=ws, noverlap=ws - hs, boundary=None, window=window)[2]
    expected = np.swapaxes(expected, -1, -2)
    expected *= window.sum()  # scipy divides by window sum
    with self.test_session() as sess:
      with self.test_scope():
        ph = array_ops.placeholder(
            dtypes.as_dtype(data.dtype), shape=data.shape)
        out = signal.stft(ph, ws, hs)
        grad = gradients_impl.gradients(out, ph,
                                        grad_ys=array_ops.ones_like(out))

      # For gradients, we simply verify that they compile & execute.
      value, _ = sess.run([out, grad], {ph: data})
      self.assertAllClose(expected, value, rtol=RTOL, atol=ATOL)
Example #27
def _from_definition(fdef, grad_func=None):
  """Creates a _DefinedFunction initialized from a FunctionDef proto.

  Args:
    fdef: a FunctionDef
    grad_func: a _DefinedFunction or None

  Returns:
    A _DefinedFunction representing fdef
  """
  # The Python callable is only needed to create a FunctionDef. Since we have
  # the FunctionDef here, we don't need to set _DefinedFunction._func (nor do we
  # have access to such a callable here).
  func = None
  argnames = [arg.name for arg in fdef.signature.input_arg]
  input_types = tuple(
      dtypes.as_dtype(arg.type) for arg in fdef.signature.input_arg)
  func_name = fdef.signature.name
  # Note: FunctionDefs do not include python gradient functions, so if the
  # original _DefinedFunction included one it will not be reflected here.
  python_grad_func = None
  out_names = [arg.name for arg in fdef.signature.output_arg]
  result = _DefinedFunction(func, argnames, input_types, func_name, grad_func,
                            python_grad_func, out_names)
  # pylint: disable=protected-access
  result._definition = fdef
  # Captured inputs are added as regular inputs to a function when it's
  # serialized, i.e. any extra inputs from the original function are now
  # included in `result`._args
  result._extra_inputs = []
  result._hash_str = result._create_hash_str(
      result._definition.signature.input_arg,
      result._definition.signature.output_arg, result._definition.node_def)
  # pylint: enable=protected-access
  return result
Example #28
  def __init__(self,
               images,
               labels,
               fake_data=False,
               one_hot=False,
               dtype=dtypes.float32,
               reshape=True):
    """Construct a DataSet.

    `one_hot` is used only if `fake_data` is true.  `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
      raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                      dtype)
    if fake_data:
      self._num_examples = 10000
      self.one_hot = one_hot
    else:
      assert images.shape[0] == labels.shape[0], (
          'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
      self._num_examples = images.shape[0]

      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      if reshape:
        assert images.shape[3] == 1
        images = images.reshape(images.shape[0],
                                images.shape[1] * images.shape[2])
      if dtype == dtypes.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(numpy.float32)
        images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
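The `[0, 255]` to `[0, 1]` rescaling the docstring describes amounts to the following numpy conversion (a standalone sketch of the constructor's float32 path, not part of the class):

import numpy as np

images = np.full((10, 28, 28, 1), 255, dtype=np.uint8)
# Flatten to [num_examples, rows*columns] and rescale, as the constructor does
# when dtype == float32.
flat = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
flat = flat.astype(np.float32) * (1.0 / 255.0)
print(flat.shape, flat.min(), flat.max())  # (10, 784) 1.0 1.0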
Example #29
def random_uniform(shape,
                   minval=0,
                   maxval=None,
                   dtype=dtypes.float32,
                   seed=None,
                   name=None):
  """Outputs random values from a uniform distribution.

  The generated values follow a uniform distribution in the range
  `[minval, maxval)`. The lower bound `minval` is included in the range, while
  the upper bound `maxval` is excluded.

  For floats, the default range is `[0, 1)`.  For ints, at least `maxval` must
  be specified explicitly.

  In the integer case, the random integers are slightly biased unless
  `maxval - minval` is an exact power of two.  The bias is small for values of
  `maxval - minval` significantly smaller than the range of the output (either
  `2**32` or `2**64`).

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
    minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the
      range of random values to generate.  Defaults to 0.
    maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on
      the range of random values to generate.  Defaults to 1 if `dtype` is
      floating point.
    dtype: The type of the output: `float16`, `float32`, `float64`, `int32`,
      or `int64`.
    seed: A Python integer. Used to create a random seed for the distribution.
      See @{tf.set_random_seed}
      for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random uniform values.

  Raises:
    ValueError: If `dtype` is integral and `maxval` is not specified.
  """
  dtype = dtypes.as_dtype(dtype)
  if dtype not in (dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
                   dtypes.int64):
    raise ValueError("Invalid dtype %r" % dtype)
  if maxval is None:
    if dtype.is_integer:
      raise ValueError("Must specify maxval for integer dtype %r" % dtype)
    maxval = 1
  with ops.name_scope(name, "random_uniform", [shape, minval, maxval]) as name:
    shape = _ShapeTensor(shape)
    minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
    maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
    seed1, seed2 = random_seed.get_seed(seed)
    if dtype.is_integer:
      return gen_random_ops._random_uniform_int(
          shape, minval, maxval, seed=seed1, seed2=seed2, name=name)
    else:
      rnd = gen_random_ops._random_uniform(
          shape, dtype, seed=seed1, seed2=seed2)
      return math_ops.add(rnd * (maxval - minval), minval, name=name)
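Usage of the public API this docstring describes; note that integer dtypes require an explicit `maxval` (a small eager-mode sketch):

import tensorflow as tf

f = tf.random.uniform([2, 3])                          # float32 in [0, 1)
u = tf.random.uniform([4], minval=-1.0, maxval=1.0)    # float32 in [-1, 1)
i = tf.random.uniform([5], maxval=10, dtype=tf.int32)  # int32 in [0, 10)
print(f.dtype, u.dtype, i.dtype)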
Example #30
  def __init__(self, shape, dtype, scale=None,
               verify_pd=True, name="OperatorPDIdentity"):
    """Initialize an `OperatorPDIdentity`.

    Args:
      shape:  `int32` rank 1 `Tensor` of length at least 2, and with the last
        two entries equal (since this is a square matrix).
      dtype:  Data type of the matrix that this operator represents.
      scale: floating point rank 0 `Tensor` representing a scalar to
        multiply the identity matrix by. This will default to a scale of 1.
        This will be converted to the dtype `dtype`.
      verify_pd:  `Boolean`, if `True`, asserts are added to the initialization
        args to ensure they define this operator as a square (batch) matrix.
      name:  Name to prepend to `Ops`.
    """

    # Grab static shape if available now.
    with ops.name_scope(name):
      with ops.name_scope("init", values=[shape, scale]):
        self._dtype = dtypes.as_dtype(dtype)
        self._verify_pd = verify_pd
        self._name = name

        # Store the static shape (if possible) right now before adding the
        # asserts, since the asserts prevent .constant_value from working.
        shape = ops.convert_to_tensor(shape, name="shape")
        self._get_shape = tensor_shape.TensorShape(
            tensor_util.constant_value(shape))
        self._shape_arg = self._check_shape(shape)
        self._scale = self._check_scale(scale, self._dtype)
Example #31
 def testInvalid(self):
   with self.assertRaises(TypeError):
     dtypes.DType(types_pb2.DT_INVALID)
   with self.assertRaises(TypeError):
     dtypes.as_dtype(types_pb2.DT_INVALID)
Example #32
    def RunTest(self, run_params):
        if not self.ShouldRunTest(run_params):
            return
        assert run_params.precision_mode in PRECISION_MODES
        np.random.seed(12345)

        params = self._GetParamsCached()
        input_gdef = params.gdef
        input_dtypes = {}
        for node in input_gdef.node:
            if self._ToString(node.name) in params.input_names:
                assert self._ToString(node.op) == "Placeholder"
                input_dtypes[self._ToString(node.name)] = (dtypes.as_dtype(
                    node.attr["dtype"].type).as_numpy_dtype())
        assert len(params.input_names) == len(input_dtypes)

        inputs_data = []
        for inp in params.input_dims:
            current_input_data = []
            for i in range(len(params.input_names)):
                dtype = input_dtypes[params.input_names[i]]
                # Multiply the input by some constant to avoid all zeros input for
                # integer types.
                scale = 10.0 if np.issubdtype(dtype, np.integer) else 1.0
                dims = inp[i]
                # TODO(laigd): add debug options. E.g. we can set the input data to be
                # continuous natural numbers:
                # seq = np.arange(np.prod(dims))
                # seq.resize(dims)
                # input_data.append(scale * seq.astype(dtype))
                current_input_data.append(
                    (scale * np.random.random_sample(dims)).astype(dtype))
            inputs_data.append(current_input_data)

        # Verify original graph.
        self._VerifyGraphDef(run_params, input_gdef, GraphState.ORIGINAL)

        # Run original graph without trt to get reference result.
        config_no_trt = self._GetConfigProto(run_params, GraphState.ORIGINAL)
        logging.info("Running original graph w/o trt, config:\n%s",
                     str(config_no_trt))
        ref_result = self._RunGraph(run_params, input_gdef, inputs_data,
                                    config_no_trt, GraphState.ORIGINAL)

        # Run calibration if necessary.
        if (IsQuantizationMode(run_params.precision_mode)
                and run_params.use_calibration):
            infer_gdef = self._GetCalibratedInferGraph(run_params, input_gdef,
                                                       inputs_data)
            self._VerifyGraphDef(run_params, infer_gdef, GraphState.INFERENCE)
        elif not run_params.use_optimizer:
            infer_gdef = self._GetInferGraph(run_params, input_gdef)
            self._VerifyGraphDef(run_params, infer_gdef, GraphState.INFERENCE)
        else:
            infer_gdef = input_gdef

        # Run inference.
        infer_config = self._GetConfigProto(run_params, GraphState.INFERENCE)
        logging.info("Running final inference graph, config:\n%s",
                     str(infer_config))
        result = self._RunGraph(run_params, infer_gdef, inputs_data,
                                infer_config, GraphState.INFERENCE)
        self.assertAllClose(ref_result,
                            result,
                            atol=self.ExpectedAbsoluteTolerance(run_params),
                            rtol=self.ExpectedRelativeTolerance(run_params))
Example #33
 def __init__(self, mean=0.0, factor=1.0, seed=None, dtype=dtypes.float32):
     self.mean = mean
     self.factor = factor
     self.seed = seed
     self.dtype = dtypes.as_dtype(dtype)
Example #34
 def assertAC(self, x, y):
     """Derived classes can set _atol, _rtol to get different tolerance."""
     dtype = dtypes.as_dtype(x.dtype)
     atol = self._atol[dtype]
     rtol = self._rtol[dtype]
     self.assertAllClose(x, y, atol=atol, rtol=rtol)
Example #35
  def _verifySolve(self,
                   x,
                   y,
                   dtype,
                   use_placeholder,
                   fast,
                   l2_regularizer,
                   batch_shape=()):
    if not fast and l2_regularizer != 0:
      # The slow path does not support regularization.
      return
    if use_placeholder and context.executing_eagerly():
      return
    maxdim = np.max(x.shape)
    if dtype == np.float32 or dtype == np.complex64:
      tol = maxdim * 5e-4
    else:
      tol = maxdim * 5e-7
    a = x.astype(dtype)
    b = y.astype(dtype)
    if dtype in [np.complex64, np.complex128]:
      a.imag = a.real
      b.imag = b.real
    # numpy.linalg.lstsq does not support batching, so we just solve a single
    # system and replicate the solution and residual norm.
    np_ans = _SolveWithNumpy(x, y, l2_regularizer=l2_regularizer)
    np_r = np.dot(np.conj(a.T), b - np.dot(a, np_ans))
    np_r_norm = np.sqrt(np.sum(np.conj(np_r) * np_r))
    if batch_shape:
      a = np.tile(a, batch_shape + (1, 1))
      b = np.tile(b, batch_shape + (1, 1))
      np_ans = np.tile(np_ans, batch_shape + (1, 1))
      np_r_norm = np.tile(np_r_norm, batch_shape)
    if use_placeholder:
      a_ph = array_ops.placeholder(dtypes.as_dtype(dtype))
      b_ph = array_ops.placeholder(dtypes.as_dtype(dtype))
      feed_dict = {a_ph: a, b_ph: b}
      tf_ans = linalg_ops.matrix_solve_ls(
          a_ph, b_ph, fast=fast, l2_regularizer=l2_regularizer)
    else:
      tf_ans = linalg_ops.matrix_solve_ls(
          a, b, fast=fast, l2_regularizer=l2_regularizer)
      feed_dict = None
      self.assertEqual(np_ans.shape, tf_ans.get_shape())
    if feed_dict:
      with self.session(use_gpu=True) as sess:
        tf_ans_val = sess.run(tf_ans, feed_dict=feed_dict)
    else:
      tf_ans_val = self.evaluate(tf_ans)
    self.assertEqual(np_ans.shape, tf_ans_val.shape)
    self.assertAllClose(np_ans, tf_ans_val, atol=2 * tol, rtol=2 * tol)

    if l2_regularizer == 0:
      # The least squares solution should satisfy A^H * (b - A*x) = 0.
      tf_r = b - math_ops.matmul(a, tf_ans)
      tf_r = math_ops.matmul(a, tf_r, adjoint_a=True)
      tf_r_norm = linalg_ops.norm(tf_r, ord="fro", axis=[-2, -1])
      if feed_dict:
        with self.session(use_gpu=True) as sess:
          tf_ans_val, tf_r_norm_val = sess.run(
              [tf_ans, tf_r_norm], feed_dict=feed_dict)
      else:
        tf_ans_val, tf_r_norm_val = self.evaluate([tf_ans, tf_r_norm])
      self.assertAllClose(np_r_norm, tf_r_norm_val, atol=tol, rtol=tol)
Example #36
    def __init__(self, method_name='runTest'):
        super(XLATestCase, self).__init__(method_name)
        if 'XLA' in FLAGS.test_device:
            context.context().enable_xla_devices()
        context.context(
        ).enable_mlir_bridge = test_util.is_mlir_bridge_enabled()

        self.device = FLAGS.test_device
        self.has_custom_call = (self.device == 'XLA_CPU')
        self._all_tf_types = set([
            dtypes.as_dtype(types_pb2.DataType.Value(name))
            for name in FLAGS.types.split(',')
        ])
        self.int_tf_types = set(
            [dtype for dtype in self._all_tf_types if dtype.is_integer])
        self._float_tf_types = set(
            [dtype for dtype in self._all_tf_types if dtype.is_floating])
        self.complex_tf_types = set(
            [dtype for dtype in self._all_tf_types if dtype.is_complex])
        self._numeric_tf_types = set(self.int_tf_types | self._float_tf_types
                                     | self.complex_tf_types)
        self.quantized_tf_types = set(dtype for dtype in self._all_tf_types
                                      if dtype.is_quantized)

        # Quantized types don't have a numpy equivalent, include them in
        # all_tf_types but not in all_types.
        # TODO(b/115960798): Parametrize tests on TF types instead of numpy types
        # and remove all_types.
        self._all_types = set(dtype.as_numpy_dtype
                              for dtype in self._all_tf_types
                              if not dtype.is_quantized)
        self._int_types = set(
            [dtype.as_numpy_dtype for dtype in self.int_tf_types])
        self.signed_int_types = set(dtype.as_numpy_dtype
                                    for dtype in self.int_tf_types
                                    if not dtype.is_unsigned)
        self.unsigned_int_types = set(dtype.as_numpy_dtype
                                      for dtype in self.int_tf_types
                                      if dtype.is_unsigned)
        self._float_types = set(
            [dtype.as_numpy_dtype for dtype in self._float_tf_types])
        self.complex_types = set(
            [dtype.as_numpy_dtype for dtype in self.complex_tf_types])
        self._numeric_types = set(self._int_types | self._float_types
                                  | self.complex_types)

        # Parse the manifest file, if any, into a regex identifying tests to
        # disable
        # TODO(xpan): Make it text proto if it doesn't scale.
        # Each line of the manifest file specifies an entry. The entry can be
        # 1) TestNameRegex  // E.g. CumprodTest.* Or
        # 2) TestName TypeName  // E.g. AdamOptimizerTest.testSharing DT_BFLOAT16
        # Form 1) disables the entire test, while form 2) only filters out some
        # numeric types so that they are not used in those tests.
        self.disabled_regex = None
        self._method_types_filter = {}

        if FLAGS.disabled_manifest is not None:
            with open(FLAGS.disabled_manifest, 'r') as manifest_file:
                disabled_regex, self._method_types_filter = (
                    parse_disabled_manifest(manifest_file.read()))
                if disabled_regex:
                    self.disabled_regex = re.compile(disabled_regex)

        if FLAGS.tf_xla_flags is not None:
            os.environ['TF_XLA_FLAGS'] = FLAGS.tf_xla_flags
Example #37
            if context.executing_eagerly():
                values = [
                    (seed,
                     stateless_op(seed=constant_op.constant(seed, seed_type)))
                    for seed in seeds
                ]
            else:
                # Have this branch because the above branch is too slow in graph
                # mode
                seed_t = array_ops.placeholder(seed_type, shape=[2])
                pure = stateless_op(seed=seed_t)
                values = [(seed, pure.eval(feed_dict={seed_t: seed}))
                          for seed in seeds]
            for s0, v0 in values:
                for s1, v1 in values:
                    if dtypes.as_dtype(v0.dtype) != dtypes.bfloat16:
                        self.assertEqual(s0 == s1, np.all(v0 == v1))
                    elif s0 == s1:
                        # Skip the s0 != s1 case because v0 and v1 can be either equal or
                        # unequal in that case due to bfloat16's low precision
                        self.assertAllEqual(v0, v1)

    @parameterized.named_parameters(
        ('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed)  # pylint: disable=g-complex-comprehension
        for seed_id, seed in enumerate(SEEDS)
        for case_id, case in enumerate(float_cases()))
    @test_util.disable_tfrt(
        'tensorflow::DirectSession::Run crashes. b/156187396')
    def testMatchFloat(self, case, seed):
        if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
            # This test was passing before because soft placement silently picked the
Example #38
 def assertDTypeEqual(self, a, b):
     self.assertEqual(dtypes.as_dtype(a), dtypes.as_dtype(b))
Example #39
def random_uniform(shape,
                   minval=0,
                   maxval=None,
                   dtype=dtypes.float32,
                   seed=None,
                   name=None):
    """Outputs random values from a uniform distribution.

  The generated values follow a uniform distribution in the range
  `[minval, maxval)`. The lower bound `minval` is included in the range, while
  the upper bound `maxval` is excluded.

  For floats, the default range is `[0, 1)`.  For ints, at least `maxval` must
  be specified explicitly.

  In the integer case, the random integers are slightly biased unless
  `maxval - minval` is an exact power of two.  The bias is small for values of
  `maxval - minval` significantly smaller than the range of the output (either
  `2**32` or `2**64`).

  Examples:

  >>> tf.random.uniform(shape=[2])
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([..., ...], dtype=float32)>
  >>> tf.random.uniform(shape=[], minval=-1., maxval=0.)
  <tf.Tensor: shape=(), dtype=float32, numpy=-...>
  >>> tf.random.uniform(shape=[], minval=5, maxval=10, dtype=tf.int64)
  <tf.Tensor: shape=(), dtype=int64, numpy=...>

  The `seed` argument produces a deterministic sequence of tensors across
  multiple calls. To repeat that sequence, use `tf.random.set_seed`:

  >>> tf.random.set_seed(5)
  >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10)
  <tf.Tensor: shape=(), dtype=int32, numpy=2>
  >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>
  >>> tf.random.set_seed(5)
  >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10)
  <tf.Tensor: shape=(), dtype=int32, numpy=2>
  >>> tf.random.uniform(shape=[], maxval=3, dtype=tf.int32, seed=10)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>

  Without `tf.random.set_seed` but with a `seed` argument specified, small
  changes to function graphs or previously executed operations will change the
  returned value. See `tf.random.set_seed` for details.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
    minval: A Tensor or Python value of type `dtype`, broadcastable with
      `maxval`. The lower bound on the range of random values to generate
      (inclusive).  Defaults to 0.
    maxval: A Tensor or Python value of type `dtype`, broadcastable with
      `minval`. The upper bound on the range of random values to generate
      (exclusive). Defaults to 1 if `dtype` is floating point.
    dtype: The type of the output: `float16`, `float32`, `float64`, `int32`,
      or `int64`.
    seed: A Python integer. Used in combination with `tf.random.set_seed` to
      create a reproducible sequence of tensors across multiple calls.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random uniform values.

  Raises:
    ValueError: If `dtype` is integral and `maxval` is not specified.
  """
    dtype = dtypes.as_dtype(dtype)
    if dtype not in (dtypes.float16, dtypes.bfloat16, dtypes.float32,
                     dtypes.float64, dtypes.int32, dtypes.int64):
        raise ValueError("Invalid dtype %r" % dtype)
    if maxval is None:
        if dtype.is_integer:
            raise ValueError("Must specify maxval for integer dtype %r" %
                             dtype)
        maxval = 1
    with ops.name_scope(name, "random_uniform",
                        [shape, minval, maxval]) as name:
        shape = tensor_util.shape_tensor(shape)
        minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
        maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
        seed1, seed2 = random_seed.get_seed(seed)
        if dtype.is_integer:
            result = gen_random_ops.random_uniform_int(shape,
                                                       minval,
                                                       maxval,
                                                       seed=seed1,
                                                       seed2=seed2,
                                                       name=name)
        else:
            rnd = gen_random_ops.random_uniform(shape,
                                                dtype,
                                                seed=seed1,
                                                seed2=seed2)
            result = math_ops.add(rnd * (maxval - minval), minval, name=name)
        # TODO(b/132092188): C++ shape inference inside functional ops does not
        # cross FuncGraph boundaries since that information is only available in
        # python. So we manually get the static shape using
        # `constant_value_as_shape` which *does* cross function boundaries.
        tensor_util.maybe_set_static_shape(result, shape)
        return result
Example #40
 def testIsUnsigned(self):
     self.assertEqual(dtypes.as_dtype("int8").is_unsigned, False)
     self.assertEqual(dtypes.as_dtype("int16").is_unsigned, False)
     self.assertEqual(dtypes.as_dtype("int32").is_unsigned, False)
     self.assertEqual(dtypes.as_dtype("int64").is_unsigned, False)
     self.assertEqual(dtypes.as_dtype("uint8").is_unsigned, True)
     self.assertEqual(dtypes.as_dtype("uint16").is_unsigned, True)
     self.assertEqual(dtypes.as_dtype("float32").is_unsigned, False)
     self.assertEqual(dtypes.as_dtype("float64").is_unsigned, False)
     self.assertEqual(dtypes.as_dtype("bool").is_unsigned, False)
     self.assertEqual(dtypes.as_dtype("string").is_unsigned, False)
     self.assertEqual(dtypes.as_dtype("complex64").is_unsigned, False)
     self.assertEqual(dtypes.as_dtype("complex128").is_unsigned, False)
     self.assertEqual(dtypes.as_dtype("bfloat16").is_unsigned, False)
     self.assertEqual(dtypes.as_dtype("qint8").is_unsigned, False)
     self.assertEqual(dtypes.as_dtype("qint16").is_unsigned, False)
     self.assertEqual(dtypes.as_dtype("qint32").is_unsigned, False)
     self.assertEqual(dtypes.as_dtype("quint8").is_unsigned, False)
     self.assertEqual(dtypes.as_dtype("quint16").is_unsigned, False)
Example #41
 def testIsFloating(self):
     self.assertEqual(dtypes.as_dtype("int8").is_floating, False)
     self.assertEqual(dtypes.as_dtype("int16").is_floating, False)
     self.assertEqual(dtypes.as_dtype("int32").is_floating, False)
     self.assertEqual(dtypes.as_dtype("int64").is_floating, False)
     self.assertEqual(dtypes.as_dtype("uint8").is_floating, False)
     self.assertEqual(dtypes.as_dtype("uint16").is_floating, False)
     self.assertEqual(dtypes.as_dtype("complex64").is_floating, False)
     self.assertEqual(dtypes.as_dtype("complex128").is_floating, False)
     self.assertEqual(dtypes.as_dtype("float32").is_floating, True)
     self.assertEqual(dtypes.as_dtype("float64").is_floating, True)
     self.assertEqual(dtypes.as_dtype("string").is_floating, False)
     self.assertEqual(dtypes.as_dtype("bool").is_floating, False)
     self.assertEqual(dtypes.as_dtype("bfloat16").is_integer, False)
     self.assertEqual(dtypes.as_dtype("qint8").is_floating, False)
     self.assertEqual(dtypes.as_dtype("qint16").is_floating, False)
     self.assertEqual(dtypes.as_dtype("qint32").is_floating, False)
     self.assertEqual(dtypes.as_dtype("quint8").is_floating, False)
     self.assertEqual(dtypes.as_dtype("quint16").is_floating, False)
Example #42
    def do_transformation(self):
        """Fuse the quantized op with the following requantize op.
            The transformation has two stages, the first step is to fuse the patterns
            defined in self.fuse_patterns and the last step is to fuse the self.sum_patterns.
        Returns:
            [graphdef]: the optimized graphdef object
        """
        int8_type = dtypes.qint8.as_datatype_enum
        uint8_type = dtypes.quint8.as_datatype_enum
        float32_type = dtypes.float32.as_datatype_enum
        qint32_type = dtypes.qint32.as_datatype_enum

        while True:
            target_nodes = self.graph_analyzer.query_fusion_pattern_nodes(
                self.fuse_patterns)
            if len(target_nodes) == 0:
                break

            i = target_nodes[0]

            quantized_node_name = i[0]
            quantized_node = self.graph_info[quantized_node_name].node
            requantize_node_name = i[1]
            requantize_node = self.graph_info[requantize_node_name].node
            requested_output_min_name = requantize_node.input[3]
            requested_output_max_name = requantize_node.input[4]

            quantized_node_op = i[-1][0]

            new_node = node_def_pb2.NodeDef()

            new_node.op = quantized_node_op + "AndRequantize"
            new_node.name = requantize_node_name
            for _, value in enumerate(quantized_node.input):
                new_node.input.append(value)

            new_node.input.append(requested_output_min_name)
            new_node.input.append(requested_output_max_name)
            if 'Tinput' in quantized_node.attr:
                new_node.attr["Tinput"].CopyFrom(quantized_node.attr['Tinput'])
            if 'Tfilter' in quantized_node.attr:
                new_node.attr["Tfilter"].CopyFrom(
                    quantized_node.attr['Tfilter'])
            if 'strides' in quantized_node.attr:
                new_node.attr["strides"].CopyFrom(
                    quantized_node.attr['strides'])
            if 'padding' in quantized_node.attr:
                new_node.attr["padding"].CopyFrom(
                    quantized_node.attr['padding'])

            parent_node_name = Helper.node_name_from_input(
                quantized_node.input[0])
            max_filter_node = self.graph_info[new_node.input[6]].node
            min_filter_node = self.graph_info[new_node.input[5]].node
            last_node = self.graph_info[new_node.input[0]].node
            if last_node.op.find('Requantize') != -1:
                bias_node = self.graph_info[new_node.input[2]].node
                max_input_node = self.graph_info[last_node.input[-1]].node
                min_input_node = self.graph_info[last_node.input[-2]].node
                min_input = (min_input_node.attr['value'].tensor.float_val)[0]
                max_input = (max_input_node.attr['value'].tensor.float_val)[0]
                if 'Depthwise' in quantized_node_op or requantize_node.op.find(
                        'PerChannel') != -1:
                    channel_size = max_filter_node.attr[
                        'value'].tensor.tensor_shape.dim[0].size
                    max_filter_tensor = tensor_util.MakeNdarray(
                        max_filter_node.attr['value'].tensor)
                    min_filter_tensor = tensor_util.MakeNdarray(
                        min_filter_node.attr['value'].tensor)
                else:
                    channel_size = 1
                    max_filter_tensor = []
                    min_filter_tensor = []
                    max_filter_tensor.append(
                        (max_filter_node.attr['value'].tensor.float_val)[0])
                    min_filter_tensor.append(
                        (min_filter_node.attr['value'].tensor.float_val)[0])
                bias_tensor = tensor_util.MakeNdarray(self.graph_info[
                    new_node.input[2]].node.attr['value'].tensor)
                bias_length = bias_tensor.shape[0]
                scales = []
                for i in range(channel_size):
                    scales.append(255.0 * 127.0 /
                                  (max(abs(max_input), abs(min_input)) *
                                   max(abs(max_filter_tensor[i]),
                                       abs(min_filter_tensor[i]))))

                int32_bias = []
                if channel_size > 1:
                    for i in range(bias_length):
                        int32_bias.append((int)(bias_tensor[i] * scales[i]))
                else:
                    for i in range(bias_length):
                        int32_bias.append((int)(bias_tensor[i] * scales[0]))

                bias_node.attr['dtype'].CopyFrom(
                    attr_value_pb2.AttrValue(type=float32_type if self.
                                             device == 'gpu' else qint32_type))
                bias_node.attr['value'].CopyFrom(
                    attr_value_pb2.AttrValue(
                        tensor=tensor_util.make_tensor_proto(
                            bias_tensor if self.device == 'gpu' else
                            int32_bias, dtypes.float32 if self.device ==
                            'gpu' else dtypes.int32, bias_tensor.shape)))

                bias_node.attr['value'].tensor.dtype = float32_type \
                                        if self.device == 'gpu' else qint32_type
                new_node.attr["Tbias"].CopyFrom(attr_value_pb2.AttrValue(type=float32_type \
                                                if self.device == 'gpu' else qint32_type))
            else:
                new_node.attr["Tbias"].CopyFrom(
                    attr_value_pb2.AttrValue(type=float32_type))

            if "padding_list" in quantized_node.attr:
                new_node.attr["padding_list"].CopyFrom(
                    quantized_node.attr['padding_list'])
            if "dilations" in quantized_node.attr:
                new_node.attr["dilations"].CopyFrom(
                    quantized_node.attr['dilations'])

            if quantized_node.op == "QuantizedConv2D" or \
                    quantized_node.op == "QuantizedConv2DWithBias":
                new_node.attr["out_type"].CopyFrom(
                    attr_value_pb2.AttrValue(type=int8_type))
            else:
                new_node.attr["out_type"].CopyFrom(
                    attr_value_pb2.AttrValue(type=uint8_type))
            self.graph_analyzer.replace_single_node(
                new_node, [parent_node_name], quantized_node_name,
                [self.graph_info[requantize_node_name].outputs[0]],
                requantize_node_name)
            self.graph_analyzer.remove_node(quantized_node_name)

        target_nodes = self.graph_analyzer.query_fusion_pattern_nodes(
            self.sum_pattern)
        while target_nodes:
            i = target_nodes[0]
            quantized_node_name = i[0]
            quantized_node = self.graph_info[quantized_node_name].node
            requantize_node_name = i[1]
            requantize_node = self.graph_info[requantize_node_name].node
            requested_output_min_name = requantize_node.input[3]
            requested_output_max_name = requantize_node.input[4]

            quantized_node_op = i[-1][0]

            new_node = node_def_pb2.NodeDef()

            new_node.op = quantized_node_op + "AndRequantize"
            new_node.name = requantize_node_name

            for value in quantized_node.input[:-1]:
                new_node.input.append(value)

            new_node.attr["Tinput"].CopyFrom(quantized_node.attr['Tinput'])
            new_node.attr["Tfilter"].CopyFrom(quantized_node.attr['Tfilter'])
            new_node.attr["strides"].CopyFrom(quantized_node.attr['strides'])
            new_node.attr["padding"].CopyFrom(quantized_node.attr['padding'])

            new_node.input.append(requested_output_min_name)
            new_node.input.append(requested_output_max_name)
            deq_node = self.graph_info[Helper.node_name_from_input(
                quantized_node.input[-1])].node
            if deq_node.op != 'Dequantize' and deq_node.op.find(
                    "Quantize") == -1:
                self.logger.debug(
                    'Dropping fusion due to unsupported pattern..... {}'.
                    format(i))
                target_nodes.remove(i)
                continue
            if deq_node.op == 'Dequantize':
                original_summand_node = self.graph_info[
                    Helper.node_name_from_input(deq_node.input[0])].node
            else:
                original_summand_node = deq_node
            summand_op_type = uint8_type if dtypes.as_dtype(
                deq_node.attr["T"].type) == uint8_type else int8_type

            # Append the summand node's first three outputs (its quantized tensor
            # and the min/max range tensors) as the extra inputs of the fused sum op.
            for j in range(3):
                new_node.input.append(original_summand_node.name +
                                      ':{}'.format(j))

            if "padding_list" in quantized_node.attr:
                new_node.attr["padding_list"].CopyFrom(
                    quantized_node.attr['padding_list'])

            if "dilations" in quantized_node.attr:
                new_node.attr["dilations"].CopyFrom(
                    quantized_node.attr['dilations'])
            new_node.attr["out_type"].CopyFrom(
                attr_value_pb2.AttrValue(type=uint8_type))

            new_node.attr["Tbias"].CopyFrom(
                attr_value_pb2.AttrValue(type=float32_type))

            if summand_op_type == int8_type:
                new_node.op = "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize"
            new_node.attr["Tsummand"].CopyFrom(
                attr_value_pb2.AttrValue(type=summand_op_type))

            self.graph_analyzer.replace_single_node(
                new_node,
                [quantized_node.input[0], original_summand_node.name],
                quantized_node.name,
                self.graph_info[requantize_node_name].outputs,
                requantize_node_name)
            self.graph_analyzer.remove_node(quantized_node_name)

            if deq_node.op == 'Dequantize':
                self.graph_analyzer.remove_node_with_single_input_output(
                    deq_node.name)
            target_nodes.remove(i)

        return self.graph_analyzer.dump_graph()
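For reference, a minimal standalone sketch of the bias requantization performed above, with made-up calibration ranges; every value and name below is illustrative and not taken from the transformer:

import numpy as np

min_input, max_input = -4.0, 6.0       # assumed quint8 activation range
min_filter, max_filter = -0.5, 0.25    # assumed qint8 filter range (one channel)

scale = 255.0 * 127.0 / (max(abs(max_input), abs(min_input)) *
                         max(abs(max_filter), abs(min_filter)))

bias_fp32 = np.array([0.01, -0.02], dtype=np.float32)
bias_qint32 = (bias_fp32 * scale).astype(np.int32)  # what the CPU path feeds as Tbias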
Example #43
0
 def testPythonTypesConversion(self):
   self.assertIs(dtypes.float32, dtypes.as_dtype(float))
   self.assertIs(dtypes.bool, dtypes.as_dtype(bool))
Example #44
0
    def add_weight(self,
                   name,
                   shape,
                   dtype=None,
                   initializer=None,
                   regularizer=None,
                   trainable=None,
                   constraint=None,
                   use_resource=None,
                   synchronization=vs.VariableSynchronization.AUTO,
                   aggregation=vs.VariableAggregation.NONE,
                   partitioner=None,
                   **kwargs):
        """Adds a new variable to the layer, or gets an existing one; returns it.

    Arguments:
      name: variable name.
      shape: variable shape.
      dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
      initializer: initializer instance (callable).
      regularizer: regularizer instance (callable).
      trainable: whether the variable should be part of the layer's
        "trainable_variables" (e.g. variables, biases)
        or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
        Note, if the current variable scope is marked as non-trainable
        then this parameter is ignored and any added variables are also
        marked as non-trainable. `trainable` defaults to `True` unless
        `synchronization` is set to `ON_READ`.
      constraint: constraint instance (callable).
      use_resource: Whether to use `ResourceVariable`.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      partitioner: (optional) partitioner instance (callable).  If
        provided, when the requested variable is created it will be split
        into multiple partitions according to `partitioner`.  In this case,
        an instance of `PartitionedVariable` is returned.  Available
        partitioners include `tf.compat.v1.fixed_size_partitioner` and
        `tf.compat.v1.variable_axis_size_partitioner`.  For more details, see
        the documentation of `tf.compat.v1.get_variable` and the  "Variable
        Partitioners and Sharding" section of the API guide.
      **kwargs: Additional keyword arguments.

    Returns:
      The created variable.  Usually either a `Variable` or `ResourceVariable`
      instance.  If `partitioner` is not `None`, a `PartitionedVariable`
      instance is returned.

    Raises:
      RuntimeError: If called with partitioned variable regularization and
        eager execution is enabled.
      ValueError: When trainable has been set to True with synchronization
        set as `ON_READ`.
    """
        for kwarg in kwargs:
            if kwarg != 'experimental_autocast':
                raise TypeError('Unknown keyword argument:', kwarg)
        if self._keras_style:
            return super(Layer, self).add_weight(
                name=name,
                shape=shape,
                dtype=dtype,
                initializer=initializer,
                regularizer=regularizer,
                trainable=trainable and self.trainable,
                constraint=constraint,
                use_resource=use_resource,
                synchronization=vs.VariableSynchronization.AUTO,
                aggregation=vs.VariableAggregation.NONE,
                partitioner=partitioner,
                **kwargs)

        if synchronization == vs.VariableSynchronization.ON_READ:
            if trainable:
                raise ValueError(
                    'Synchronization value can be set to '
                    'VariableSynchronization.ON_READ only for non-trainable variables. '
                    'You have specified trainable=True and '
                    'synchronization=VariableSynchronization.ON_READ.')
            else:
                # Set trainable to be false when variable is to be synced on read.
                trainable = False
        elif trainable is None:
            trainable = True

        def _should_add_regularizer(variable, existing_variable_set):
            if isinstance(variable, tf_variables.PartitionedVariable):
                for var in variable:
                    if var in existing_variable_set:
                        return False
                return True
            else:
                return variable not in existing_variable_set

        init_graph = None
        if not context.executing_eagerly():
            default_graph = ops.get_default_graph()
            if default_graph.building_function:
                with ops.init_scope():
                    # Retrieve the variables from the graph into which variables
                    # will be lifted; if initialization ops will be lifted into
                    # the eager context, then there is nothing to retrieve, since variable
                    # collections are not supported when eager execution is enabled.
                    if not context.executing_eagerly():
                        init_graph = ops.get_default_graph()
                        existing_variables = set(
                            tf_variables.global_variables())
            else:
                # Initialization ops will not be lifted out of the default graph.
                init_graph = default_graph
                existing_variables = set(tf_variables.global_variables())

        if dtype is None:
            dtype = self.dtype or dtypes.float32

        self._set_scope(None)
        reuse = self.built or self._reuse
        prev_len_trainable = len(self._trainable_weights)
        with vs.variable_scope(self._scope,
                               reuse=reuse,
                               auxiliary_name_scope=False) as scope:
            self._current_scope = scope
            with ops.name_scope(self._name_scope()):
                use_resource = (use_resource or self._use_resource_variables
                                or scope.use_resource)
                if initializer is None:
                    initializer = scope.initializer
                variable = super(Layer, self).add_weight(
                    name,
                    shape,
                    dtype=dtypes.as_dtype(dtype),
                    initializer=initializer,
                    trainable=trainable and self.trainable,
                    constraint=constraint,
                    partitioner=partitioner,
                    use_resource=use_resource,
                    synchronization=synchronization,
                    aggregation=aggregation,
                    getter=vs.get_variable,
                    **kwargs)

                if regularizer:
                    if (ops.executing_eagerly_outside_functions()
                            or _should_add_regularizer(variable,
                                                       existing_variables)):
                        self._handle_weight_regularization(
                            name, variable, regularizer)

                if init_graph is not None:
                    # Handle edge case where a custom getter has overridden `trainable`.
                    # There is one known occurrence of this, in unit test
                    # testBasicRNNCellNotTrainable in
                    # contrib.rnn.python.kernel_tests.core_rnn_cell_test
                    with init_graph.as_default():
                        trainable_variables = tf_variables.trainable_variables()
                    if (trainable and self.trainable
                            and variable not in trainable_variables):
                        # A custom getter / variable scope overrode the trainable flag.
                        extra_trainable_vars = self._trainable_weights[
                            prev_len_trainable:]
                        self._trainable_weights = (
                            self._trainable_weights[:prev_len_trainable])
                        self._non_trainable_weights += extra_trainable_vars
        return variable
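A minimal usage sketch for the add_weight API above, assuming TF 1.x graph mode; MyDense, its width, and the initializer are hypothetical choices, not part of the original layer:

from tensorflow.python.layers import base
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops

class MyDense(base.Layer):
    """Toy layer that creates one trainable kernel via add_weight."""

    def build(self, input_shape):
        self.kernel = self.add_weight(
            name='kernel',
            shape=[int(input_shape[-1]), 16],
            initializer=init_ops.glorot_uniform_initializer(),
            trainable=True)

    def call(self, inputs):
        return math_ops.matmul(inputs, self.kernel)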
Example #45
0
 def __call__(self, shape, dtype=dtypes.float32):
     dtype = dtypes.as_dtype(dtype)
     return array_ops.zeros(shape, dtype)
Example #46
0
def _IsTrainable(tensor):
  dtype = dtypes.as_dtype(tensor.dtype)
  return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
                              dtypes.complex64, dtypes.complex128)
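A quick behavioral sketch; the tensors below are illustrative:

from tensorflow.python.framework import ops

_IsTrainable(ops.convert_to_tensor(1.0))  # True: float32 is differentiable
_IsTrainable(ops.convert_to_tensor(1))    # False: int32 has no gradient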
Example #47
0
def random_uniform(shape,
                   minval=0,
                   maxval=None,
                   dtype=dtypes.float32,
                   seed=None,
                   name=None):
    """Outputs random values from a uniform distribution.

  The generated values follow a uniform distribution in the range
  `[minval, maxval)`. The lower bound `minval` is included in the range, while
  the upper bound `maxval` is excluded.

  For floats, the default range is `[0, 1)`.  For ints, at least `maxval` must
  be specified explicitly.

  In the integer case, the random integers are slightly biased unless
  `maxval - minval` is an exact power of two.  The bias is small for values of
  `maxval - minval` significantly smaller than the range of the output (either
  `2**32` or `2**64`).

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
    minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the
      range of random values to generate.  Defaults to 0.
    maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on
      the range of random values to generate.  Defaults to 1 if `dtype` is
      floating point.
    dtype: The type of the output: `float16`, `float32`, `float64`, `int32`,
      or `int64`.
    seed: A Python integer. Used to create a random seed for the distribution.
      See `tf.set_random_seed`
      for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random uniform values.

  Raises:
    ValueError: If `dtype` is integral and `maxval` is not specified.
  """
    dtype = dtypes.as_dtype(dtype)
    if dtype not in (dtypes.float16, dtypes.bfloat16, dtypes.float32,
                     dtypes.float64, dtypes.int32, dtypes.int64):
        raise ValueError("Invalid dtype %r" % dtype)
    if maxval is None:
        if dtype.is_integer:
            raise ValueError("Must specify maxval for integer dtype %r" %
                             dtype)
        maxval = 1
    with ops.name_scope(name, "random_uniform",
                        [shape, minval, maxval]) as name:
        shape = _ShapeTensor(shape)
        minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
        maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
        seed1, seed2 = random_seed.get_seed(seed)
        if dtype.is_integer:
            return gen_random_ops.random_uniform_int(shape,
                                                     minval,
                                                     maxval,
                                                     seed=seed1,
                                                     seed2=seed2,
                                                     name=name)
        else:
            rnd = gen_random_ops.random_uniform(shape,
                                                dtype,
                                                seed=seed1,
                                                seed2=seed2)
            return math_ops.add(rnd * (maxval - minval), minval, name=name)
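A small usage sketch of the function above; it returns graph tensors to be evaluated in a TF 1.x session, and the shapes and bounds here are illustrative:

floats = random_uniform([2, 3], seed=42)                               # uniform in [0, 1)
ints = random_uniform([2, 3], maxval=10, dtype=dtypes.int32, seed=42)  # uniform in [0, 10)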
Example #48
0
 def __init__(self, name=None, dtype=None):
     super(Metric, self).__init__(name=name, dtype=dtype)
     self.stateful = True  # All metric layers are stateful.
     self.built = True
     self._dtype = K.floatx() if dtype is None else dtypes.as_dtype(
         dtype).name
Example #49
0
 def testPythonLongConversion(self):
   self.assertIs(dtypes.int64, dtypes.as_dtype(np.array(2**32).dtype))
Example #50
0
  def testNumpyConversion(self):
    self.assertIs(dtypes.float32, dtypes.as_dtype(np.float32))
    self.assertIs(dtypes.float64, dtypes.as_dtype(np.float64))
    self.assertIs(dtypes.int32, dtypes.as_dtype(np.int32))
    self.assertIs(dtypes.int64, dtypes.as_dtype(np.int64))
    self.assertIs(dtypes.uint8, dtypes.as_dtype(np.uint8))
    self.assertIs(dtypes.uint16, dtypes.as_dtype(np.uint16))
    self.assertIs(dtypes.int16, dtypes.as_dtype(np.int16))
    self.assertIs(dtypes.int8, dtypes.as_dtype(np.int8))
    self.assertIs(dtypes.complex64, dtypes.as_dtype(np.complex64))
    self.assertIs(dtypes.complex128, dtypes.as_dtype(np.complex128))
    self.assertIs(dtypes.string, dtypes.as_dtype(np.object_))
    self.assertIs(dtypes.string,
                  dtypes.as_dtype(np.array(["foo", "bar"]).dtype))
    self.assertIs(dtypes.bool, dtypes.as_dtype(np.bool_))
    with self.assertRaises(TypeError):
      dtypes.as_dtype(np.dtype([("f1", np.uint), ("f2", np.int32)]))

    class AnObject(object):
      dtype = "f4"

    self.assertIs(dtypes.float32, dtypes.as_dtype(AnObject))

    class AnotherObject(object):
      dtype = np.dtype(np.complex64)

    self.assertIs(dtypes.complex64, dtypes.as_dtype(AnotherObject))
Example #51
0
 def testAllPybind11DTypeConvertibleToDType(self):
   for datatype_enum in types_pb2.DataType.values():
     if datatype_enum == types_pb2.DT_INVALID:
       continue
     dtype = _dtypes.DType(datatype_enum)
     self.assertEqual(dtypes.as_dtype(datatype_enum), dtype)
Example #52
0
def random_normal_correlated_columns(shape,
                                     mean=0.0,
                                     stddev=1.0,
                                     dtype=dtypes.float32,
                                     eps=1e-4,
                                     seed=None):
    """Batch matrix with (possibly complex) Gaussian entries and correlated cols.

  Returns random batch matrix `A` with specified element-wise `mean`, `stddev`,
  living close to an embedded hyperplane.

  Suppose `shape[-2:] = (M, N)`.

  If `M < N`, `A` is a random `M x N` [batch] matrix with iid Gaussian entries.

  If `M >= N`, then the columns of `A` will be made almost dependent as follows:

  ```
  L = random normal N x N-1 matrix, mean = 0, stddev = 1 / sqrt(N - 1)
  B = random normal M x N-1 matrix, mean = 0, stddev = stddev.

  G = (L B^H)^H, a random normal M x N matrix, living on N-1 dim hyperplane
  E = a random normal M x N matrix, mean = 0, stddev = eps
  mu = a constant M x N matrix, equal to the argument "mean"

  A = G + E + mu
  ```

  Args:
    shape:  Python list of integers.
      Shape of the returned tensor.  Must be at least length two.
    mean:  `Tensor` giving mean of normal to sample from.
    stddev:  `Tensor` giving stdev of normal to sample from.
    dtype:  `TensorFlow` `dtype` or numpy dtype
    eps:  Distance each column is perturbed from the low-dimensional subspace.
    seed:  Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.

  Raises:
    ValueError:  If `shape` is not at least length 2.
  """
    dtype = dtypes.as_dtype(dtype)

    if len(shape) < 2:
        raise ValueError(
            "Argument shape must be at least length 2.  Found: %s" % shape)

    # Shape is the final shape, e.g. [..., M, N]
    shape = list(shape)
    batch_shape = shape[:-2]
    m, n = shape[-2:]

    # If there is only one column, "they" are by definition correlated.
    if n < 2 or n < m:
        return random_normal(shape,
                             mean=mean,
                             stddev=stddev,
                             dtype=dtype,
                             seed=seed)

    # Shape of the matrix with only n - 1 columns that we will embed in higher
    # dimensional space.
    smaller_shape = batch_shape + [m, n - 1]

    # Shape of the embedding matrix, mapping batch matrices
    # from [..., N-1, M] to [..., N, M]
    embedding_mat_shape = batch_shape + [n, n - 1]

    # This stddev for the embedding_mat ensures final result has correct stddev.
    stddev_mat = 1 / np.sqrt(n - 1)

    with ops.name_scope("random_normal_correlated_columns"):
        smaller_mat = random_normal(smaller_shape,
                                    mean=0.0,
                                    stddev=stddev_mat,
                                    dtype=dtype,
                                    seed=seed)

        if seed is not None:
            seed += 1287

        embedding_mat = random_normal(embedding_mat_shape,
                                      dtype=dtype,
                                      seed=seed)

        embedded_t = math_ops.matmul(embedding_mat,
                                     smaller_mat,
                                     transpose_b=True)
        embedded = array_ops.matrix_transpose(embedded_t)

        mean_mat = array_ops.ones_like(embedded) * mean

        return embedded + random_normal(shape, stddev=eps,
                                        dtype=dtype) + mean_mat
Example #53
0
 def testStringConversion(self):
   self.assertIs(dtypes.float32, dtypes.as_dtype("float32"))
   self.assertIs(dtypes.float64, dtypes.as_dtype("float64"))
   self.assertIs(dtypes.int32, dtypes.as_dtype("int32"))
   self.assertIs(dtypes.uint8, dtypes.as_dtype("uint8"))
   self.assertIs(dtypes.uint16, dtypes.as_dtype("uint16"))
   self.assertIs(dtypes.int16, dtypes.as_dtype("int16"))
   self.assertIs(dtypes.int8, dtypes.as_dtype("int8"))
   self.assertIs(dtypes.string, dtypes.as_dtype("string"))
   self.assertIs(dtypes.complex64, dtypes.as_dtype("complex64"))
   self.assertIs(dtypes.complex128, dtypes.as_dtype("complex128"))
   self.assertIs(dtypes.int64, dtypes.as_dtype("int64"))
   self.assertIs(dtypes.bool, dtypes.as_dtype("bool"))
   self.assertIs(dtypes.qint8, dtypes.as_dtype("qint8"))
   self.assertIs(dtypes.quint8, dtypes.as_dtype("quint8"))
   self.assertIs(dtypes.qint32, dtypes.as_dtype("qint32"))
   self.assertIs(dtypes.bfloat16, dtypes.as_dtype("bfloat16"))
   self.assertIs(dtypes.float32_ref, dtypes.as_dtype("float32_ref"))
   self.assertIs(dtypes.float64_ref, dtypes.as_dtype("float64_ref"))
   self.assertIs(dtypes.int32_ref, dtypes.as_dtype("int32_ref"))
   self.assertIs(dtypes.uint8_ref, dtypes.as_dtype("uint8_ref"))
   self.assertIs(dtypes.int16_ref, dtypes.as_dtype("int16_ref"))
   self.assertIs(dtypes.int8_ref, dtypes.as_dtype("int8_ref"))
   self.assertIs(dtypes.string_ref, dtypes.as_dtype("string_ref"))
   self.assertIs(dtypes.complex64_ref, dtypes.as_dtype("complex64_ref"))
   self.assertIs(dtypes.complex128_ref, dtypes.as_dtype("complex128_ref"))
   self.assertIs(dtypes.int64_ref, dtypes.as_dtype("int64_ref"))
   self.assertIs(dtypes.bool_ref, dtypes.as_dtype("bool_ref"))
   self.assertIs(dtypes.qint8_ref, dtypes.as_dtype("qint8_ref"))
   self.assertIs(dtypes.quint8_ref, dtypes.as_dtype("quint8_ref"))
   self.assertIs(dtypes.qint32_ref, dtypes.as_dtype("qint32_ref"))
   self.assertIs(dtypes.bfloat16_ref, dtypes.as_dtype("bfloat16_ref"))
   with self.assertRaises(TypeError):
     dtypes.as_dtype("not_a_type")
Example #54
0
  def __init__(self,
               num_rows,
               batch_shape=None,
               dtype=None,
               is_non_singular=True,
               is_self_adjoint=True,
               is_positive_definite=True,
               is_square=True,
               assert_proper_shapes=False,
               name="LinearOperatorIdentity"):
    r"""Initialize a `LinearOperatorIdentity`.

    The `LinearOperatorIdentity` is initialized with arguments defining `dtype`
    and shape.

    This operator is able to broadcast the leading (batch) dimensions, which
    sometimes requires copying data.  If `batch_shape` is `None`, the operator
    can take arguments of any batch shape without copying.  See examples.

    Args:
      num_rows:  Scalar non-negative integer `Tensor`.  Number of rows in the
        corresponding identity matrix.
      batch_shape:  Optional `1-D` integer `Tensor`.  The shape of the leading
        dimensions.  If `None`, this operator has no leading dimensions.
      dtype:  Data type of the matrix that this operator represents.
      is_non_singular:  Expect that this operator is non-singular.
      is_self_adjoint:  Expect that this operator is equal to its hermitian
        transpose.
      is_positive_definite:  Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`.  Note that we do not require the operator to be
        self-adjoint to be positive-definite.  See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix\
            #Extension_for_non_symmetric_matrices
      is_square:  Expect that this operator acts like square [batch] matrices.
      assert_proper_shapes:  Python `bool`.  If `False`, only perform static
        checks that initialization and method arguments have proper shape.
        If `True`, and static checks are inconclusive, add asserts to the graph.
      name: A name for this `LinearOperator`

    Raises:
      ValueError:  If `num_rows` is determined statically to be non-scalar, or
        negative.
      ValueError:  If `batch_shape` is determined statically to not be 1-D, or
        negative.
      ValueError:  If any of the following is not `True`:
        `{is_self_adjoint, is_non_singular, is_positive_definite}`.
    """
    dtype = dtype or dtypes.float32
    self._assert_proper_shapes = assert_proper_shapes

    with ops.name_scope(name):
      dtype = dtypes.as_dtype(dtype)
      if not is_self_adjoint:
        raise ValueError("An identity operator is always self adjoint.")
      if not is_non_singular:
        raise ValueError("An identity operator is always non-singular.")
      if not is_positive_definite:
        raise ValueError("An identity operator is always positive-definite.")
      if not is_square:
        raise ValueError("An identity operator is always square.")

      super(LinearOperatorIdentity, self).__init__(
          dtype=dtype,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          name=name)

      self._num_rows = linear_operator_util.shape_tensor(
          num_rows, name="num_rows")
      self._num_rows_static = tensor_util.constant_value(self._num_rows)
      self._check_num_rows_possibly_add_asserts()

      if batch_shape is None:
        self._batch_shape_arg = None
      else:
        self._batch_shape_arg = linear_operator_util.shape_tensor(
            batch_shape, name="batch_shape_arg")
        self._batch_shape_static = tensor_util.constant_value(
            self._batch_shape_arg)
        self._check_batch_shape_possibly_add_asserts()
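A small usage sketch, assuming the standard LinearOperator matmul API (not shown in this snippet); the shapes are illustrative:

from tensorflow.python.ops import array_ops

operator = LinearOperatorIdentity(num_rows=2, batch_shape=[3], dtype=dtypes.float32)
x = array_ops.ones(shape=[3, 2, 1], dtype=dtypes.float32)
y = operator.matmul(x)  # equals x, since the operator is the identity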
Example #55
0
 def testAllTypesConvertibleToDType(self):
   for datatype_enum in types_pb2.DataType.values():
     if datatype_enum == types_pb2.DT_INVALID:
       continue
     dt = dtypes.as_dtype(datatype_enum)
     self.assertEqual(datatype_enum, dt.as_datatype_enum)
Example #56
0
 def __init__(self, x_shape, x_dtype, y_shape, y_dtype, color="red"):
   self.x_shape = tensor_shape.as_shape(x_shape)
   self.x_dtype = dtypes.as_dtype(x_dtype)
   self.y_shape = tensor_shape.as_shape(y_shape)
   self.y_dtype = dtypes.as_dtype(y_dtype)
   self.color = color
Example #57
0
def _is_convertible_to_dtype(dtype):
    try:
        dtypes.as_dtype(dtype)
        return True
    except TypeError:
        return False
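Behavioral sketch (assumes numpy imported as np):

_is_convertible_to_dtype("float32")  # True: known dtype name
_is_convertible_to_dtype(np.int32)   # True: numpy scalar type
_is_convertible_to_dtype(object())   # False: as_dtype raises TypeError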
Example #58
0
 def testAsDtypeInvalidArgument(self):
   with self.assertRaises(TypeError):
     dtypes.as_dtype((dtypes.int32, dtypes.float32))
Example #59
0
    def add_variable(self,
                     name,
                     shape,
                     dtype=None,
                     initializer=None,
                     regularizer=None,
                     trainable=True):
        """Adds a new variable to the layer, or gets an existing one; returns it.

    Arguments:
      name: variable name.
      shape: variable shape.
      dtype: The type of the variable. Defaults to `self.dtype`.
      initializer: initializer instance (callable).
      regularizer: regularizer instance (callable).
      trainable: whether the variable should be part of the layer's
        "trainable_variables" (e.g. variables, biases)
        or "non_trainable_variables" (e.g. BatchNorm mean, stddev).

    Returns:
      The created variable.
    """
        if dtype is None:
            dtype = self.dtype
        existing_variables = set(tf_variables.global_variables())

        self._set_scope(None)

        with vs.variable_scope(self._scope, reuse=self.built
                               or self._reuse) as scope:
            with ops.name_scope(scope.original_name_scope):
                variable = vs.get_variable(name,
                                           shape=shape,
                                           initializer=initializer,
                                           dtype=dtypes.as_dtype(dtype),
                                           trainable=trainable
                                           and self.trainable)
                if variable in existing_variables:
                    return variable
                if regularizer:
                    # To match the behavior of tf.get_variable(), we only
                    # apply regularization if the variable is newly created.
                    if isinstance(variable, tf_variables.PartitionedVariable):
                        for v in variable:
                            with ops.colocate_with(v.op):
                                with ops.name_scope(name + '/Regularizer'):
                                    regularization = regularizer(v)
                            if regularization is not None:
                                self.add_loss(regularization)
                                _add_elements_to_collection(
                                    regularization,
                                    ops.GraphKeys.REGULARIZATION_LOSSES)
                    else:
                        with ops.colocate_with(variable.op):
                            with ops.name_scope(name + '/Regularizer'):
                                regularization = regularizer(variable)
                        if regularization is not None:
                            self.add_loss(regularization)
                            _add_elements_to_collection(
                                regularization,
                                ops.GraphKeys.REGULARIZATION_LOSSES)
        if trainable:
            self._trainable_weights.append(variable)
        else:
            self._non_trainable_weights.append(variable)
        return variable
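A usage sketch mirroring the earlier add_weight example, again with a hypothetical layer, this time through the add_variable variant shown above:

from tensorflow.python.layers import base
from tensorflow.python.ops import init_ops

class MyBias(base.Layer):
    """Toy layer that creates an unregularized bias via add_variable."""

    def build(self, input_shape):
        self.bias = self.add_variable(
            'bias',
            shape=[int(input_shape[-1])],
            initializer=init_ops.zeros_initializer())

    def call(self, inputs):
        return inputs + self.bias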
Example #60
0
 def testAsDtypeReturnsInternedVersion(self):
   dt = dtypes.DType(types_pb2.DT_VARIANT)
   self.assertIs(dtypes.as_dtype(dt), dtypes.variant)