def testShape(self):
  for dtype in self.numeric_types:
    init = np.ones([2, 3]).astype(dtype)
    with self.test_session() as session, self.test_scope():
      v = resource_variable_ops.ResourceVariable(init)
      session.run(variables.variables_initializer([v]))
      h = v.handle
      s32, s64 = session.run([
          resource_variable_ops.variable_shape(h),
          resource_variable_ops.variable_shape(h, out_type=dtypes.int64)
      ])
      self.assertEqual(s32.dtype, np.int32)
      self.assertEqual(s64.dtype, np.int64)
      self.assertAllEqual(s32, [2, 3])
      self.assertAllEqual(s64, [2, 3])
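
A minimal standalone sketch of the same call in eager mode (TF 2.x). The tf.Variable initializer and the printed values are illustrative assumptions, not taken from the test above; the int32 default and the out_type=int64 override follow the example.

import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops

# Hedged sketch: variable_shape on the handle of an eager ResourceVariable.
v = tf.Variable([[1., 2., 3.], [4., 5., 6.]])  # tf.Variable is resource-backed in TF 2.x
s32 = resource_variable_ops.variable_shape(v.handle)                      # int32 by default
s64 = resource_variable_ops.variable_shape(v.handle, out_type=tf.int64)   # int64 override
print(s32.numpy(), s64.numpy())  # expected: [2 3] [2 3]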
Example #2
  def testResources(self):
    # Produce a GraphDef containing ops that produce and consume resources.
    graph = ops.Graph()
    with graph.as_default():
      var = resource_variable_ops.ResourceVariable(1.0)
      var_assign = var.assign(2.0)
      # Use an op that requires handle shape to be set.
      var_shape = resource_variable_ops.variable_shape(var.handle)
      init = variables.global_variables_initializer()
    graph_def = graph.as_graph_def()

    # Import the GraphDef.
    with ops.Graph().as_default():
      # pylint: disable=unused-variable
      imported_var, imported_assign, imported_shape, imported_init = (
          importer.import_graph_def(
              graph_def,
              return_elements=[var.name, var_assign.name, var_shape.name,
                               init.name]))

      # Make sure the handle shape is set on the imported variable.
      new_var_shape = resource_variable_ops.variable_shape(imported_var)
Example #3
def _PluginDenseBackProp(op, top_grad):
    """Gradient (backprop) function for the plugin's dense embedding lookup op."""
    top_grad = _IndexedSlicesToTensorDisableWarning(top_grad)
    emb_var_grads_value, value_index = plugin_bprop(
        emb_handle=op.inputs[1],
        global_replica_id=op.inputs[3],
        top_gradient=top_grad,
        unique_op_name=op.get_attr("unique_op_name"))
    emb_var_grads_value = array_ops.gen_math_ops.cast(
        emb_var_grads_value, DstT=array_ops.dtypes.float32)

    params_shape = resource_variable_ops.variable_shape(handle=op.inputs[0])

    grads = ops.IndexedSlices(values=emb_var_grads_value,
                              indices=value_index,
                              dense_shape=params_shape)

    return [grads] + [None for _ in op.inputs[1:]]
Example #4
def _GetGrad(grads, t, unconnected_gradients):
    """Gets gradient for tensor "t"."""
    op = t.op
    op_grads = grads.get(op)
    if not op_grads:
        if unconnected_gradients == UnconnectedGradients.ZERO:
            t_dtype = default_gradient.get_zeros_dtype(t)
            if t.dtype == dtypes.resource:
                return array_ops.zeros(resource_variable_ops.variable_shape(t),
                                       dtype=t_dtype)
            else:
                return array_ops.zeros_like(t, dtype=t_dtype)
        elif unconnected_gradients == UnconnectedGradients.NONE:
            return None
        else:
            raise ValueError("Unknown value for unconnected_gradients: %r" %
                             unconnected_gradients)

    t_grad = op_grads[t.value_index]
    assert not isinstance(
        t_grad, list), ("gradients list should have been aggregated by now.")
    return t_grad
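
The two branches above are what surface through the public unconnected_gradients option of tf.gradients. A minimal graph-mode sketch of that behavior; the placeholder shape and the constants are assumptions for illustration only.

import tensorflow as tf

g = tf.Graph()
with g.as_default():
    x = tf.compat.v1.placeholder(tf.float32, shape=[2])
    y = tf.constant([1.0, 2.0])  # y does not depend on x
    # NONE (the default) yields None for the unconnected x ...
    grad_none = tf.gradients(y, [x], unconnected_gradients=tf.UnconnectedGradients.NONE)[0]
    # ... while ZERO yields a zeros tensor shaped like x (the zeros/zeros_like branch above).
    grad_zero = tf.gradients(y, [x], unconnected_gradients=tf.UnconnectedGradients.ZERO)[0]
# grad_none is None; grad_zero is an all-zeros float32 Tensor of shape [2].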
Example #5

def testVariableShape(self):
  v = resource_variable_ops.ResourceVariable([1., 1.])
  self.assertAllEqual(
      tensor_util.constant_value(
          resource_variable_ops.variable_shape(v.handle)),
      [2])

Example #6
  def ZerosLikeV1WhileLoop(self, op, index):
    """Create zeros_like for the specified output of an op.

    If op is in a while loop that is part of gradients(), this method
    must be called in its grad loop context.

    Args:
      op: A tensorflow operation.
      index: the index for a specific output of the op.

    Returns:
      A zero tensor with the same shape as op.outputs[index].
    """
    if util.IsLoopSwitch(op):
      return None
    if op.graph.building_function:
      # The optimization here is tricky to apply to functions
      return array_ops.zeros_like(op.outputs[index])
    dead_branch = util.IsSwitch(op)
    forward_ctxt = util.GetWhileContext(op)
    grad_state = self._map.get(forward_ctxt)
    if grad_state is None:
      # op is not in a while loop that is part of gradients().
      return ZerosLike(op, index)
    op_ctxt = op._get_control_flow_context()
    val = ops.convert_to_tensor(op.outputs[index], name="tensor")
    shape = val.get_shape()
    if shape.is_fully_defined():
      # If the shape is known statically, just create a zero tensor with
      # the right shape in the grad loop context.
      if val.dtype == dtypes.resource:
        result = array_ops.zeros(
            resource_variable_ops.variable_shape(val),
            dtype=default_gradient.get_zeros_dtype(val))
      else:
        result = constant_op.constant(0, shape=shape.dims, dtype=val.dtype)
      if dead_branch:
        # op is a cond switch. Guard the zero tensor with a switch.
        pred = grad_state.history_map.get(op_ctxt.pred.name)
        branch = op_ctxt.branch
        result = control_flow_ops._SwitchRefOrTensor(result, pred)[1 - branch]
    else:
      # Unknown shape so keep a history of the shape at runtime.
      if dead_branch:
        # Need to add a special switch to guard the value.
        pred = op_ctxt.pred
        branch = op_ctxt.branch
        op_ctxt.outer_context.Enter()
        val = control_flow_ops._SwitchRefOrTensor(op.inputs[0],
                                                  pred)[1 - branch]
        zeros_shape = array_ops.shape_internal(val, optimize=False)
        op_ctxt.outer_context.Exit()
        val.op._set_control_flow_context(op_ctxt)
        zeros_shape.op._set_control_flow_context(op_ctxt)
      else:
        op_ctxt.Enter()
        zeros_shape = array_ops.shape_internal(val, optimize=False)
        op_ctxt.Exit()

      # Add forward accumulator for shape.
      grad_state.grad_context.Exit()
      history_zeros_shape = grad_state.AddForwardAccumulator(
          zeros_shape, dead_branch=dead_branch)
      grad_state.grad_context.Enter()

      # Create a zero tensor with the right shape.
      shape = grad_state.AddBackpropAccumulatedValue(history_zeros_shape,
                                                     zeros_shape, dead_branch)
      result = array_ops.zeros(shape, val.dtype)
    return result