Example #1
def alphas_like(tensor, alpha_value, name=None, optimize=True):
    """Creates a tensor with all elements set to `alpha_value`.
    Given a single tensor (`tensor`), this operation returns a tensor of the same
    type and shape as `tensor` with all elements set to `alpha_value`.

    Parameters
    ----------
    tensor: tf.Tensor
        The TensorFlow Tensor that will be used as a template.
    alpha_value: `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`
        The value used to fill the resulting `Tensor`.
    name: str
        A name for the operation (optional).
    optimize: bool
        If True, attempt to statically determine the shape of `tensor` and encode it as a constant.

    Returns
    -------
    A `Tensor` with all elements set to `alpha_value`.

    Examples
    --------
    >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
    >>> tl.alphas_like(tensor, 0.5)  # [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]
    """

    with ops.name_scope(name, "alphas_like", [tensor]) as name:
        tensor = ops.convert_to_tensor(tensor, name="tensor")

        if context.in_eager_mode():  # and dtype is not None and dtype != tensor.dtype:
            ret = alphas(shape_internal(tensor, optimize=optimize),
                         alpha_value=alpha_value,
                         name=name)

        else:  # if context.in_graph_mode():

            # For now, variant types must be created via zeros_like; as we need to
            # pass the input variant object to the proper zeros callback.

            if (optimize and tensor.shape.is_fully_defined()):
                # We can produce a zeros tensor independent of the value of 'tensor',
                # since the shape is known statically.
                ret = alphas(tensor.shape, alpha_value=alpha_value, name=name)

            # elif dtype is not None and dtype != tensor.dtype and dtype != dtypes.variant:
            else:
                ret = alphas(shape_internal(tensor, optimize=optimize),
                             alpha_value=alpha_value,
                             name=name)

            ret.set_shape(tensor.get_shape())

        return ret
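A brief usage sketch for the function above, assuming TensorFlow 1.x and TensorLayer exposing this as `tl.alphas_like` (as in the docstring example); the second call exercises the dynamic-shape path that goes through `shape_internal`:

    import tensorflow as tf
    import tensorlayer as tl

    # Static shape: the fully-defined-shape branch builds the fill directly.
    x = tf.constant([[1, 2, 3], [4, 5, 6]])
    a_static = tl.alphas_like(x, 0.5)

    # Unknown shape: the shape is read at runtime via shape_internal().
    p = tf.placeholder(tf.float32, shape=[None, 3])
    a_dynamic = tl.alphas_like(p, 0.5)

    with tf.Session() as sess:
        print(sess.run(a_static))                           # [[0.5 0.5 0.5] [0.5 0.5 0.5]]
        print(sess.run(a_dynamic, {p: [[1.0, 2.0, 3.0]]}))  # [[0.5 0.5 0.5]]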
Example #2
def alphas_like(tensor, alpha_value, name=None, optimize=True):
    """Creates a tensor with all elements set to `alpha_value`.
    Given a single tensor (`tensor`), this operation returns a tensor of the same
    type and shape as `tensor` with all elements set to `alpha_value`.

    Parameters
    ----------
    tensor: tf.Tensor
        The TensorFlow Tensor that will be used as a template.
    alpha_value: `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`
        The value used to fill the resulting `Tensor`.
    name: str
        A name for the operation (optional).
    optimize: bool
        If True, attempt to statically determine the shape of `tensor` and encode it as a constant.

    Returns
    -------
    A `Tensor` with all elements set to `alpha_value`.

    Examples
    --------
    >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
    >>> tl.alphas_like(tensor, 0.5)  # [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]]
    """
    with ops.name_scope(name, "alphas_like", [tensor]) as name:
        tensor = ops.convert_to_tensor(tensor, name="tensor")

        if context.in_eager_mode():  # and dtype is not None and dtype != tensor.dtype:
            ret = alphas(shape_internal(tensor, optimize=optimize), alpha_value=alpha_value, name=name)

        else:  # if context.in_graph_mode():

            # For now, variant types must be created via zeros_like; as we need to
            # pass the input variant object to the proper zeros callback.

            if (optimize and tensor.shape.is_fully_defined()):
                # We can produce a zeros tensor independent of the value of 'tensor',
                # since the shape is known statically.
                ret = alphas(tensor.shape, alpha_value=alpha_value, name=name)

            # elif dtype is not None and dtype != tensor.dtype and dtype != dtypes.variant:
            else:
                ret = alphas(shape_internal(tensor, optimize=optimize), alpha_value=alpha_value, name=name)

            ret.set_shape(tensor.get_shape())

        return ret
Example #3
 def testShape(self):
     shape_op = lambda x: array_ops.shape_internal(x, optimize=False)
     for dtype in self.numeric_types:
         self._assertOpOutputMatchesExpected(shape_op,
                                             dtype(7),
                                             expected=np.array(
                                                 [], dtype=np.int32))
         self._assertOpOutputMatchesExpected(
             shape_op,
             np.array([[], []], dtype=dtype),
             expected=np.array([2, 0], dtype=np.int32))
         self._assertOpOutputMatchesExpected(shape_op,
                                             np.array([-1, 1], dtype=dtype),
                                             expected=np.array(
                                                 [2], dtype=np.int32))
         self._assertOpOutputMatchesExpected(shape_op,
                                             np.array([[-1, 1]],
                                                      dtype=dtype),
                                             expected=np.array(
                                                 [1, 2], dtype=np.int32))
         self._assertOpOutputMatchesExpected(shape_op,
                                             np.array([[-1], [1], [4]],
                                                      dtype=dtype),
                                             expected=np.array(
                                                 [3, 1], dtype=np.int32))
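The expectations in this test can also be reproduced with the public `tf.shape` API, which performs the same shape computation; a minimal sketch (assumes TensorFlow 2.x with eager execution):

    import numpy as np
    import tensorflow as tf

    # A scalar has rank 0, so its shape vector is empty.
    assert tf.shape(tf.constant(7)).numpy().tolist() == []
    # An array of two empty rows has shape [2, 0].
    assert tf.shape(np.array([[], []])).numpy().tolist() == [2, 0]
    # A column of three single-element rows has shape [3, 1].
    assert tf.shape(np.array([[-1], [1], [4]])).numpy().tolist() == [3, 1]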
Example #4
def ZerosLikeOutsideLoop(op, index):
    """Create zeros_like for the specified output of an op."""
    val = op.outputs[index]
    if not util.IsSwitch(op):
        if val.dtype == dtypes.resource:
            return array_ops.zeros(
                gen_resource_variable_ops.variable_shape(val),
                dtype=default_gradient.get_zeros_dtype(val))
        return array_ops.zeros_like(val, optimize=False)
    else:
        op_ctxt = op._get_control_flow_context()
        if op_ctxt:
            # We are in a cond context. Use a switch to create zeros only when needed.
            pred = op_ctxt.pred
            branch = op_ctxt.branch
            switch_val = control_flow_ops.switch(op.inputs[0],
                                                 pred)[1 - branch]
            # An op is created along the branch taken, as control dependencies are
            # on the whole op and not on the tensor output.
            pivot = array_ops.identity(switch_val)
            if val.dtype == dtypes.resource:
                with ops.control_dependencies([pivot]):
                    return array_ops.zeros(
                        gen_resource_variable_ops.variable_shape(switch_val),
                        dtype=default_gradient.get_zeros_dtype(val))
            zeros_shape = array_ops.shape_internal(switch_val, optimize=False)
            # Ensure ops created within array_ops.zeros are dominated by switch in
            # cond context.
            with ops.control_dependencies([pivot]):
                return array_ops.zeros(zeros_shape, dtype=val.dtype)
        else:
            return array_ops.zeros_like(val, optimize=False)
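The cond branch above guards zeros creation with a switch so the zeros are only materialized on the branch actually taken. With public APIs the loosely analogous pattern is a `tf.cond`; the sketch below is illustrative only and does not reproduce the internal switch/pivot mechanics:

    import tensorflow as tf

    @tf.function
    def guarded_zeros(x, take_zeros):
        # Build the zeros only on the branch where they are needed; tf.cond is
        # the public analogue of the switch/merge pair used internally above.
        return tf.cond(take_zeros,
                       lambda: tf.zeros(tf.shape(x), x.dtype),
                       lambda: x)

    print(guarded_zeros(tf.ones([2, 2]), tf.constant(True)))   # all zeros
    print(guarded_zeros(tf.ones([2, 2]), tf.constant(False)))  # unchanged ones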
Example #5
    def ZerosLikeForExit(self, val):
        """Create zeros_like gradient for a loop exit.

    If the result of a loop variable is not used but is involved in
    computing the result of some needed loop variable, we create a
    zero-valued tensor that is fed as gradient for the Exit node of that
    loop variable. Note that val.op is an Exit, and this method must be
    called in the control flow context where gradients() is called.

    Args:
      val: The output tensor of an Exit op.

    Returns:
      A zero tensor with the same shape as val.
    """
        val_shape = val.get_shape()
        forward_ctxt = val.op._get_control_flow_context()
        outer_forward_ctxt = forward_ctxt.outer_context
        if outer_forward_ctxt:
            outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
        outer_grad_state = None
        if outer_forward_ctxt:
            outer_grad_state = self._map.get(outer_forward_ctxt)
        if outer_grad_state:
            # This is a nested loop.
            if val_shape.is_fully_defined():
                # If the shape is known statically, just create a zero tensor
                # with the right shape in the right context.
                outer_grad_state.grad_context.Enter()
                result = array_ops.zeros(val_shape.dims, val.dtype)
                outer_grad_state.grad_context.Exit()
            else:
                # Only the shape of value is needed for backprop.
                forward_ctxt.outer_context.Enter()
                shape = array_ops.shape_internal(val, optimize=False)
                forward_ctxt.outer_context.Exit()
                # Save the shape to a stack.
                history_shape = outer_grad_state.AddForwardAccumulator(shape)
                # Get the shape back from the stack.
                outer_grad_ctxt = outer_grad_state.grad_context
                outer_grad_ctxt.Enter()
                real_shape = outer_grad_state.AddBackpropAccumulatedValue(
                    history_shape, shape)
                result = array_ops.zeros(real_shape, val.dtype)
                outer_grad_ctxt.Exit()
        else:
            # This is not a nested loop.
            if val_shape.is_fully_defined():
                # If the shape is known statically, just create a zero tensor
                # with the right shape.
                result = array_ops.zeros(val_shape.dims, val.dtype)
            else:
                result = array_ops.zeros_like(val, optimize=False)
        return result
Example #6
def _unsorted_segment_N(data, segment_ids, num_segments):
    """ Helper function for unsorted_segment_mean/_sqrtN. Computes the number
      of segment entries with 0-entries set to 1 to allow division by N.
  """
    # bincount doesn't support negative indices so we use unsorted_segment_sum
    segment_ids_shape = array_ops.shape_internal(segment_ids)
    ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)
    N = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids,
                                          num_segments)
    # add dimensions for all non-reduced axes
    ndims_output = data.shape.ndims - segment_ids.shape.ndims
    broadcast_shape = [num_segments] + [1] * ndims_output
    N = array_ops.reshape(N, broadcast_shape)
    return gen_math_ops.maximum(N, 1)
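For context, a helper like this is typically used to turn a segment sum into a segment mean by dividing by the clamped count N; a small sketch with public ops (hypothetical function name, illustrative only):

    import tensorflow as tf

    def unsorted_segment_mean_sketch(data, segment_ids, num_segments):
        # Count the entries per segment, clamping empty segments to 1.
        ones = tf.ones(tf.shape(segment_ids), dtype=data.dtype)
        n = tf.maximum(tf.math.unsorted_segment_sum(ones, segment_ids, num_segments), 1)
        # Add broadcast dims for the non-reduced axes of `data`.
        n = tf.reshape(n, [num_segments] + [1] * (data.shape.ndims - segment_ids.shape.ndims))
        total = tf.math.unsorted_segment_sum(data, segment_ids, num_segments)
        return total / n

    x = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
    print(unsorted_segment_mean_sketch(x, tf.constant([0, 0, 1]), 2))  # [[2. 3.] [5. 6.]]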
Example #7
    def PostProcessing(self):
        """Perform postprocessing at the end of gradients().

    We have created the gradient graph at this point. So this function
    can be used to perform any postprocessing on the gradient graph.
    We currently perform the following postprocessing:
      1. Patch the gradient graph if the output of a loop variable
         doesn't depend on its input.
    """
        for _, grad_state in self._map.items():
            for _, b_merge in grad_state.switch_map.items():
                if b_merge.op.inputs[0] == b_merge.op.inputs[1]:
                    # The value of this loop variable at iteration i+1 doesn't
                    # depend on its value at iteration i. So use zeros as the
                    # gradients for all iterations > 0.
                    dtype = b_merge.op.inputs[0].dtype
                    shape = b_merge.op.inputs[0].get_shape()
                    # pylint: disable=protected-access
                    if shape.is_fully_defined():
                        grad_state.grad_context.Enter()
                        # Create a zeros and use it for iterations > 0.
                        grad_val = constant_op.constant(0,
                                                        dtype=dtype,
                                                        shape=shape)
                        next_grad_val = control_flow_ops._NextIteration(
                            grad_val)
                        grad_state.grad_context.Exit()
                    else:
                        # Create a zeros in the outer grad context.
                        outer_grad_ctxt = grad_state.grad_context.outer_context
                        if outer_grad_ctxt:
                            outer_grad_ctxt.Enter()
                        enter_grad_op = b_merge.op.inputs[0].op
                        enter_grad = enter_grad_op.inputs[0]
                        grad_shape = array_ops.shape_internal(enter_grad,
                                                              optimize=False)
                        grad_val = array_ops.zeros(grad_shape)
                        if outer_grad_ctxt:
                            outer_grad_ctxt.Exit()
                        # Use the zeros for iterations > 0.
                        grad_state.grad_context.Enter()
                        next_grad_val = control_flow_ops._NextIteration(
                            grad_val)
                        grad_state.grad_context.Exit()
                    b_merge.op._update_input(1, next_grad_val)
Example #8
 def testShape(self):
   shape_op = lambda x: array_ops.shape_internal(x, optimize=False)
   for dtype in self.numeric_types:
     self._testUnary(shape_op, dtype(7), expected=np.array([], dtype=np.int32))
     self._testUnary(
         shape_op,
         np.array([[], []], dtype=dtype),
         expected=np.array([2, 0], dtype=np.int32))
     self._testUnary(
         shape_op,
         np.array([-1, 1], dtype=dtype),
         expected=np.array([2], dtype=np.int32))
     self._testUnary(
         shape_op,
         np.array([[-1, 1]], dtype=dtype),
         expected=np.array([1, 2], dtype=np.int32))
     self._testUnary(
         shape_op,
         np.array([[-1], [1], [4]], dtype=dtype),
         expected=np.array([3, 1], dtype=np.int32))
Example #9
def _ZerosLikeV2(op, index):
  """Branch of ZerosLike for TF2."""
  val = op.outputs[index]
  if val.dtype == dtypes.resource:
    return array_ops.zeros(
        gen_resource_variable_ops.variable_shape(val),
        dtype=default_gradient.get_zeros_dtype(val))
  if (isinstance(val.op.graph, control_flow_v2_func_graphs.WhileBodyFuncGraph)
      and val.dtype != dtypes.variant):
    # In while_v2 we do not want to add a `ZerosLike` op because that will
    # trigger accumulation of `val`. Normally `ZerosLike` is preferred because
    # it helps avoid creating extra nodes (possibly Consts) for the shape.
    # For variants, we must use ZerosLike.
    if val.shape.is_fully_defined():
      return constant_op.constant(0, shape=val.shape.dims, dtype=val.dtype)
    else:
      # Note: Even though we add `Shape` in the default graph, while_v2 is smart
      # enough to place it in the forward graph, i.e. `val.graph`.
      zeros_shape = array_ops.shape_internal(val, optimize=False)
      return array_ops.zeros(zeros_shape, val.dtype)
  else:
    return array_ops.zeros_like(val, optimize=False)
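The split above between a plain constant (when the static shape is fully defined) and a runtime `Shape` read is the recurring pattern across these examples; a standalone sketch using only public ops (hypothetical helper name, not the TF-internal function):

    import tensorflow as tf

    def zeros_for(val):
        if val.shape.is_fully_defined():
            # Static shape known: a constant suffices and no Shape op is emitted.
            return tf.constant(0, shape=val.shape.as_list(), dtype=val.dtype)
        # Otherwise read the shape at run time, as the fallback branch does.
        return tf.zeros(tf.shape(val), val.dtype)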
Example #10
    def ZerosLike(self, op, index):
        """Create zeros_like for the specified output of an op.

    If op is in a while loop that is part of gradients(), this method
    must be called in its grad loop context.

    Args:
      op: A TensorFlow operation.
      index: the index for a specific output of the op.

    Returns:
      A zero tensor with the same shape as op.outputs[index].
    """
        if util.IsLoopSwitch(op):
            return None
        if op.graph._building_function:  # pylint: disable=protected-access
            # The optimization here is tricky to apply to functions
            return array_ops.zeros_like(op.outputs[index])
        dead_branch = util.IsSwitch(op)
        forward_ctxt = util.GetWhileContext(op)
        grad_state = self._map.get(forward_ctxt)
        if grad_state is None:
            # op is not in a while loop that is part of gradients().
            return ZerosLikeOutsideLoop(op, index)
        op_ctxt = op._get_control_flow_context()
        val = ops.convert_to_tensor(op.outputs[index], name="tensor")
        shape = val.get_shape()
        if shape.is_fully_defined():
            # If the shape is known statically, just create a zero tensor with
            # the right shape in the grad loop context.
            result = constant_op.constant(0, shape=shape.dims, dtype=val.dtype)
            if dead_branch:
                # op is a cond switch. Guard the zero tensor with a switch.
                pred = grad_state.history_map.get(op_ctxt.pred.name)
                branch = op_ctxt.branch
                result = control_flow_ops._SwitchRefOrTensor(result,
                                                             pred)[1 - branch]
        else:
            # Unknown shape so keep a history of the shape at runtime.
            if dead_branch:
                # Need to add a special switch to guard the value.
                pred = op_ctxt.pred
                branch = op_ctxt.branch
                op_ctxt.outer_context.Enter()
                val = control_flow_ops._SwitchRefOrTensor(op.inputs[0],
                                                          pred)[1 - branch]
                zeros_shape = array_ops.shape_internal(val, optimize=False)
                op_ctxt.outer_context.Exit()
                val.op._set_control_flow_context(op_ctxt)
                zeros_shape.op._set_control_flow_context(op_ctxt)
            else:
                op_ctxt.Enter()
                zeros_shape = array_ops.shape_internal(val, optimize=False)
                op_ctxt.Exit()

            # Add forward accumulator for shape.
            grad_state.grad_context.Exit()
            history_zeros_shape = grad_state.AddForwardAccumulator(
                zeros_shape, dead_branch=dead_branch)
            grad_state.grad_context.Enter()

            # Create a zero tensor with the right shape.
            shape = grad_state.AddBackpropAccumulatedValue(
                history_zeros_shape, zeros_shape, dead_branch)
            result = array_ops.zeros(shape, val.dtype)
        return result