Example #1
# Gradient for the TensorListSplit op: concatenating the incoming gradient
# list recovers the gradient w.r.t. the original (pre-split) tensor.
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_list_ops


@ops.RegisterGradient("TensorListSplit")
def _TensorListSplitGrad(op, dlist):
  tensor, _, lengths = op.inputs
  # Element shape: -1 (unknown leading dim) followed by the input shape
  # with its first (split) dimension dropped.
  element_shape = array_ops.slice(array_ops.shape(tensor), [1], [-1])
  element_shape = array_ops.concat([[-1], element_shape], axis=0)
  return gen_list_ops.tensor_list_concat_v2(
      dlist,
      element_shape=element_shape,
      leading_dims=lengths,
      element_dtype=op.inputs[0].dtype)[0], None, None
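
A minimal forward-pass sketch of the relationship this gradient relies on: TensorListConcatV2 undoes TensorListSplit, which is why concatenating the gradient list yields the gradient of the original tensor. The sketch goes through the public tf.raw_ops endpoints and eager TF 2.x (an assumption for illustration; the snippet above uses the internal gen_list_ops wrapper).

import tensorflow as tf

t = tf.constant([1., 2., 3., 4., 5.])
# Split a rank-1 tensor into list elements of lengths 2 and 3.
handle = tf.raw_ops.TensorListSplit(
    tensor=t,
    element_shape=tf.constant([-1], dtype=tf.int32),
    lengths=tf.constant([2, 3], dtype=tf.int64))
# Concatenating the list restores the original tensor; leading_dims carries
# the per-element lengths, just as the gradient passes `lengths` above.
restored = tf.raw_ops.TensorListConcatV2(
    input_handle=handle,
    element_shape=tf.constant([-1], dtype=tf.int32),
    leading_dims=tf.constant([2, 3], dtype=tf.int64),
    element_dtype=tf.float32)
print(restored[0].numpy())  # [1. 2. 3. 4. 5.]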
Example #2
# Python wrapper around the TensorListConcatV2 op, from TensorFlow's
# list_ops.py; _build_element_shape is a helper defined in that same module.
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_list_ops


def tensor_list_concat(input_handle, element_dtype, element_shape=None,
                       name=None):
  # Ignore the lengths output of TensorListConcat. It is only used during
  # gradient computation.
  return gen_list_ops.tensor_list_concat_v2(
      input_handle=input_handle,
      element_dtype=element_dtype,
      element_shape=_build_element_shape(element_shape),
      leading_dims=ops.convert_to_tensor([], dtype=dtypes.int64),
      name=name)[0]
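
A hedged usage sketch of the same call pattern through the public tf.raw_ops endpoints (assumed here for illustration, since gen_list_ops and _build_element_shape are internal): build a TensorList with TensorListFromTensor, then concatenate it back into one tensor.

import tensorflow as tf

# Build a TensorList holding the three length-2 rows of `t`...
t = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
handle = tf.raw_ops.TensorListFromTensor(
    tensor=t, element_shape=tf.constant([2], dtype=tf.int32))
# ...then concatenate the list elements along their first axis. leading_dims
# is empty because every element shape is fully known, mirroring the wrapper.
result = tf.raw_ops.TensorListConcatV2(
    input_handle=handle,
    element_shape=tf.constant([2], dtype=tf.int32),
    leading_dims=tf.constant([], dtype=tf.int64),
    element_dtype=tf.float32)
print(result[0].numpy())  # [1. 2. 3. 4. 5. 6.]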