Example #1
# NOTE: this excerpt begins mid-function; the header and the as_dtype line are
# reconstructed from context (the function is registered below as ones_fn=_ones).
def _ones(shape, dtype):
    as_dtype = dtypes.as_dtype(dtype)
    if not context.context().executing_eagerly():
        return array_ops.ones(shape, dtype)

    if as_dtype.is_bool:
        value = True
    else:
        value = 1

    if shape == ():  # pylint: disable=g-explicit-bool-comparison
        return constant_op.constant(value, dtype=dtype)
    return _fast_fill(value, shape, dtype)


_default_vspace = imperative_grad.VSpace(num_elements_fn=_num_elements,
                                         aggregate_fn=_aggregate_grads,
                                         zeros_fn=_zeros,
                                         ones_fn=_ones,
                                         graph_shape_fn=gen_array_ops.shape)
pywrap_tensorflow.TFE_Py_RegisterVSpace(_default_vspace)


def _handle_or_self(x):
    """If x is ResourceVariable, return its handle, else x."""
    if resource_variable_ops.is_resource_variable(x):
        x = x.handle
    return x


@tf_export("GradientTape")
class GradientTape(object):
    """Record operations for automatic differentiation.
Example #2
# NOTE: this excerpt begins mid-function; the header and the docstring's first
# line are reconstructed from context (the function is registered below as
# add_new_grads_fn=_add_new_grads).
def _add_new_grads(gradients, gradients_size, tid, grad):
    """Appends `grad` to the pending gradients accumulated for tensor id `tid`.

    Raises:
        ValueError: if `grad` is neither a Tensor nor an IndexedSlices.
    """
    tensor_grads = gradients[tid]
    tensor_grads.append(grad)
    if len(tensor_grads) < _MIN_AGGREGATE_COUNT:
        return
    elif tid not in gradients_size:
        if isinstance(grad, ops.Tensor):
            size = functools.reduce(operator.mul, grad._shape_tuple(), 1)  # pylint: disable=protected-access
        elif isinstance(grad, ops.IndexedSlices):
            size = functools.reduce(operator.mul, grad.values._shape_tuple(),
                                    1)  # pylint: disable=protected-access
        else:
            raise ValueError("Unexpected gradient type: %s" % type(grad))
        gradients_size[tid] = size
    else:
        size = gradients_size[tid]

    # For simplicity, assume each element to be 4 bytes now.
    if len(tensor_grads) * size * 4 > _MIN_AGGREGATE_BYTES:
        gradients[tid] = [_aggregate_grads(tensor_grads)]


_default_vspace = imperative_grad.VSpace(add_new_grads_fn=_add_new_grads,
                                         aggregate_fn=_aggregate_grads,
                                         tensor_id=ops.tensor_id,
                                         zeros=array_ops.zeros,
                                         ones_like=array_ops.ones_like)
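
The excerpt above buffers incoming gradients per tensor id and only collapses them once both a count threshold and a rough byte estimate (4 bytes per element) are exceeded, so small gradients are not summed one at a time. A hypothetical, self-contained sketch of that buffering pattern; the constants and names below are invented for illustration and are not the TensorFlow ones:

import functools
import operator

_MIN_COUNT = 4            # invented: buffer at least this many grads before checking size
_MIN_BYTES = 128 * 1024   # invented: collapse once the estimate tops ~128 KiB


def add_pending_grad(pending, sizes, tid, grad_array, aggregate_fn):
  """Buffers grad_array under tid and aggregates the buffer once it grows large."""
  pending.setdefault(tid, []).append(grad_array)
  if tid not in sizes:
    # Element count = product of the dimensions (1 for a scalar).
    sizes[tid] = functools.reduce(operator.mul, grad_array.shape, 1)
  if len(pending[tid]) < _MIN_COUNT:
    return
  # Same simplification as the original comment: assume 4 bytes per element.
  if len(pending[tid]) * sizes[tid] * 4 > _MIN_BYTES:
    pending[tid] = [aggregate_fn(pending[tid])]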
Example #3
    values = array_ops.concat([x.values for x in indexed_slices_list], 0)
    return ops.IndexedSlices(values, indices, dense_shape)


def _num_elements(grad):
  """The number of elements in the `grad` tensor."""
  if isinstance(grad, ops.Tensor):
    return functools.reduce(operator.mul, grad._shape_tuple(), 1)  # pylint: disable=protected-access
  if isinstance(grad, ops.IndexedSlices):
    return functools.reduce(operator.mul, grad.values._shape_tuple(), 1)  # pylint: disable=protected-access
  raise ValueError("`grad` not a Tensor or IndexedSlices.")


_default_vspace = imperative_grad.VSpace(
    num_elements_fn=_num_elements,
    aggregate_fn=_aggregate_grads,
    tensor_id=ops.tensor_id,
    zeros=array_ops.zeros,
    ones_like=array_ops.ones_like)


class GradientTape(object):
  """Records operations to use to compute gradients.

  Operations are recorded if:
    - they happen in code marked by this context manager
    - at least one of their inputs is being watched

  Outputs of recorded operations are watched. Variables are automatically
  watched and tensors can be manually watched by calling the watch method on the
  context manager.
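
As the docstring says, only variables are watched automatically; plain tensors have to be watched by hand. A minimal usage sketch of that manual watch call, written against the modern tf.GradientTape spelling of this class:

import tensorflow as tf

x = tf.constant(3.0)
with tf.GradientTape() as tape:
  tape.watch(x)               # a constant is not watched automatically
  y = x * x                   # recorded because x is now watched
print(tape.gradient(y, x))    # -> 6.0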
Example #4
  cached = _zeros_cache.get(cache_key)
  if cached is None:
    cached = _fast_fill(0, shape, dtype)
    _zeros_cache.put(cache_key, cached)
  return cached


def _ones(shape, dtype):
  if shape == ():  # pylint: disable=g-explicit-bool-comparison
    return constant_op.constant(1, dtype=dtype)
  return _fast_fill(1, shape, dtype)


_default_vspace = imperative_grad.VSpace(
    num_elements_fn=_num_elements,
    aggregate_fn=_aggregate_grads,
    tensor_id=ops.tensor_id,
    zeros=_zeros,
    ones=_ones)


def _handle_or_self(x):
  """If x is ResourceVariable, return its handle, else x."""
  if isinstance(x, resource_variable_ops.ResourceVariable):
    x = x.handle
  return x


@tf_export("GradientTape")
class GradientTape(object):
  """Record operations for automatic differentiation.
Example #5
    values = array_ops.concat([x.values for x in indexed_slices_list], 0)
    return ops.IndexedSlices(values, indices, dense_shape)


def _num_elements(grad):
  """The number of elements in the `grad` tensor."""
  if isinstance(grad, ops.Tensor):
    return functools.reduce(operator.mul, grad._shape_tuple(), 1)  # pylint: disable=protected-access
  if isinstance(grad, ops.IndexedSlices):
    return functools.reduce(operator.mul, grad.values._shape_tuple(), 1)  # pylint: disable=protected-access
  raise ValueError("`grad` not a Tensor or IndexedSlices.")


_default_vspace = imperative_grad.VSpace(
    num_elements_fn=_num_elements,
    aggregate_fn=_aggregate_grads,
    tensor_id=ops.tensor_id,
    zeros=array_ops.zeros,
    ones_like=lambda x: ops.convert_to_tensor(array_ops.ones_like(x)))


class GradientTape(object):
  """Records operations to use to compute gradients.

  Operations are recorded if:
    - they happen in code marked by this context manager
    - at least one of their inputs is being watched

  Outputs of recorded operations are watched. Variables are automatically
  watched and tensors can be manually watched by calling the watch method on the
  context manager.
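
By default a tape can only be asked for gradients once after exiting the context. A rough sketch of the persistent variant, assuming the persistent=True option of the modern tf.GradientTape API:

import tensorflow as tf

x = tf.Variable(3.0)
with tf.GradientTape(persistent=True) as tape:
  y = x * x
  z = y * y
print(tape.gradient(z, x))   # 4 * x**3 -> 108.0
print(tape.gradient(y, x))   # 2 * x    -> 6.0
del tape                     # release the persistent tape's resources when done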