Example 1
def linspace(  # pylint: disable=missing-docstring
        start,
        stop,
        num=50,
        endpoint=True,
        retstep=False,
        dtype=float,
        axis=0):
    if dtype:
        dtype = np_utils.result_type(dtype)
    start = np_array_ops.array(start, dtype=dtype).data
    stop = np_array_ops.array(stop, dtype=dtype).data
    if num < 0:
        raise ValueError(
            'Number of samples {} must be non-negative.'.format(num))
    step = ops.convert_to_tensor(np.nan)
    if endpoint:
        result = math_ops.linspace(start, stop, num, axis=axis)
        if num > 1:
            step = (stop - start) / (num - 1)
    else:
        # math_ops.linspace does not support endpoint=False, so we handle it
        # manually here.
        if num > 1:
            step = ((stop - start) / num)
            new_stop = math_ops.cast(stop, step.dtype) - step
            start = math_ops.cast(start, new_stop.dtype)
            result = math_ops.linspace(start, new_stop, num, axis=axis)
        else:
            result = math_ops.linspace(start, stop, num, axis=axis)
    if dtype:
        result = math_ops.cast(result, dtype)
    if retstep:
        return (np_arrays.tensor_to_ndarray(result),
                np_arrays.tensor_to_ndarray(step))
    else:
        return np_arrays.tensor_to_ndarray(result)
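
For context, a minimal usage sketch through the public tf.experimental.numpy.linspace API, which exposes the same parameters as the helper above; the numbers are illustrative, not taken from the source:

import tensorflow.experimental.numpy as tnp

# Five evenly spaced samples in [0, 2), excluding the endpoint,
# plus the step between consecutive samples (retstep=True).
samples, step = tnp.linspace(0.0, 2.0, num=5, endpoint=False, retstep=True)
# samples -> [0.0, 0.4, 0.8, 1.2, 1.6], step -> 0.4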
Example 2
def average(a, axis=None, weights=None, returned=False):  # pylint: disable=missing-docstring
    if axis is not None and not isinstance(axis, six.integer_types):
        # TODO(wangpeng): Support tuple of ints as `axis`
        raise ValueError('`axis` must be an integer. Tuple of ints is not '
                         'supported yet. Got type: %s' % type(axis))
    a = np_array_ops.array(a)
    if weights is None:  # Treat all weights as 1
        if not np.issubdtype(a.dtype.as_numpy_dtype, np.inexact):
            a = a.astype(
                np_utils.result_type(a.dtype, np_dtypes.default_float_type()))
        avg = math_ops.reduce_mean(a, axis=axis)
        if returned:
            if axis is None:
                weights_sum = array_ops.size(a)
            else:
                weights_sum = array_ops.shape(a)[axis]
            weights_sum = math_ops.cast(weights_sum, a.dtype)
    else:
        if np.issubdtype(a.dtype.as_numpy_dtype, np.inexact):
            out_dtype = np_utils.result_type(a.dtype, weights)
        else:
            out_dtype = np_utils.result_type(a.dtype, weights,
                                             np_dtypes.default_float_type())
        a = np_array_ops.array(a, out_dtype)
        weights = np_array_ops.array(weights, out_dtype)

        def rank_equal_case():
            control_flow_ops.Assert(
                math_ops.reduce_all(
                    array_ops.shape(a) == array_ops.shape(weights)),
                [array_ops.shape(a),
                 array_ops.shape(weights)])
            weights_sum = math_ops.reduce_sum(weights, axis=axis)
            avg = math_ops.reduce_sum(a * weights, axis=axis) / weights_sum
            return avg, weights_sum

        if axis is None:
            avg, weights_sum = rank_equal_case()
        else:

            def rank_not_equal_case():
                control_flow_ops.Assert(
                    array_ops.rank(weights) == 1, [array_ops.rank(weights)])
                weights_sum = math_ops.reduce_sum(weights)
                axes = ops.convert_to_tensor([[axis], [0]])
                avg = math_ops.tensordot(a, weights, axes) / weights_sum
                return avg, weights_sum

            # We condition on rank rather than shape equality, because if we do the
            # latter, when the shapes are partially unknown but the ranks are known
            # and different, np_utils.cond will run shape checking on the true branch,
            # which will raise a shape-checking error.
            avg, weights_sum = np_utils.cond(
                math_ops.equal(array_ops.rank(a), array_ops.rank(weights)),
                rank_equal_case, rank_not_equal_case)

    avg = np_array_ops.array(avg)
    if returned:
        weights_sum = np_array_ops.broadcast_to(weights_sum,
                                                array_ops.shape(avg))
        return avg, weights_sum
    return avg
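
For context, a short usage sketch through the public tf.experimental.numpy.average, which takes the same arguments as the helper above; the inputs are illustrative:

import tensorflow.experimental.numpy as tnp

a = tnp.asarray([1.0, 2.0, 3.0, 4.0])
w = tnp.asarray([4.0, 3.0, 2.0, 1.0])

# Weighted mean: sum(a * w) / sum(w) = (4 + 6 + 6 + 4) / 10 = 2.0
avg, weights_sum = tnp.average(a, weights=w, returned=True)
# avg -> 2.0, weights_sum -> 10.0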
Example 3
def conv(lhs,
         rhs,
         window_strides,
         padding,
         lhs_dilation,
         rhs_dilation,
         dimension_numbers,
         feature_group_count=1,
         precision_config=None,
         preferred_element_type=None,
         name=None):
    """Wraps the XLA ConvGeneralDilated operator.

  ConvGeneralDilated is the most general form of XLA convolution and is
  documented at
  https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution

  Args:
    lhs: the input tensor
    rhs: the kernel tensor
    window_strides: the inter-window strides
    padding: the padding to apply at the start and end of each input dimensions
    lhs_dilation: dilation to apply between input elements
    rhs_dilation: dilation to apply between kernel elements
    dimension_numbers: a `ConvolutionDimensionNumbers` proto.
    feature_group_count: number of feature groups for grouped convolution.
    precision_config: a `xla.PrecisionConfig` proto.
    preferred_element_type: the result `dtype`.
    name: an optional name for the operator

  Returns:
    A tensor representing the output of the convolution.
  """
    precision_config_proto = ""
    if precision_config:
        precision_config_proto = precision_config.SerializeToString()
    needs_v2 = preferred_element_type or (lhs.dtype != rhs.dtype)
    if preferred_element_type is None:
        preferred_element_type = np_utils.result_type(lhs.dtype, rhs.dtype)
    if needs_v2:
        return gen_xla_ops.xla_conv_v2(
            lhs,
            rhs,
            window_strides=window_strides,
            padding=padding,
            lhs_dilation=lhs_dilation,
            rhs_dilation=rhs_dilation,
            feature_group_count=feature_group_count,
            dimension_numbers=dimension_numbers.SerializeToString(),
            precision_config=precision_config_proto,
            preferred_element_type=preferred_element_type,
            name=name)
    return gen_xla_ops.xla_conv(
        lhs,
        rhs,
        window_strides=window_strides,
        padding=padding,
        lhs_dilation=lhs_dilation,
        rhs_dilation=rhs_dilation,
        feature_group_count=feature_group_count,
        dimension_numbers=dimension_numbers.SerializeToString(),
        precision_config=precision_config_proto,
        name=name)
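
The wrapper above calls the low-level XLA op directly and needs a ConvolutionDimensionNumbers proto. For orientation only, here is a sketch of a strided, kernel-dilated convolution through the public tf.nn API instead of the XLA wrapper; shapes and parameters are illustrative assumptions:

import tensorflow as tf

lhs = tf.random.normal([1, 32, 32, 3])   # NHWC input: batch, height, width, channels
rhs = tf.random.normal([3, 3, 3, 16])    # HWIO kernel: height, width, in, out

# strides plays the role of window_strides; dilations corresponds to rhs_dilation.
out = tf.nn.convolution(lhs, rhs, strides=[2, 2], padding='SAME', dilations=[2, 2])
# out.shape -> (1, 16, 16, 16)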
Example 4
def _reduce(tf_fn,
            a,
            axis=None,
            dtype=None,
            keepdims=None,
            promote_int=_TO_INT64,
            tf_bool_fn=None,
            preserve_bool=False):
    """A general reduction function.

  Args:
    tf_fn: the TF reduction function.
    a: the array to be reduced.
    axis: (optional) the axis along which to do the reduction. If None, all
      dimensions are reduced.
    dtype: (optional) the dtype of the result.
    keepdims: (optional) whether to keep the reduced dimension(s).
    promote_int: how to promote integer and bool inputs. There are three
      choices: (1) _TO_INT64: always promote them to int64 or uint64; (2)
      _TO_FLOAT: always promote them to a float type (determined by
      dtypes.default_float_type); (3) None: don't promote.
    tf_bool_fn: (optional) the TF reduction function for bool inputs. It will
      only be used if `dtype` is explicitly set to `np.bool_` or if `a`'s dtype
      is `np.bool_` and `preserve_bool` is True.
    preserve_bool: a flag to control whether to use `tf_bool_fn` if `a`'s dtype
      is `np.bool_` (some reductions such as np.sum convert bools to integers,
      while others such as np.max preserve bools.

  Returns:
    An ndarray.
  """
    if dtype:
        dtype = np_utils.result_type(dtype)
    if keepdims is None:
        keepdims = False
    a = asarray(a, dtype=dtype)
    if ((dtype == np.bool_ or preserve_bool and a.dtype == np.bool_)
            and tf_bool_fn is not None):
        return np_utils.tensor_to_ndarray(
            tf_bool_fn(input_tensor=a.data, axis=axis, keepdims=keepdims))
    if dtype is None:
        dtype = a.dtype
        if np.issubdtype(dtype, np.integer) or dtype == np.bool_:
            if promote_int == _TO_INT64:
                # If a is an integer/bool type whose bit width is less than 64,
                # NumPy up-casts it to 64-bit.
                if dtype == np.bool_:
                    is_signed = True
                    width = 8  # We can use any number here that is less than 64
                else:
                    is_signed = np.issubdtype(dtype, np.signedinteger)
                    width = np.iinfo(dtype).bits
                if width < 64:
                    if is_signed:
                        dtype = np.int64
                    else:
                        dtype = np.uint64
                    a = a.astype(dtype)
            elif promote_int == _TO_FLOAT:
                a = a.astype(np_dtypes.default_float_type())

    return np_utils.tensor_to_ndarray(
        tf_fn(input_tensor=a.data, axis=axis, keepdims=keepdims))
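
A sketch of the integer/bool promotion this helper performs, seen through the public API; the result dtypes shown assume the _TO_INT64 rule above and NumPy-style promotion:

import tensorflow.experimental.numpy as tnp

# Bool and narrow integer inputs are widened to 64 bits before summing,
# mirroring NumPy.
s_bool = tnp.sum(tnp.asarray([True, False, True]))         # 2, dtype int64
s_int8 = tnp.sum(tnp.asarray([1, 2, 3], dtype=tnp.int8))   # 6, dtype int64
# Reductions that preserve bool (the preserve_bool path) keep the dtype.
m_bool = tnp.max(tnp.asarray([True, False]))               # True, dtype bool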
Example 5
def _promote_dtype(*arrays):
    dtype = np_utils.result_type(*arrays)
    return [asarray(a, dtype=dtype) for a in arrays]
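
A tiny sketch of the promotion rule _promote_dtype relies on, using the public result_type; the printed dtypes assume NumPy's default promotion rules:

import tensorflow.experimental.numpy as tnp

print(tnp.result_type(tnp.int32, tnp.int64))    # int64
print(tnp.result_type(tnp.int32, tnp.float32))  # float64 under NumPy's rules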
Example 6
def asarray(a, dtype=None):
    if dtype:
        dtype = np_utils.result_type(dtype)
    if isinstance(a, np_arrays.ndarray) and (not dtype or dtype == a.dtype):
        return a
    return array(a, dtype, copy=False)
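
A usage sketch of the fast path above: when the input is already an ndarray of the requested dtype, asarray returns it as-is rather than copying; the values are illustrative:

import tensorflow.experimental.numpy as tnp

a = tnp.asarray([1.0, 2.0, 3.0])
b = tnp.asarray(a)                    # same dtype: the fast path returns a unchanged
c = tnp.asarray(a, dtype=tnp.int32)   # dtype differs: a new int32 array is built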