Example #1
def _CosGrad(op, grad):
  """Returns grad * -sin(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    if x.dtype.is_complex:
      x = math_ops.conj(x)
    return -grad * math_ops.sin(x)
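
This registered gradient is easy to sanity-check from user code. A minimal sketch, assuming an eager TF2 environment and only the public `tensorflow` API:

import tensorflow as tf

x = tf.constant([0.0, 0.5, 1.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.cos(x)
g = tape.gradient(y, x)
# d/dx cos(x) = -sin(x), which is exactly what _CosGrad returns.
print(tf.reduce_max(tf.abs(g + tf.sin(x))).numpy())  # ~0.0
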
def GetMultiEngineGraphDef(dtype=dtypes.float32):
  """Create a graph containing multiple segment."""
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)
    with g.device("/GPU:0"):
      conv_filter = constant_op.constant(
          [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=conv_filter,
          strides=[1, 2, 2, 1],
          padding="SAME",
          name="conv")
      c1 = constant_op.constant(
          np.random.randn(INPUT_DIMS[0], 12, 12, 6), dtype=dtype)
      p = conv * c1
      c2 = constant_op.constant(
          np.random.randn(INPUT_DIMS[0], 12, 12, 6), dtype=dtype)
      q = conv / c2

      edge = math_ops.sin(q)
      edge /= edge
      r = edge + edge

      p -= edge
      q *= edge
      s = p + q
      s -= r
    array_ops.squeeze(s, name=OUTPUT_NAME)
  return g.as_graph_def()
Example #3
def angles_to_projective_transforms(angles,
                                    image_height,
                                    image_width,
                                    name=None):
    """Returns projective transform(s) for the given angle(s).

    Args:
      angles: A scalar angle to rotate all images by, or (for batches of images)
          a vector with an angle to rotate each image in the batch. The rank must
          be statically known (the shape is not `TensorShape(None)`).
      image_height: Height of the image(s) to be transformed.
      image_width: Width of the image(s) to be transformed.

    Returns:
      A tensor of shape (num_images, 8). Projective transforms which can be given
        to the `transform` op.

    """
    with ops.name_scope(name, "angles_to_projective_transforms"):
        angle_or_angles = ops.convert_to_tensor(angles,
                                                name="angles",
                                                dtype=dtypes.float32)
        if len(angle_or_angles.get_shape()) == 0:
            angles = angle_or_angles[None]
        elif len(angle_or_angles.get_shape()) == 1:
            angles = angle_or_angles
        else:
            raise TypeError("Angles should have rank 0 or 1.")
        x_offset = ((image_width - 1) -
                    (math_ops.cos(angles) *
                     (image_width - 1) - math_ops.sin(angles) *
                     (image_height - 1))) / 2.0
        y_offset = ((image_height - 1) -
                    (math_ops.sin(angles) *
                     (image_width - 1) + math_ops.cos(angles) *
                     (image_height - 1))) / 2.0
        num_angles = array_ops.shape(angles)[0]
        return array_ops.concat(values=[
            math_ops.cos(angles)[:, None],
            -math_ops.sin(angles)[:, None],
            x_offset[:, None],
            math_ops.sin(angles)[:, None],
            math_ops.cos(angles)[:, None],
            y_offset[:, None],
            array_ops.zeros((num_angles, 2), dtypes.float32),
        ],
                                axis=1)
Example #4
 def call(self, inputs, state):
     gate_inputs = math_ops.matmul(inputs, self._ih)
     recurrent_update = math_ops.matmul(state, math_ops.sin(math_ops.matmul(self._hh, self._ih)))
     gate_inputs = math_ops.add(gate_inputs, recurrent_update)
     if self.bias:
         gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
     output = gate_inputs
     return output, output
        def func_with_bad_grad(x):
            output = math_ops.sin(x)

            @def_function.function
            def grad(dy):
                # `dy` will come in as 1.0. Taking log of -1.0 leads to NaN.
                return math_ops.log(-dy)

            return output, grad
 def input_fn():
   start = random_ops.random_uniform(
       (), minval=0, maxval=(np.pi * 2.0), dtype=dtypes.float32, seed=seed)
   sin_curves = math_ops.sin(
       math_ops.linspace(start, (sequence_length - 1) * increment,
                         sequence_length + 1))
   inputs = array_ops.slice(sin_curves, [0], [sequence_length])
   labels = array_ops.slice(sin_curves, [1], [sequence_length])
   return {'inputs': inputs}, labels
Example #8
def angles_to_projective_transforms(angles,
                                    image_height,
                                    image_width,
                                    name=None):
  """Returns projective transform(s) for the given angle(s).

  Args:
    angles: A scalar angle to rotate all images by, or (for batches of images)
        a vector with an angle to rotate each image in the batch. The rank must
        be statically known (the shape is not `TensorShape(None)`).
    image_height: Height of the image(s) to be transformed.
    image_width: Width of the image(s) to be transformed.

  Returns:
    A tensor of shape (num_images, 8). Projective transforms which can be given
      to `tf.contrib.image.transform`.
  """
  with ops.name_scope(name, "angles_to_projective_transforms"):
    angle_or_angles = ops.convert_to_tensor(
        angles, name="angles", dtype=dtypes.float32)
    if len(angle_or_angles.get_shape()) == 0:  # pylint: disable=g-explicit-length-test
      angles = angle_or_angles[None]
    elif len(angle_or_angles.get_shape()) == 1:
      angles = angle_or_angles
    else:
      raise TypeError("Angles should have rank 0 or 1.")
    x_offset = ((image_width - 1) - (math_ops.cos(angles) *
                                     (image_width - 1) - math_ops.sin(angles) *
                                     (image_height - 1))) / 2.0
    y_offset = ((image_height - 1) - (math_ops.sin(angles) *
                                      (image_width - 1) + math_ops.cos(angles) *
                                      (image_height - 1))) / 2.0
    num_angles = array_ops.shape(angles)[0]
    return array_ops.concat(
        values=[
            math_ops.cos(angles)[:, None],
            -math_ops.sin(angles)[:, None],
            x_offset[:, None],
            math_ops.sin(angles)[:, None],
            math_ops.cos(angles)[:, None],
            y_offset[:, None],
            array_ops.zeros((num_angles, 2), dtypes.float32),
        ],
        axis=1)
Example #9
    def _add_sinusoids_signal(x, time, min_timescale=1.0, max_timescale=1.0e4):
        """Adds a bunch of sinusoids of different frequencies to a Tensor.

        Each channel of the input Tensor is incremented by a sinusoid of a different
        frequency and phase.

        This allows attention to learn to use absolute and relative positions.
        Timing signals should be added to some precursors of both the query and the
        memory inputs to attention.

        The use of relative position is possible because sin(x+y) and cos(x+y) can be
        expressed in terms of y, sin(x) and cos(x).

        In particular, we use a geometric sequence of timescales starting with
        min_timescale and ending with max_timescale.  The number of different
        timescales is equal to channels / 2. For each timescale, we
        generate the two sinusoidal signals sin(timestep/timescale) and
        cos(timestep/timescale).  All of these sinusoids are concatenated in
        the channels dimension.

        Args:
          x: a Tensor with shape [batch, length, channels]
          min_timescale: a float
          max_timescale: a float

        Returns:
          a Tensor the same shape as x.
        """
        channels = x.get_shape().as_list()[-1]
        if x.get_shape().ndims == 3:  # [batch_size, timesteps, dim]
            length = array_ops.shape(x)[1]
            position = math_ops.to_float(math_ops.range(length))
        elif x.get_shape().ndims == 2:  # [batch_size, dim]
            length = 1
            position = math_ops.to_float(math_ops.range(time, time + 1))
        else:
            raise ValueError("need a Tensor with rank 2 or 3")
        num_timescales = channels // 2
        log_timescale_increment = (
            math.log(float(max_timescale) / float(min_timescale)) /
            (math_ops.to_float(num_timescales) - 1))
        inv_timescales = min_timescale * math_ops.exp(
            math_ops.to_float(math_ops.range(num_timescales)) *
            -log_timescale_increment)
        scaled_time = array_ops.expand_dims(
            position, 1) * array_ops.expand_dims(inv_timescales, 0)
        signal = array_ops.concat(
            [math_ops.sin(scaled_time),
             math_ops.cos(scaled_time)], axis=1)
        signal = array_ops.pad(signal,
                               [[0, 0], [0, math_ops.mod(channels, 2)]])
        if x.get_shape().ndims == 3:
            signal = array_ops.reshape(signal, [1, length, channels])
        else:
            signal = array_ops.reshape(signal, [1, channels])
        return x + signal
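
For intuition, the timing signal described in the docstring can be mirrored in a few lines of NumPy. This is a sketch under simplifying assumptions (even `channels`, no batch handling); `np_timing_signal` is a hypothetical helper, not part of the source:

import numpy as np

def np_timing_signal(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    # Geometric sequence of timescales, channels // 2 of them.
    num_timescales = channels // 2
    log_inc = np.log(max_timescale / min_timescale) / (num_timescales - 1)
    inv_timescales = min_timescale * np.exp(np.arange(num_timescales) * -log_inc)
    # Outer product of positions and inverse timescales, then sin/cos channels.
    scaled_time = np.arange(length)[:, None] * inv_timescales[None, :]
    return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)

print(np_timing_signal(length=50, channels=8).shape)  # (50, 8)
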
    def testGradientsChained(self):
        @def_function.function
        def _forward(z):
            return math_ops.cos(z)

        f = _forward
        x = constant_op.constant(1.)
        with backprop.GradientTape() as t:
            t.watch(x)
            y = f(x)
        with backprop.GradientTape() as tt:
            doutputs = constant_op.constant(2.)
            tt.watch(doutputs)
            g = t.gradient(y, x, doutputs)
        self.assertAllClose(-2. * math_ops.sin(x), g)
        gg = tt.gradient(g, doutputs)
        # We're taking gradients with respect to doutputs, which is just a linear
        # function of the gradient.
        self.assertAllClose(-math_ops.sin(x), gg)
Example #12
      def f(x):
        y = math_ops.sin(x.numpy())

        def grad(dy):
          with forwardprop_util.push_forwardprop_state():
            x_copy = constant_op.constant(x.numpy())
            acc._watch(x_copy, dy)
            y_copy = math_ops.sin(x_copy)
          return dy * acc.jvp(y_copy)

        return y, grad
 def Test(self):
   np.random.seed(1)
   n = shape_[-1]
   batch_shape = shape_[:-2]
   np_dtype = dtype_.as_numpy_dtype
   a = np.random.uniform(
       low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
   if dtype_.is_complex:
     a += 1j * np.random.uniform(
         low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
   a += np.conj(a.T)
   a = np.tile(a, batch_shape + (1, 1))
   # Optimal stepsize for central difference is O(epsilon^{1/3}).
   epsilon = np.finfo(np_dtype).eps
   delta = 0.1 * epsilon**(1.0 / 3.0)
   # tolerance obtained by looking at actual differences using
   # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
   if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
     tol = 1e-2
   else:
     tol = 1e-7
   with self.session(use_gpu=True):
     tf_a = constant_op.constant(a)
     if compute_v_:
       tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)
       # (complex) Eigenvectors are only unique up to an arbitrary phase
       # We normalize the vectors such that the first component has phase 0.
       top_rows = tf_v[..., 0:1, :]
       if tf_a.dtype.is_complex:
         angle = -math_ops.angle(top_rows)
         phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
       else:
         phase = math_ops.sign(top_rows)
       tf_v *= phase
       outputs = [tf_e, tf_v]
     else:
       tf_e = linalg_ops.self_adjoint_eigvals(tf_a)
       outputs = [tf_e]
     for b in outputs:
       x_init = np.random.uniform(
           low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
       if dtype_.is_complex:
         x_init += 1j * np.random.uniform(
             low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
       x_init += np.conj(x_init.T)
       x_init = np.tile(x_init, batch_shape + (1, 1))
       theoretical, numerical = gradient_checker.compute_gradient(
           tf_a,
           tf_a.get_shape().as_list(),
           b,
           b.get_shape().as_list(),
           x_init_value=x_init,
           delta=delta)
       self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
 def Compute(x):
   e, v = linalg_ops.self_adjoint_eig(x)
   # (complex) Eigenvectors are only unique up to an arbitrary phase
   # We normalize the vectors such that the first component has phase 0.
   top_rows = v[..., 0:1, :]
   if dtype_.is_complex:
     angle = -math_ops.angle(top_rows)
     phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
   else:
     phase = math_ops.sign(top_rows)
   v *= phase
   return e, v
Example #15
def vorbis_window(window_length, dtype=dtypes.float32, name=None):
  """Generate a [Vorbis power complementary window][vorbis].

  Args:
    window_length: A scalar `Tensor` indicating the window length to generate.
    dtype: The data type to produce. Must be a floating point type.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of shape `[window_length]` of type `dtype`.

  [vorbis]:
    https://en.wikipedia.org/wiki/Modified_discrete_cosine_transform#Window_functions
  """
  with ops.name_scope(name, 'vorbis_window'):
    window_length = _check_params(window_length, dtype)
    arg = math_ops.cast(math_ops.range(window_length), dtype=dtype)
    window = math_ops.sin(np.pi / 2.0 * math_ops.pow(math_ops.sin(
        np.pi / math_ops.cast(window_length, dtype=dtype) *
        (arg + 0.5)), 2.0))
  return window
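
The formula is compact enough to mirror in NumPy, which also makes it easy to check the power-complementary property the docstring refers to. A sketch; `np_vorbis_window` is a hypothetical helper, not part of the source:

import numpy as np

def np_vorbis_window(n):
    # w[k] = sin(pi/2 * sin^2(pi/n * (k + 0.5))), as in vorbis_window above.
    k = np.arange(n)
    return np.sin(np.pi / 2.0 * np.sin(np.pi / n * (k + 0.5)) ** 2)

w = np_vorbis_window(64)
# Power complementarity across 50%-overlapped halves: w[k]^2 + w[k + n/2]^2 == 1.
print(np.allclose(w[:32] ** 2 + w[32:] ** 2, 1.0))  # True
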
Example #16
def angles_to_projective_transforms(angles, image_height, image_width):
    """Returns projective transform(s) for the given angle(s).

  Args:
    angles: A scalar angle to rotate all images by, or (for batches of images)
      a vector with an angle to rotate each image in the batch.
    image_height: Height of the image(s) to be transformed.
    image_width: Width of the image(s) to be transformed.

  Returns:
    A tensor of shape (num_images, 8). Projective transforms which can be given
      to `tf.contrib.image.transform`.
  """
    angle_or_angles = ops.convert_to_tensor(angles,
                                            name="angles",
                                            dtype=dtypes.float32)
    if len(angle_or_angles.get_shape()) == 0:  # pylint: disable=g-explicit-length-test
        angles = angle_or_angles[None]
    elif len(angle_or_angles.get_shape()) == 1:
        angles = angle_or_angles
    else:
        raise TypeError("Angles should have rank 0 or 1.")
    x_offset = ((image_width - 1) - (math_ops.cos(angles) *
                                     (image_width - 1) - math_ops.sin(angles) *
                                     (image_height - 1))) / 2.0
    y_offset = ((image_height - 1) -
                (math_ops.sin(angles) *
                 (image_width - 1) + math_ops.cos(angles) *
                 (image_height - 1))) / 2.0
    num_angles = array_ops.shape(angles)[0]
    return array_ops.concat(values=[
        math_ops.cos(angles)[:, None],
        -math_ops.sin(angles)[:, None],
        x_offset[:, None],
        math_ops.sin(angles)[:, None],
        math_ops.cos(angles)[:, None],
        y_offset[:, None],
        array_ops.zeros((num_angles, 2), dtypes.float32),
    ],
                            axis=1)
Example #18
def get_rotation_matrix(angles, image_height, image_width, name=None):
    """Returns projective transform(s) for the given angle(s).

  Args:
    angles: A scalar angle to rotate all images by, or (for batches of images) a
      vector with an angle to rotate each image in the batch. The rank must be
      statically known (the shape is not `TensorShape(None)`).
    image_height: Height of the image(s) to be transformed.
    image_width: Width of the image(s) to be transformed.
    name: The name of the op.

  Returns:
    A tensor of shape (num_images, 8). Projective transforms which can be given
      to operation `image_projective_transform_v2`. If one row of transforms is
       [a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
       `(x, y)` to a transformed *input* point
       `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
       where `k = c0 x + c1 y + 1`.
  """
    with ops.name_scope(name, 'rotation_matrix'):
        x_offset = ((image_width - 1) -
                    (math_ops.cos(angles) *
                     (image_width - 1) - math_ops.sin(angles) *
                     (image_height - 1))) / 2.0
        y_offset = ((image_height - 1) -
                    (math_ops.sin(angles) *
                     (image_width - 1) + math_ops.cos(angles) *
                     (image_height - 1))) / 2.0
        num_angles = array_ops.shape(angles)[0]
        return array_ops.concat(values=[
            math_ops.cos(angles)[:, None],
            -math_ops.sin(angles)[:, None],
            x_offset[:, None],
            math_ops.sin(angles)[:, None],
            math_ops.cos(angles)[:, None],
            y_offset[:, None],
            array_ops.zeros((num_angles, 2), dtypes.float32),
        ],
                                axis=1)
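
The `x_offset`/`y_offset` terms are chosen so the rotation is about the image centre. A NumPy sketch that builds the same 8-parameter row and checks that the centre is a fixed point (`np_rotation_transform` is a hypothetical helper, not part of the source):

import numpy as np

def np_rotation_transform(theta, h, w):
    cx, cy = (w - 1) / 2.0, (h - 1) / 2.0
    x_off = cx - (np.cos(theta) * cx - np.sin(theta) * cy)
    y_off = cy - (np.sin(theta) * cx + np.cos(theta) * cy)
    return np.array([np.cos(theta), -np.sin(theta), x_off,
                     np.sin(theta), np.cos(theta), y_off, 0.0, 0.0])

a0, a1, a2, b0, b1, b2, _, _ = np_rotation_transform(np.pi / 6, 32, 32)
cx = cy = (32 - 1) / 2.0
# The transform maps the output centre back to the input centre.
print(np.allclose([a0 * cx + a1 * cy + a2, b0 * cx + b1 * cy + b2], [cx, cy]))  # True
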
Example #20
def getRotatePoint(map_shape, rotate_center, rotate_theta, origin_point):
    """
    实现功能,得到绕旋转中心旋转theta角度后的坐标
    :param map_shape:原始地图的尺寸,因为Image中的坐标原点在图片左上角,需要改变坐标系    Tensor-[height,width,channel]
    :param rotate_center:旋转中心   Tensor-[loc_x,loc_y]
    :param rotate_theta:旋转角度   Tensor-[theta]
    :param origin_point:需要进行旋转操作的点集 Tensor-[loc_x,loc_y]
    :return: rotate_point_list: Tensor-[loc_x,loc_y]
    """
    row = map_shape[0]
    center_x = rotate_center[0]
    center_y = row - rotate_center[1]
    point_x = origin_point[0]
    point_y = row - origin_point[1]

    after_rotate_x = math_ops.round(
        (point_x - center_x) * math_ops.cos(rotate_theta) -
        (point_y - center_y) * math_ops.sin(rotate_theta) + center_x)
    after_rotate_y = row - math_ops.round(
        (point_x - center_x) * math_ops.sin(rotate_theta) +
        (point_y - center_y) * math_ops.cos(rotate_theta) + center_y)
    rotate_point = [after_rotate_x, after_rotate_y]
    rotate_point = tf.reshape(rotate_point, [2])
    return rotate_point
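
A NumPy mirror of the same coordinate math, handy for checking the y-axis flip: rotating a point by a full turn returns it unchanged (`np_rotate_point` is a hypothetical helper, not part of the source):

import numpy as np

def np_rotate_point(row, center, theta, point):
    # Flip y so the origin moves from the top-left corner to the bottom-left.
    cx, cy = center[0], row - center[1]
    px, py = point[0], row - point[1]
    rx = np.round((px - cx) * np.cos(theta) - (py - cy) * np.sin(theta) + cx)
    ry = row - np.round((px - cx) * np.sin(theta) + (py - cy) * np.cos(theta) + cy)
    return rx, ry

rx, ry = np_rotate_point(100, (50, 50), 2 * np.pi, (10, 20))
print(rx, ry)  # 10.0 20.0
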
Example #21
      def Compute(x):
        e, v = linalg_ops.eig(x)

        # We sort eigenvalues by e.real+e.imag to have consistent
        # order between runs
        b_dims = len(e.shape) - 1
        idx = sort_ops.argsort(math_ops.real(e) + math_ops.imag(e), axis=-1)
        e = array_ops.gather(e, idx, batch_dims=b_dims)
        v = array_ops.gather(v, idx, batch_dims=b_dims)

        # (complex) Eigenvectors are only unique up to an arbitrary phase
        # We normalize the vectors such that the first component has phase 0.
        top_rows = v[..., 0:1, :]
        angle = -math_ops.angle(top_rows)
        phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
        v *= phase
        return e, v
Example #22
  def test_stft_round_trip(self):
    # Tuples of (signal_length, frame_length, frame_step, fft_length).
    test_configs = [
        # 87.5% overlap.
        (4096, 256, 32, 256),
        # 75% overlap.
        (4096, 256, 64, 256),
        # Odd frame hop.
        (4096, 128, 25, 128),
        # Odd frame length.
        (4096, 127, 32, 128),
    ]

    for signal_length, frame_length, frame_step, fft_length in test_configs:
      # Generate a 440Hz signal at 8kHz sample rate.
      signal = math_ops.sin(2 * np.pi * 440 / 8000 *
                            math_ops.to_float(math_ops.range(signal_length)))
      self._compare_round_trip(signal, frame_length, frame_step, fft_length)
Example #24
  def test_gradients(self):
    """Test that spectral_ops.stft has a working gradient."""
    with spectral_ops_test_util.fft_kernel_label_map(), (
        self.test_session(use_gpu=True)) as sess:
      signal_length = 512

      # An all-zero signal has all zero gradients with respect to the sum of the
      # magnitude STFT.
      empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)
      empty_signal_gradient = sess.run(
          self._compute_stft_gradient(empty_signal))
      self.assertTrue((empty_signal_gradient == 0.0).all())

      # A sinusoid will have non-zero components of its gradient with respect to
      # the sum of the magnitude STFT.
      sinusoid = math_ops.sin(
          2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))
      sinusoid_gradient = sess.run(self._compute_stft_gradient(sinusoid))
      self.assertFalse((sinusoid_gradient == 0.0).all())
Example #25
 def _NormalizingSvd(tf_a):
   tf_s, tf_u, tf_v = linalg_ops.svd(tf_a, compute_uv=True, full_matrices=True)
   # Singular vectors are only unique up to an arbitrary phase. We normalize
   # the vectors such that the first component of u (if m >=n) or v (if n > m)
   # have phase 0.
   m = tf_a.shape[-2]
   n = tf_a.shape[-1]
   if m >= n:
     top_rows = tf_u[..., 0:1, :]
   else:
     top_rows = tf_v[..., 0:1, :]
   if tf_u.dtype.is_complex:
     angle = -math_ops.angle(top_rows)
     phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
   else:
     phase = math_ops.sign(top_rows)
   tf_u *= phase[..., :m]
   tf_v *= phase[..., :n]
   return tf_s, tf_u, tf_v
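
The same phase fix is easy to demonstrate with NumPy's SVD: without it, two factorizations of the same matrix can differ by a per-column phase. A sketch, assuming the m >= n branch:

import numpy as np

a = np.random.randn(4, 3) + 1j * np.random.randn(4, 3)
u, s, vh = np.linalg.svd(a, full_matrices=True)
# exp(-1j * angle) is the cos(angle) + 1j * sin(angle) factor used above.
phase = np.exp(-1j * np.angle(u[0:1, :]))
u = u * phase
print(np.allclose(u[0, :].imag, 0.0))  # True: first row of u now has phase 0
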
Example #28
 def _NormalizingSvd(tf_a):
   tf_s, tf_u, tf_v = linalg_ops.svd(
       tf_a, compute_uv=True, full_matrices=full_matrices_)
   # Singular vectors are only unique up to an arbitrary phase. We normalize
   # the vectors such that the first component of u (if m >=n) or v (if n > m)
   # have phase 0.
   m = tf_a.shape[-2]
   n = tf_a.shape[-1]
   if m >= n:
     top_rows = tf_u[..., 0:1, :]
   else:
     top_rows = tf_v[..., 0:1, :]
   if tf_u.dtype.is_complex:
     angle = -math_ops.angle(top_rows)
     phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
   else:
     phase = math_ops.sign(top_rows)
   tf_u *= phase[..., :m]
   tf_v *= phase[..., :n]
   return tf_s, tf_u, tf_v
Example #29
 def input_fn():
     start = random_ops.random_uniform((),
                                       minval=0,
                                       maxval=(np.pi * 2.0),
                                       dtype=dtypes.float32,
                                       seed=seed)
     sin_curves = math_ops.sin(
         math_ops.linspace(start, (sequence_length - 1) * increment,
                           sequence_length + 1))
     inputs = array_ops.slice(sin_curves, [0], [sequence_length])
     labels = array_ops.slice(sin_curves, [1], [sequence_length])
     input_key = string_ops.string_join([
         'key_',
         string_ops.as_string(
             math_ops.cast(10000 * start, dtypes.int32))
     ])
     return {
         'inputs': inputs,
         input_key_column_name: input_key
     }, labels
Example #30
  def test_gradients(self):
    """Test that spectral_ops.stft has a working gradient."""
    # TODO(rjryan): Update gradient tests for Eager.
    if context.executing_eagerly():
      return
    with self.session(use_gpu=True) as sess:
      signal_length = 512

      # An all-zero signal has all zero gradients with respect to the sum of the
      # magnitude STFT.
      empty_signal = array_ops.zeros([signal_length], dtype=dtypes.float32)
      empty_signal_gradient = sess.run(
          self._compute_stft_gradient(empty_signal))
      self.assertTrue((empty_signal_gradient == 0.0).all())

      # A sinusoid will have non-zero components of its gradient with respect to
      # the sum of the magnitude STFT.
      sinusoid = math_ops.sin(
          2 * np.pi * math_ops.linspace(0.0, 1.0, signal_length))
      sinusoid_gradient = self.evaluate(self._compute_stft_gradient(sinusoid))
      self.assertFalse((sinusoid_gradient == 0.0).all())
Example #31
def get_multi_engine_graph_def(mode="FP32"):
  """Create a simple graph and return its graph_def."""
  dtype = dtypes.float32
  if mode.upper() == "FP16":
    dtype = dtypes.float16
  else:
    pass

  g = ops.Graph()
  with g.as_default():
    x = aops.placeholder(shape=[None, 3, 7, 5], name="input", dtype=dtype)
    with g.name_scope("Global_scope"):
      with g.name_scope("first_scope"):
        e = cop.constant(
            np.random.randn(3, 2, 3, 4), name="weights", dtype=dtype)
        conv = nn.conv2d(
            input=x,
            filter=e,
            data_format="NCHW",
            strides=[1, 1, 1, 1],
            padding="VALID",
            name="conv")
        b = cop.constant(np.random.randn(1, 4, 1, 1), name="bias1", dtype=dtype)
        t = conv * b

        b = cop.constant(np.random.randn(1, 4, 1, 1), name="bias2", dtype=dtype)
        q = conv / b
      edge = mops.sin(q)
      edge1 = mops.cos(conv)
      with g.name_scope("test_scope"):
        de = edge + edge1
        t -= edge1
        q *= edge
        t += q
        t -= de
    k = aops.squeeze(t, name="output")
  print(k.dtype)
  return g.as_graph_def()
 def sin1p_log_sum(x, y):
     return math_ops.sin(1.0 + log_sum(x, y))
Example #34
def rotate(images, angles):
  """Rotate image(s) by the passed angle(s) in radians.

  Args:
    images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
       (NHWC), (num_rows, num_columns, num_channels) (HWC), or
       (num_rows, num_columns) (HW).
    angles: A scalar angle to rotate all images by, or (if images has rank 4)
       a vector of length num_images, with an angle for each image in the batch.

  Returns:
    Image(s) with the same type and shape as `images`, rotated by the given
    angle(s). Empty space due to the rotation will be filled with zeros.

  Raises:
    TypeError: If `image` is an invalid type.
  """
  image_or_images = ops.convert_to_tensor(images, name="images")
  angle_or_angles = ops.convert_to_tensor(
      angles, name="angles", dtype=dtypes.float32)
  if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
    raise TypeError("Invalid dtype %s." % image_or_images.dtype)
  if len(image_or_images.get_shape()) == 2:
    images = image_or_images[None, :, :, None]
  elif len(image_or_images.get_shape()) == 3:
    images = image_or_images[None, :, :, :]
  elif len(image_or_images.get_shape()) == 4:
    images = image_or_images
  else:
    raise TypeError("Images should have rank between 2 and 4.")

  if len(angle_or_angles.get_shape()) == 0:  # pylint: disable=g-explicit-length-test
    angles = angle_or_angles[None]
  elif len(angle_or_angles.get_shape()) == 1:
    angles = angle_or_angles
  else:
    raise TypeError("Angles should have rank 0 or 1.")
  image_width = math_ops.cast(array_ops.shape(images)[2], dtypes.float32)[None]
  image_height = math_ops.cast(array_ops.shape(images)[1], dtypes.float32)[None]
  x_offset = ((image_width - 1) - (math_ops.cos(angles) *
                                   (image_width - 1) - math_ops.sin(angles) *
                                   (image_height - 1))) / 2.0
  y_offset = ((image_height - 1) - (math_ops.sin(angles) *
                                    (image_width - 1) + math_ops.cos(angles) *
                                    (image_height - 1))) / 2.0
  num_angles = array_ops.shape(angles)[0]
  transforms = array_ops.concat(
      values=[
          math_ops.cos(angles)[:, None],
          -math_ops.sin(angles)[:, None],
          x_offset[:, None],
          math_ops.sin(angles)[:, None],
          math_ops.cos(angles)[:, None],
          y_offset[:, None],
          array_ops.zeros((num_angles, 2), dtypes.float32),
      ],
      axis=1)
  # pylint: disable=protected-access
  output = transform(images, transforms)
  if len(image_or_images.get_shape()) == 2:
    return output[0, :, :, 0]
  elif len(image_or_images.get_shape()) == 3:
    return output[0, :, :, :]
  else:
    return output
Example #35
def _CosGrad(op, grad):
    """Returns grad * -sin(x)."""
    x = op.inputs[0]
    with ops.control_dependencies([grad]):
        x = math_ops.conj(x)
        return -grad * math_ops.sin(x)
Example #36
 def get_gradients():
   with backprop.GradientTape() as tape:
     loss = math_ops.sin(math_ops.square(v))
     gradients = tape.gradient(loss, v)
   return gradients
Example #37
  def transition_to_powers(self, powers):
    """Computes TransitionMatrix^power efficiently.

    For an n x n transition matrix we have:

      (TransitionMatrix**power)_{i, j} = (-1) ** i * sin(pi * power) / (n + 1)
          * ((-1) ** j / sin(pi / (n + 1) * (power - i + j))
             + 1 / sin(pi / (n + 1) * (power - i - 1)))

    The sin(pi * power) term is zero whenever "power" is an integer. However,
    the 1 / sin(x) terms (cosecants) occasionally (when their arguments are
    multiples of pi) cancel out this value. The limit as the argument approaches
    an integer value gives the "correct" result, but computing these separately
    gives 0 * inf = NaN. Instead, there is a special case for near-integer
    values.

    Args:
      powers: A floating point Tensor of powers to raise the transition matrix
        to.
    Returns:
      A [..., self._num_latent_values - 1, self._num_latent_values - 1] floating
        point Tensor with the transition matrix raised to each power in
        `powers`.

    """
    num_latent_values_float = math_ops.cast(self._num_latent_values, self.dtype)
    latent_values_per_period = (num_latent_values_float / math_ops.cast(
        self._true_periodicity, dtype=self.dtype))
    original_matrix_powers = (math_ops.cast(powers, self.dtype) *
                              latent_values_per_period)
    global_coeff = (math_ops.sin(original_matrix_powers * numpy.pi) /
                    num_latent_values_float)[..., None, None]
    matrix_dimension_range = array_ops.reshape(
        math_ops.range(self._num_latent_values - 1),
        array_ops.concat(
            [
                array_ops.ones(
                    [array_ops.rank(original_matrix_powers)],
                    dtype=dtypes.int32), [self._num_latent_values - 1]
            ],
            axis=0))
    matrix_dimension_range_float = math_ops.cast(matrix_dimension_range,
                                                 self.dtype)
    alternating = math_ops.cast(1 - 2 * (matrix_dimension_range % 2),
                                self.dtype)
    row_addend = 1. / math_ops.sin(numpy.pi / num_latent_values_float * (
        original_matrix_powers[..., None] - matrix_dimension_range_float - 1))
    column_minus_row = (matrix_dimension_range_float[..., None, :]
                        - matrix_dimension_range_float[..., None])
    full_matrix_addend = (alternating[..., None, :] / math_ops.sin(
        numpy.pi / num_latent_values_float *
        (original_matrix_powers[..., None, None] + column_minus_row)))
    continuous_construction = global_coeff * alternating[..., None] * (
        row_addend[..., None] + full_matrix_addend)
    # For integer powers, the above formula is only correct in the limit,
    # yielding NaNs as written. We defer to the super-class in such cases, which
    # computes integer powers exactly.
    return array_ops.where(
        self._close_to_integer(original_matrix_powers),
        super(ResolutionCycleModel, self).transition_to_powers(
            math_ops.cast(
                gen_math_ops.round(original_matrix_powers), dtypes.int64)),
        continuous_construction)
Example #38
 def log_2plus_unique_x(x):
   op_callbacks.add_op_callback(instrument.callback)
   unique_values, _ = array_ops.unique(x)
   y = math_ops.log(2.0 + unique_values)
   op_callbacks.remove_op_callback(instrument.callback)
   return math_ops.sin(y)
Example #39
 def _cosecant_with_freq(coefficient):
   return 1. / math_ops.sin(numpy.pi / num_latent_values_float * coefficient)
 def tf_function(self, x):
   """Takes tf tensor, evaluates the test function,  and returns tf tensor."""
   return math_ops.reduce_sum(
       math_ops.square(x - 0.5) + 0.25 * x + 1 * math_ops.sin(x * 15),
       2,
       keepdims=True)
Example #41
 def testJVPManual(self):
   primal, tangent = _jvp(math_ops.sin, (constant_op.constant(0.1),),
                          (constant_op.constant(0.2),))
   self.assertAllClose(math_ops.sin(0.1), primal)
   self.assertAllClose(math_ops.cos(0.1) * 0.2, tangent)
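
The same forward-mode quantities are available from the public TF2 API; a sketch using `tf.autodiff.ForwardAccumulator` (an assumption about the environment, not part of the test above):

import tensorflow as tf

x = tf.constant(0.1)
with tf.autodiff.ForwardAccumulator(primals=x, tangents=tf.constant(0.2)) as acc:
    y = tf.sin(x)
# JVP of sin at 0.1 with tangent 0.2: cos(0.1) * 0.2
print(float(y), float(acc.jvp(y)))
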
Example #42
 def f(x):
     pi_x = x * np.pi
     return array_ops.where_v2(x == 0, array_ops.ones_like(x),
                               math_ops.sin(pi_x) / pi_x)
 def tf_function(self, x):
   """Takes tf tensor, evaluates the test function,  and returns tf tensor."""
   return math_ops.reduce_mean(
       math_ops.pow((x - 0.5), 3) - 0.25 * x + 10 * math_ops.sin(x * 10),
       2,
       keepdims=True)
 def _sin_fn(x):
   ranger = math_ops.linspace(
       array_ops.reshape(x[0], []), (sequence_length - 1) * increment,
       sequence_length + 1)
   return math_ops.sin(ranger)
Example #45
def _CosGrad(op, grad):
  """Returns grad * -sin(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    return -grad * math_ops.sin(x)
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import nest

_COS_DERIVATIVES = [
    math_ops.cos, lambda x: -math_ops.sin(x), lambda x: -math_ops.cos(x),
    math_ops.sin, math_ops.cos
]


class FunctionGradientsTest(test.TestCase, parameterized.TestCase):
    def testGraphModeWithGradients(self):
        v = resource_variable_ops.ResourceVariable(1.0, name='v')

        @def_function.function
        def step():
            def inner():
                return v * v

            return backprop.implicit_grad(inner)()[0][0]
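
`_COS_DERIVATIVES` above encodes the derivative cycle cos -> -sin -> -cos -> sin -> cos. A quick eager-mode sketch of the first two steps with nested tapes (assuming the public TF2 API, independent of the test class):

import tensorflow as tf

x = tf.constant(0.7)
with tf.GradientTape() as outer:
    outer.watch(x)
    with tf.GradientTape() as inner:
        inner.watch(x)
        y = tf.cos(x)
    dy = inner.gradient(y, x)   # first derivative: -sin(x)
d2y = outer.gradient(dy, x)     # second derivative: -cos(x)
print(float(dy + tf.sin(x)), float(d2y + tf.cos(x)))  # both ~0.0
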
Example #47
 def f(x):
   pointwise = math_ops.sin(x) * math_ops.tan(x)
   return math_ops.reduce_prod(
       pointwise + math_ops.reduce_sum(pointwise), axis=1)
Example #48
  def _power_sum_array(self, max_remaining_steps):
    r"""Computes \sum_{i=0}^{N-1} A^i B (A^i)^T for N=0..max_remaining_steps.

    A is the transition matrix and B is the noise covariance.

    This is more efficient in practice than math_utils.power_sums_tensor, since
    each A^i B (A^i)^T term has a closed-form expression not depending on i - 1.
    Thus vectorization can replace explicit looping.

    Uses a cumulative sum on the following expression:

      (transition^p * transition_covariance * (transition^p)^T)_{i, j}
        = (-1)^(i + j) * sin^2(pi * p) / num_latent_values^2
          * (1/sin(pi / num_latent_values * (p - i))
             + 1/sin(pi / num_latent_values * (p - i - 1)))
          * (1/sin(pi / num_latent_values * (p - j))
             + 1/sin(pi / num_latent_values * (p - j - 1)))

    The expression being derived from the eigenvectors and eigenvalues given in
    the class docstring (and as with CycleStateSpaceModel taking advantage of
    the sparsity of the transition covariance).

    Args:
      max_remaining_steps: A scalar integer Tensor indicating the number of
        non-trivial values to compute.
    Returns:
      A [max_remaining_steps + 1, self._num_latent_values - 1,
      self._num_latent_values - 1] floating point Tensor S with cumulative power
      sums.

      S[N] = \sum_{i=0}^{N-1} A^i B (A^i)^T
        S[0] is the zero matrix
        S[1] is B
        S[2] is A B A^T + B

    """
    num_latent_values_float = math_ops.cast(self._num_latent_values, self.dtype)
    latent_values_per_period = (num_latent_values_float / math_ops.cast(
        self._true_periodicity, dtype=self.dtype))
    original_matrix_powers = (math_ops.cast(
        math_ops.range(max_remaining_steps),
        self.dtype) * latent_values_per_period)
    matrix_dimension_range = math_ops.range(
        self._num_latent_values - 1)[None, ...]
    matrix_dimension_range_float = math_ops.cast(matrix_dimension_range,
                                                 self.dtype)
    def _cosecant_with_freq(coefficient):
      return 1. / math_ops.sin(numpy.pi / num_latent_values_float * coefficient)
    power_minus_index = (original_matrix_powers[..., None]
                         - matrix_dimension_range_float)
    mesh_values = (_cosecant_with_freq(power_minus_index)
                   + _cosecant_with_freq(power_minus_index - 1.))
    meshed = mesh_values[..., None, :] * mesh_values[..., None]
    full_matrix_alternating = math_ops.cast(1 - 2 * (
        (matrix_dimension_range[..., None, :] +
         matrix_dimension_range[..., None]) % 2), self.dtype)
    def _sine_discontinuity(value):
      """A special case for dealing with discontinuities.

      Decides whether `value` is close to an integer, and if so computes:

        lim x->n |sin(x * pi)| / sin(x * pi) = sign(sin(n * pi))
                                             = cos(n * pi)

      Args:
        value: The floating point Tensor value which may lead to a
            discontinuity.
      Returns:
        A tuple of (is_discontinuous, sign):
          is_discontinuous: A boolean Tensor of the same shape as `value`,
              indicating whether it is near an integer.
          sign: A floating point Tensor indicating the sign of the discontinuity
            (being near 1 or -1 when `is_discontinuous` is True), of the same
            shape and type as `value`.
      """
      normalized = value / num_latent_values_float
      is_discontinuous = self._close_to_integer(normalized)
      sign = math_ops.cos(normalized * numpy.pi)
      return is_discontinuous, sign
    index_discontinuous, index_sign = _sine_discontinuity(
        original_matrix_powers[..., None]
        - matrix_dimension_range_float)
    index_minus_discontinuous, index_minus_sign = _sine_discontinuity(
        original_matrix_powers[..., None]
        - matrix_dimension_range_float
        - 1)
    ones_mask_vector = math_ops.logical_or(index_discontinuous,
                                           index_minus_discontinuous)
    ones_sign_vector = array_ops.where(index_discontinuous, index_sign,
                                       index_minus_sign)
    ones_mask = math_ops.logical_and(ones_mask_vector[..., None],
                                     ones_mask_vector[..., None, :])
    zeros_mask = self._close_to_integer(original_matrix_powers)
    zeroed = array_ops.where(zeros_mask, array_ops.zeros_like(meshed), meshed)
    global_coefficient = (math_ops.sin(numpy.pi * original_matrix_powers) /
                          num_latent_values_float)
    masked_meshed = array_ops.where(
        ones_mask, ones_sign_vector[..., None] * ones_sign_vector[..., None, :],
        zeroed * global_coefficient[..., None, None]**2)
    powers_above_zero = full_matrix_alternating * masked_meshed
    return array_ops.pad(
        math_ops.cumsum(powers_above_zero), [(1, 0), (0, 0), (0, 0)])
Example #49
 def func0(x):
   return math_ops.square(math_ops.sin(x))
Example #50
def _CosGrad(op, grad):
  """Returns grad * -sin(x)."""
  x = op.inputs[0]
  return -grad * math_ops.sin(x)