Example #1
  def testReduceStd(self):
    x = np.array([[0, 0, 0], [0, 0, 0]], "float32")
    self.assertAllClose(self.evaluate(math_ops.reduce_std(x)), 0)
    self.assertAllClose(
        self.evaluate(math_ops.reduce_std(x, axis=0)), [0, 0, 0])

    x = np.array([[1, 2, 1, 1], [1, 1, 0, 1]], "float32")
    self.assertAllClose(self.evaluate(math_ops.reduce_std(x)), 0.5)
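Note: reduce_std computes the population standard deviation (ddof=0), which is why 0.5 is the expected value here. A minimal NumPy check (not part of the test):

import numpy as np

x = np.array([[1, 2, 1, 1], [1, 1, 0, 1]], "float32")
# Mean is 1.0; squared deviations sum to 2 over 8 elements, so the variance is 0.25.
print(np.std(x))          # 0.5  -- matches reduce_std (population std, ddof=0)
print(np.std(x, ddof=1))  # ~0.53 -- the sample std, which reduce_std does not compute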
Example #2
    def testReduceStd(self):
        x = np.array([[0, 0, 0], [0, 0, 0]], "float32")
        self.assertAllClose(self.evaluate(math_ops.reduce_std(x)), 0)
        self.assertAllClose(self.evaluate(math_ops.reduce_std(x, axis=0)),
                            [0, 0, 0])

        x = np.array([[1, 2, 1, 1], [1, 1, 0, 1]], "float32")
        self.assertAllClose(self.evaluate(math_ops.reduce_std(x)), 0.5)
Example #3
 def _gradient_normalization(self,
                             grad,
                             non_zero,
                             centralize_gradients=True,
                             normalize_gradients=True):
     """
     substract the mean from the gradient and divide it by its standard deviation
     `non_zero` is a function that takes an input and insures that it will not be zero or negative
     """
     ndim = grad.shape.ndims
     can_centralize = centralize_gradients and (ndim > 1)
     size = 1
     for i in range(ndim):
         size *= grad.shape.dims[i].value  # np.prod(grad.shape.dims)
     can_normalize = normalize_gradients and (size > 2)
     if can_centralize or can_normalize:
         # takes into account the fact that the gradient might be 1D
         keepdims = (ndim > 1)
         axis = list(range(0, ndim - 1)) if keepdims else None
          # subtract the mean from the gradient
         grad_mean = math_ops.reduce_mean(grad,
                                          axis=axis,
                                          keepdims=keepdims)
         grad -= grad_mean
         if can_normalize:
             # divide the centralized gradient by its standard deviation
             grad_std = math_ops.reduce_std(grad,
                                            axis=axis,
                                            keepdims=keepdims)
             grad /= non_zero(
                 grad_std)  # we divide *after* subtracting the mean
             # add the mean back to the gradient if we don't want to centralize it
             if not can_centralize:
                 grad += grad_mean
     return grad
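The helper above centralizes over all but the last axis and then rescales. A standalone sketch of the same idea using the public tf.math API (the eps guard here stands in for the non_zero argument, whose definition is not shown in this snippet):

import tensorflow as tf

def normalize_gradient(grad, eps=1e-8):
    # Centralize over all but the last axis, as _gradient_normalization does,
    # then divide by the standard deviation, guarding against division by zero.
    if grad.shape.ndims > 1:
        axis = list(range(grad.shape.ndims - 1))
        grad = grad - tf.math.reduce_mean(grad, axis=axis, keepdims=True)
        grad = grad / (tf.math.reduce_std(grad, axis=axis, keepdims=True) + eps)
    return grad

print(normalize_gradient(tf.random.normal([3, 4])))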
Example #4
    def call(self, y_true, y_pred):
        y_pred = ops.convert_to_tensor_v2(y_pred)
        y_true = math_ops.cast(y_true, y_pred.dtype)

        y_pred = math_ops.round(y_pred)
        y_true = math_ops.round(y_true)
        y_pred_mean = math_ops.reduce_mean(y_pred, keepdims=True)
        y_true_mean = math_ops.reduce_mean(y_true, keepdims=True)
        pred_std = math_ops.reduce_std(y_pred, keepdims=True)
        true_std = math_ops.reduce_std(y_true, keepdims=True)

        pearson = self.pcc(y_true, y_pred)
        ccc_n = (2.0 * pearson * pred_std * true_std)

        ccc_d = (math_ops.square(pred_std) + math_ops.square(true_std) +
                 math_ops.square(y_pred_mean - y_true_mean))

        ccc = ccc_n / (ccc_d + 1e-25)
        return ccc
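The quantity computed here is Lin's concordance correlation coefficient: CCC = 2*rho*sigma_pred*sigma_true / (sigma_pred^2 + sigma_true^2 + (mu_pred - mu_true)^2). A minimal NumPy reference version (np.corrcoef stands in for the pcc term, whose definition is not shown):

import numpy as np

def ccc(y_true, y_pred, eps=1e-25):
    # Lin's concordance correlation coefficient for 1-D arrays.
    pearson = np.corrcoef(y_true, y_pred)[0, 1]
    num = 2.0 * pearson * y_true.std() * y_pred.std()
    den = y_true.var() + y_pred.var() + (y_true.mean() - y_pred.mean()) ** 2
    return num / (den + eps)

print(ccc(np.array([1.0, 2.0, 3.0]), np.array([1.1, 1.9, 3.2])))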
Example #5
 def testReduceStdComplex(self):
   # Ensure that complex values are handled consistently with numpy
   complex_ys = [([0 - 1j, 0 + 1j], dtypes.float64),
                 (np.array([0 - 1j, 0 + 1j], "complex64"), dtypes.float32),
                 (np.array([0 - 1j, 0 + 1j], "complex128"), dtypes.float64)]
   for y, dtype in complex_ys:
     y_result = math_ops.reduce_std(y)
     self.assertEqual(np.std(y), 1.0)
     self.assertEqual(self.evaluate(y_result), 1.0)
     self.assertEqual(y_result.dtype, dtype)
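For complex inputs the magnitudes of the deviations are used, so the std of [-1j, +1j] is 1.0, and the result dtype is the real counterpart of the complex input. A quick check with the public API (assuming a TF build where reduce_std accepts complex tensors, as this test exercises):

import numpy as np
import tensorflow as tf

y = np.array([0 - 1j, 0 + 1j], "complex64")
s = tf.math.reduce_std(y)
print(s.numpy(), s.dtype)  # 1.0 <dtype: 'float32'> -- real dtype matching complex64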
Example #6
    def update_state(self, y_true, y_pred, sample_weight=None):
        y_pred = ops.convert_to_tensor_v2(y_pred)
        y_true = math_ops.cast(y_true, y_pred.dtype)

        y_pred = math_ops.round(y_pred)
        y_true = math_ops.round(y_true)
        y_pred_mean = math_ops.reduce_mean(y_pred, keepdims=True)
        y_true_mean = math_ops.reduce_mean(y_true, keepdims=True)
        pred_std = math_ops.reduce_std(y_pred, keepdims=True)
        true_std = math_ops.reduce_std(y_true, keepdims=True)

        pearson = self.pcc(y_true, y_pred)
        ccc_n = (2.0 * pearson * pred_std * true_std)

        ccc_d = (math_ops.square(pred_std) + math_ops.square(true_std) +
                 math_ops.square(y_pred_mean - y_true_mean))

        ccc = ccc_n / (ccc_d + 1e-25)
        self.ccc_r.assign_add(tf.reduce_sum(ccc))
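        # len(ccc) requires a statically known first dimension;
        # tf.shape(ccc)[0] would be the graph-safe dynamic alternative.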
        self.total_count.assign_add(len(ccc))
Example #7
    def get_grow_tensor(self, weights, method):
        """Different ways to initialize new connections.

    Args:
      weights: tf.Tensor or Variable.
      method: str, available options: 'zeros', 'random_normal', 'random_uniform'
        and 'initial_value'

    Returns:
      tf.Tensor same shape and type as weights.

    Raises:
      ValueError, when the method is not valid.
    """
        if not isinstance(method, six.string_types):
            raise ValueError('Grow-Init: %s is not a string' % method)

        if method == 'zeros':
            grow_tensor = array_ops.zeros_like(weights, dtype=weights.dtype)
        elif method.startswith('initial_dist'):
            original_shape = weights.initial_value.shape
            divisor = extract_number(method)
            grow_tensor = array_ops.reshape(
                random_ops.random_shuffle(
                    array_ops.reshape(weights.initial_value, [-1])),
                original_shape) / divisor
        elif method.startswith('random_normal'):
            stddev = math_ops.reduce_std(weights)
            divisor = extract_number(method)
            grow_tensor = self._random_normal(
                weights.shape,
                stddev=stddev,
                dtype=weights.dtype,
                seed=hash(weights.name + 'grow_init_n')) / divisor
        elif method.startswith('random_uniform'):
            mean = math_ops.reduce_mean(math_ops.abs(weights))
            divisor = extract_number(method)
            grow_tensor = self._random_uniform(
                weights.shape,
                minval=-mean,
                maxval=mean,
                dtype=weights.dtype,
                seed=hash(weights.name + 'grow_init_u')) / divisor
        else:
            raise ValueError('Grow-Init: %s is not a valid option.' % method)
        return grow_tensor
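For instance, the 'random_normal' branch draws new weights with the same standard deviation as the existing ones. A standalone sketch with the public API (assuming extract_number parses a numeric suffix, e.g. the 2 in 'random_normal_2'; that helper is not shown in this snippet):

import tensorflow as tf

weights = tf.Variable(tf.random.normal([4, 4]))
divisor = 2.0  # what extract_number('random_normal_2') would presumably return
stddev = tf.math.reduce_std(weights)  # match the spread of the existing weights
grow_tensor = tf.random.normal(weights.shape, stddev=stddev, dtype=weights.dtype) / divisor
print(grow_tensor.shape)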
Example #8
  def testReduceStd(self):
    x = np.array([[0, 0, 0], [0, 0, 0]], "float32")
    self.assertAllClose(self.evaluate(math_ops.reduce_std(x)), 0)
    self.assertAllClose(
        self.evaluate(math_ops.reduce_std(x, axis=0)), [0, 0, 0])

    x = [[1, 2, 1, 1], [1, 1, 0, 1]]
    with self.assertRaisesRegexp(TypeError, "must be either real or complex"):
      math_ops.reduce_std(x)

    x = [[1., 2., 1., 1.], [1., 1., 0., 1.]]
    self.assertEqual(self.evaluate(math_ops.reduce_std(x)), 0.5)
    x_np = np.array(x)
    self.assertEqual(np.std(x_np), 0.5)
    self.assertEqual(self.evaluate(math_ops.reduce_std(x_np)), 0.5)
Example #9
    def test_embedding_lookup_sparse_with_initializer(self):
        id = 0
        embed_dim = 8
        elements_num = 262144
        for initializer, target_mean, target_stddev in [
            (init_ops.random_normal_initializer(0.0, 0.001), 0.0, 0.001),
            (init_ops.truncated_normal_initializer(0.0, 0.001), 0.0, 0.00088),
            (keras_init_ops.RandomNormalV2(mean=0.0,
                                           stddev=0.001), 0.0, 0.001),
        ]:
            with self.session(config=default_config,
                              use_gpu=test_util.is_gpu_available()):
                id += 1
                embedding_weights = de.get_variable(
                    "emb-init-bugfix-" + str(id),
                    key_dtype=dtypes.int64,
                    value_dtype=dtypes.float32,
                    devices=_get_devices() * 3,
                    initializer=initializer,
                    dim=embed_dim,
                )

                ids = np.random.randint(
                    -0x7FFFFFFFFFFFFFFF,
                    0x7FFFFFFFFFFFFFFF,
                    elements_num,
                    dtype=np.int64,
                )
                ids = np.unique(ids)
                ids = constant_op.constant(ids, dtypes.int64)
                vals_op = de.embedding_lookup(embedding_weights, ids,
                                              None).eval()

                mean = self.evaluate(math_ops.reduce_mean(vals_op))
                stddev = self.evaluate(math_ops.reduce_std(vals_op))
                rtol = 2e-5
                atol = rtol
                self.assertTrue(not (list(vals_op[0]) == list(vals_op[1])))
                self.assertAllClose(target_mean, mean, rtol, atol)
                self.assertAllClose(target_stddev, stddev, rtol, atol)
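The 0.00088 target for the truncated initializer reflects that truncating a normal distribution at two standard deviations shrinks its std to roughly 0.88 sigma. A quick NumPy check of that factor:

import numpy as np

samples = np.random.default_rng(0).normal(0.0, 0.001, 1_000_000)
truncated = samples[np.abs(samples) < 2 * 0.001]  # keep values within 2 stddevs
print(truncated.std())  # ~0.00088, matching target_stddev above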
Example #10
 def test_variable_initializer(self):
     id = 0
     for initializer, target_mean, target_stddev in [
         (-1.0, -1.0, 0.0),
         (init_ops.random_normal_initializer(0.0, 0.01, seed=2), 0.0, 0.01),
     ]:
         with self.session(config=default_config,
                           use_gpu=test_util.is_gpu_available()):
             id += 1
             keys = constant_op.constant(list(range(2**17)), dtypes.int64)
             table = de.get_variable(
                 "t1" + str(id),
                 key_dtype=dtypes.int64,
                 value_dtype=dtypes.float32,
                 initializer=initializer,
                 dim=10,
             )
             vals_op = table.lookup(keys)
             mean = self.evaluate(math_ops.reduce_mean(vals_op))
             stddev = self.evaluate(math_ops.reduce_std(vals_op))
             rtol = 2e-5
             atol = rtol
             self.assertAllClose(target_mean, mean, rtol, atol)
             self.assertAllClose(target_stddev, stddev, rtol, atol)
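The first (initializer, target_mean, target_stddev) tuple works because a plain float initializer acts as a constant fill, so every looked-up value is -1.0:

import numpy as np

vals = np.full((2**17, 10), -1.0, dtype=np.float32)  # constant-fill lookup result
print(vals.mean(), vals.std())  # -1.0 0.0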
Example #11
  def test_safe_embedding_lookup_sparse_with_initializer(self):
    id = 0
    embed_dim = 8
    dense_shape = np.array([64, 128, 32])
    total_space = 64 * 128 * 32
    elements_num = int(total_space * 0.50)
    for initializer, target_mean, target_stddev in [
        (init_ops.random_normal_initializer(0.0, 0.001), 0.0, 0.00029),
        (init_ops.truncated_normal_initializer(0.0, 0.001), 0.0, 0.00029),
        (keras_init_ops.RandomNormalV2(mean=0.0, stddev=0.001), 0.0, 0.00029),
    ]:
      with self.session(config=default_config,
                        use_gpu=test_util.is_gpu_available()):
        id += 1
        embedding_weights = de.get_variable(
            "safe-init-bugfix-" + str(id),
            key_dtype=dtypes.int64,
            value_dtype=dtypes.float32,
            devices=_get_devices() * 3,
            initializer=initializer,
            dim=embed_dim,
        )

        indices_1d = np.random.randint(0, total_space, elements_num)
        indices_1d = np.unique(indices_1d)
        indices_1d.sort()
        indices_3d = []
        for _i in range(indices_1d.size):
          a_indice = []
          quotient = int(indices_1d[_i] / (128 * 32))
          remainder = indices_1d[_i] % (128 * 32)
          a_indice.append(quotient)
          quotient = int(remainder / 32)
          remainder = remainder % 32
          a_indice.extend([quotient, remainder])
          indices_3d.extend([a_indice])
        indices_3d = np.array(indices_3d)

        ids = np.random.randint(
            -0x7FFFFFFFFFFFFFFF,
            0x7FFFFFFFFFFFFFFF,
            indices_1d.size,
            dtype=np.int64,
        )

        sparse_ids = sparse_tensor.SparseTensor(
            constant_op.constant(indices_3d, dtypes.int64),
            constant_op.constant(ids, dtypes.int64),
            constant_op.constant(dense_shape, dtypes.int64),
        )
        vals_op = de.safe_embedding_lookup_sparse(embedding_weights,
                                                  sparse_ids,
                                                  None,
                                                  combiner="mean").eval()

        mean = self.evaluate(math_ops.reduce_mean(vals_op))
        stddev = self.evaluate(math_ops.reduce_std(vals_op))
        rtol = 2e-4
        atol = rtol
        self.assertTrue(not (vals_op[0][0][0] == vals_op[0][0][1]))
        self.assertAllClose(target_mean, mean, rtol, atol)
        self.assertAllClose(target_stddev, stddev, rtol, atol)
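The quotient/remainder loop above unravels flat indices into 3-D coordinates; np.unravel_index does the same in one call (a simplification sketch, not the original code):

import numpy as np

indices_1d = np.array([0, 5000, 100000])
# Equivalent to the manual loop for a (64, 128, 32) dense shape.
indices_3d = np.stack(np.unravel_index(indices_1d, (64, 128, 32)), axis=1)
print(indices_3d)  # e.g. 5000 -> [1, 28, 8]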