def __init__(self,
             params_grads,
             batch_size,
             colocate_cov_ops_with_inputs=False):
    self._batch_size = batch_size
    self._colocate_cov_ops_with_inputs = colocate_cov_ops_with_inputs

    # Flatten each (possibly nested) group of parameter gradients into a
    # single column vector, optionally colocating the flattening ops with
    # their inputs.
    params_grads_flat = []
    for params_grad in params_grads:
        with _maybe_colocate_with(params_grad,
                                  self._colocate_cov_ops_with_inputs):
            col = utils.tensors_to_column(params_grad)
            params_grads_flat.append(col)

    self._params_grads = tuple(params_grads_flat)
    # Name used to scope this factor's covariance variables.
    self._orig_params_grads_name = scope_string_from_params(
        [self._params_grads, self._batch_size])
    super(NaiveDiagonalFactor, self).__init__()
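For intuition, here is a rough NumPy sketch of what flattening a group of parameter gradients into one column (the role played by `utils.tensors_to_column`) and restoring the original shapes could look like. The helpers `to_column` and `from_column` are hypothetical stand-ins for illustration, not the library's API.

```python
import numpy as np

def to_column(tensors):
    # Concatenate flattened tensors into one (n, 1) column vector.
    return np.concatenate([t.reshape(-1, 1) for t in tensors], axis=0)

def from_column(template, col):
    # Split the column back into pieces shaped like the template tensors.
    out, offset = [], 0
    for t in template:
        size = t.size
        out.append(col[offset:offset + size].reshape(t.shape))
        offset += size
    return out

grads = [np.ones((2, 3)), np.ones((4,))]
col = to_column(grads)              # shape (10, 1)
restored = from_column(grads, col)  # shapes (2, 3) and (4,)
```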
Example 2
def multiply(self, vector):
    # Element-wise product with (cov + damping): applies the damped
    # diagonal factor to the flattened vector.
    vector_flat = utils.tensors_to_column(vector)
    out_flat = vector_flat * (self._factor.get_cov() + self._damping)
    return utils.column_to_tensors(vector, out_flat)
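The element-wise product above is equivalent to multiplying by the matrix `diag(cov) + damping * I`. A minimal NumPy sketch of that equivalence, assuming `cov` holds the diagonal entries as a column and `damping` is a scalar:

```python
import numpy as np

cov = np.array([[0.5], [2.0], [1.0]])   # diagonal entries, stored as a column
damping = 0.1
v = np.array([[1.0], [2.0], [3.0]])

# Element-wise product == multiplying by diag(cov) + damping * I.
out = v * (cov + damping)
same = (np.diag(cov.ravel()) + damping * np.eye(3)) @ v
assert np.allclose(out, same)
```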
Example 3
def multiply(self, vector):
    # Dense variant: compute (C + damping * I) v as C v + damping * v,
    # without materializing the damped matrix.
    vector_flat = utils.tensors_to_column(vector)
    out_flat = (math_ops.matmul(self._factor.get_cov(), vector_flat) +
                self._damping * vector_flat)
    return utils.column_to_tensors(vector, out_flat)
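A small NumPy sketch of why `C v + damping * v` equals the explicit damped product; the names here are illustrative only:

```python
import numpy as np

C = np.array([[2.0, 0.3], [0.3, 1.5]])   # dense covariance factor
damping = 0.1
v = np.array([[1.0], [-1.0]])

out = C @ v + damping * v                 # as in the snippet
same = (C + damping * np.eye(2)) @ v      # explicit damped matrix
assert np.allclose(out, same)
```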
Example 4
def multiply_inverse(self, vector):
    # Multiply by the inverse of the damped factor, (C + damping * I)^{-1} v,
    # using the precomputed damped inverse held by the factor.
    inverse = self._factor.get_damped_inverse(self._damping)
    out_flat = math_ops.matmul(inverse, utils.tensors_to_column(vector))
    return utils.column_to_tensors(vector, out_flat)
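In the snippet the damped inverse is supplied precomputed by the factor; a rough NumPy equivalent of the same product, solving the damped system rather than forming an inverse (not the library's caching mechanism), would be:

```python
import numpy as np

C = np.array([[2.0, 0.3], [0.3, 1.5]])
damping = 0.1
v = np.array([[1.0], [-1.0]])

# Solve (C + damping * I) x = v, i.e. x = (C + damping * I)^{-1} v.
x = np.linalg.solve(C + damping * np.eye(2), v)
assert np.allclose((C + damping * np.eye(2)) @ x, v)
```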