def _BatchMatrixDeterminantGrad(op, grad):
  """Gradient for BatchMatrixDeterminant.

  Uses the identity d(det(A))/dA = det(A) * inv(A)^T.  The scalar factor
  grad * det(A) is reshaped with two trailing singleton dimensions (using the
  dynamically computed shape of the determinant output) so that it broadcasts
  against the batch of inverse-adjoint matrices.

  Args:
    op: The BatchMatrixDeterminant op.
    grad: Incoming gradient with respect to the op's output.

  Returns:
    Gradient with respect to the input batch of matrices.
  """
  matrix = op.inputs[0]
  det = op.outputs[0]
  # inv(A)^T for every matrix in the batch.
  inv_adjoint = linalg_ops.batch_matrix_inverse(matrix, adjoint=True)
  # Shape of det with [1, 1] appended, built at graph-run time.
  scale_shape = array_ops.concat(0, [array_ops.shape(det), [1, 1]])
  scale = array_ops.reshape(grad * det, scale_shape)
  return scale * inv_adjoint
def _BatchMatrixDeterminantGrad(op, grad):
  """Gradient for BatchMatrixDeterminant.

  Same identity as above, d(det(A))/dA = det(A) * inv(A)^T, but the
  broadcastable multiplier shape is derived from the *statically* known
  shape of the determinant output rather than a runtime shape op.

  NOTE(review): a function with this exact name is defined earlier in this
  file; this later definition shadows it at import time — confirm only one
  variant is intended to remain.

  Args:
    op: The BatchMatrixDeterminant op.
    grad: Incoming gradient with respect to the op's output.

  Returns:
    Gradient with respect to the input batch of matrices.
  """
  matrix = op.inputs[0]
  det = op.outputs[0]
  # inv(A)^T for every matrix in the batch.
  inv_adjoint = linalg_ops.batch_matrix_inverse(matrix, adjoint=True)
  # Static shape of det extended by two singleton matrix dimensions.
  scale_shape = det.get_shape().concatenate(tensor_shape.TensorShape([1, 1]))
  scale = array_ops.reshape(grad * det, scale_shape)
  return scale * inv_adjoint
def _BatchMatrixSolveGrad(op, grad):
  """Gradient for BatchMatrixSolve.

  For C = solve(A, B): grad_B = A^{-T} @ grad and grad_A = -grad_B @ C^T,
  computed per matrix in the batch.

  Args:
    op: The BatchMatrixSolve op.
    grad: Incoming gradient with respect to the op's output.

  Returns:
    A tuple (grad_a, grad_b) of gradients with respect to the two inputs.
  """
  matrix = op.inputs[0]
  solution = op.outputs[0]
  # TODO(rmlarsen): Replace the following two lines with
  # a single call to batch_matrix_solve after adding
  # in an option to solve for A^T X = Y.
  matrix_inv = linalg_ops.batch_matrix_inverse(matrix)
  grad_b = math_ops.batch_matmul(matrix_inv, grad, adj_x=True)
  grad_a = -math_ops.batch_matmul(grad_b, solution, adj_y=True)
  return (grad_a, grad_b)