def map_unique_objs(self, uniqueobjs: List) -> List:
    assert_runtime(
        len(uniqueobjs) == self.num_unique,
        "The uniqueobjs must have %d elements" % self.num_unique)
    if self.all_unique:
        return uniqueobjs
    return [uniqueobjs[idx] for idx in self.nonunique_map_idxs]
def get_unique_objs(self, allobjs: Optional[List] = None) -> List:
    if allobjs is None:
        return self.unique_objs
    assert_runtime(
        len(allobjs) == self.nobjs,
        "The allobjs must have %d elements" % self.nobjs)
    if self.all_unique:
        return allobjs
    return [allobjs[i] for i in self.unique_idxs]
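# Illustrative sketch (hypothetical values, not the library's container class):
# the two helpers above assume bookkeeping like the following, where
# `unique_idxs` points at the first occurrence of each unique object and
# `nonunique_map_idxs` maps every original position back to its unique object.
#
#     allobjs            = ["a", "b", "a", "c", "b"]
#     unique_objs        = ["a", "b", "c"]     # returned by get_unique_objs()
#     unique_idxs        = [0, 1, 3]           # first occurrence in allobjs
#     nonunique_map_idxs = [0, 1, 0, 2, 1]     # expansion used by map_unique_objs()
#
# so map_unique_objs(get_unique_objs(allobjs)) reproduces allobjs.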
def solve_ivp(fcn: Union[Callable[..., torch.Tensor],
                         Callable[..., Sequence[torch.Tensor]]],
              ts: torch.Tensor,
              y0: torch.Tensor,
              params: Sequence[Any] = [],
              bck_options: Mapping[str, Any] = {},
              method: Union[str, Callable, None] = None,
              **fwd_options) -> Union[torch.Tensor, Sequence[torch.Tensor]]:
    r"""
    Solve the initial value problem (IVP), also commonly known as an ordinary
    differential equation (ODE): given the initial value :math:`\mathbf{y_0}`,
    it solves

    .. math::

        \mathbf{y}(t) = \mathbf{y_0} + \int_{t_0}^{t} \mathbf{f}(t', \mathbf{y}, \theta)\ \mathrm{d}t'

    Arguments
    ---------
    fcn: callable
        The function that represents dy/dt. The function takes an input of a
        single time ``t`` and a tensor ``y`` with shape ``(*ny)`` and produces
        :math:`\mathrm{d}\mathbf{y}/\mathrm{d}t` with shape ``(*ny)``.
        The output of the function must be a tensor with shape ``(*ny)`` or a
        list of tensors.
    ts: torch.tensor
        The time points where the value of ``y`` will be returned.
        It must be monotonically increasing or decreasing.
        It is a tensor with shape ``(nt,)``.
    y0: torch.tensor
        The initial value of ``y``, i.e. ``y(t[0]) == y0``.
        It is a tensor with shape ``(*ny)`` or a list of tensors.
    params: list
        Sequence of other parameters required in the function.
    bck_options: dict
        Options for the backward solve_ivp method. If not specified, it will
        take the same options as fwd_options.
    method: str or callable or None
        Initial value problem solver. If None, it will choose ``"rk45"``.
    **fwd_options
        Method-specific options (see method section below).

    Returns
    -------
    torch.tensor or a list of tensors
        The values of ``y`` for each time step in ``ts``.
        It is a tensor with shape ``(nt, *ny)`` or a list of tensors.
    """
    if is_debug_enabled():
        assert_fcn_params(fcn, (ts[0], y0, *params))
    assert_runtime(len(ts.shape) == 1, "Argument ts must be a 1D tensor")

    if method is None:  # set the default method
        method = "rk45"
    fwd_options["method"] = method

    # run once to see if the output is a tuple or a single tensor
    is_y0_list = isinstance(y0, list) or isinstance(y0, tuple)
    dydt = fcn(ts[0], y0, *params)
    is_dydt_list = isinstance(dydt, list) or isinstance(dydt, tuple)
    if is_y0_list != is_dydt_list:
        raise RuntimeError(
            "The y0 and output of fcn must both be a tuple or a tensor")

    pfcn = get_pure_function(fcn)
    if is_y0_list:
        nt = len(ts)
        roller = TensorPacker(y0)

        @make_sibling(pfcn)
        def pfcn2(t, ytensor, *params):
            ylist = roller.pack(ytensor)
            res_list = pfcn(t, ylist, *params)
            res = roller.flatten(res_list)
            return res

        y0 = roller.flatten(y0)
        res = _SolveIVP.apply(pfcn2, ts, fwd_options, bck_options, len(params),
                              y0, *params, *pfcn.objparams())
        return roller.pack(res)
    else:
        return _SolveIVP.apply(pfcn, ts, fwd_options, bck_options, len(params),
                               y0, *params, *pfcn.objparams())
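# Minimal usage sketch for the solve_ivp signature above (the decay-rate
# example is illustrative only; it assumes torch is imported and the function
# above is in scope):
def _example_solve_ivp():
    import torch

    a = torch.tensor(0.5, requires_grad=True)

    def dydt(t, y, a):
        # simple exponential decay: dy/dt = -a * y
        return -a * y

    ts = torch.linspace(0.0, 2.0, 21)
    y0 = torch.ones(3)
    yt = solve_ivp(dydt, ts, y0, params=(a,), method="rk45")
    # yt has shape (nt, *ny) == (21, 3) and is differentiable w.r.t. a
    return yt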
def symeig(A: LinearOperator, neig: Union[int, None] = None,
           mode: str = "lowest", M: Union[LinearOperator, None] = None,
           fwd_options: Mapping[str, Any] = {},
           bck_options: Mapping[str, Any] = {}):
    """
    Obtain `neig` lowest eigenvalues and eigenvectors of a linear operator.
    If M is specified, it solves the eigendecomposition Ax = eMx.

    Arguments
    ---------
    * A: xitorch.LinearOperator hermitian instance with shape (*BA, q, q)
        The linear module object on which the eigenpairs are constructed.
    * neig: int or None
        The number of eigenpairs to be retrieved. If None, all eigenpairs are
        retrieved.
    * mode: str
        "lowest" or "uppermost"/"uppest". If "lowest", it will take the lowest
        `neig` eigenpairs. If "uppest", it will take the uppermost `neig`.
    * M: xitorch.LinearOperator hermitian instance with shape (*BM, q, q) or None
        The transformation on the right hand side. If None, then M=I.
    * fwd_options: dict with str as key
        Eigendecomposition iterative algorithm options.
    * bck_options: dict with str as key
        Conjugate gradient options to calculate the gradient in the
        backpropagation calculation.

    Returns
    -------
    * eigvals: (*BAM, neig)
    * eigvecs: (*BAM, na, neig)
        The lowest eigenvalues and eigenvectors, where *BAM is the broadcasted
        shape of *BA and *BM.
    """
    assert_runtime(A.is_hermitian, "The linear operator A must be Hermitian")
    if M is not None:
        assert_runtime(M.is_hermitian, "The linear operator M must be Hermitian")
        assert_runtime(
            M.shape[-1] == A.shape[-1],
            "The shape of A & M must match (A: %s, M: %s)" % (A.shape, M.shape))
    mode = mode.lower()
    if mode == "uppermost":
        mode = "uppest"

    # perform expensive check if debug mode is enabled
    if is_debug_enabled():
        A.check()
        if M is not None:
            M.check()

    if "method" not in fwd_options or fwd_options["method"].lower() == "exacteig":
        return exacteig(A, neig, mode, M)
    else:
        # get the unique parameters of A & M
        params = A.getlinopparams()
        mparams = M.getlinopparams() if M is not None else []
        na = len(params)
        return symeig_torchfcn.apply(A, neig, mode, M, fwd_options,
                                     bck_options, na, *params, *mparams)
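# Usage sketch for the fwd_options-style symeig variant above, assuming a
# dense symmetric matrix wrapped with xitorch.LinearOperator.m (the wrapping
# call and its is_hermitian flag are assumptions about the library API):
def _example_symeig_fwd_options():
    import torch
    import xitorch

    mat = torch.randn(4, 4, dtype=torch.float64)
    mat = (mat + mat.transpose(-2, -1)) * 0.5     # symmetrize
    A = xitorch.LinearOperator.m(mat, is_hermitian=True)
    # "exacteig" takes the default branch above (dense eigendecomposition)
    eigvals, eigvecs = symeig(A, neig=2, mode="lowest",
                              fwd_options={"method": "exacteig"})
    return eigvals, eigvecs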
def solve(A: LinearOperator, B: torch.Tensor,
          E: Union[torch.Tensor, None] = None,
          M: Optional[LinearOperator] = None,
          bck_options: Mapping[str, Any] = {},
          method: Union[str, Callable, None] = None,
          **fwd_options) -> torch.Tensor:
    r"""
    Perform an iterative method to solve the equation

    .. math::

        \mathbf{AX=B}

    or

    .. math::

        \mathbf{AX-MXE=B}

    where :math:`\mathbf{E}` is a diagonal matrix.
    This function can also solve batched multiple inverse equations at the
    same time by applying :math:`\mathbf{A}` to a tensor :math:`\mathbf{X}`
    with shape ``(...,na,ncols)``.
    The applied :math:`\mathbf{E}` are not necessarily identical for each
    column.

    Arguments
    ---------
    A: xitorch.LinearOperator
        A linear operator that takes an input ``X`` and produces the vectors
        in the same space as ``B``.
        It should have the shape of ``(*BA, na, na)``.
    B: torch.Tensor
        The tensor on the right hand side with shape ``(*BB, na, ncols)``.
    E: torch.Tensor or None
        If a tensor, it will solve :math:`\mathbf{AX-MXE = B}`.
        It will be regarded as the diagonal of the matrix.
        Otherwise, it just solves :math:`\mathbf{AX = B}` and ``M`` is ignored.
        If it is a tensor, it should have shape of ``(*BE, ncols)``.
    M: xitorch.LinearOperator or None
        The transformation on the ``E`` side. If ``E`` is ``None``,
        then this argument is ignored.
        If ``E`` is not ``None`` and ``M`` is ``None``, then ``M=I``.
        If LinearOperator, it must be Hermitian with shape ``(*BM, na, na)``.
    bck_options: dict
        Options of the iterative solver in the backward calculation.
    method: str or callable or None
        The method of the linear equation solver. If ``None``, it will choose
        ``"cg"`` or ``"bicgstab"`` based on the matrices' symmetry.
        `Note`: the default method may change quite frequently, so if you want
        future compatibility, please specify a method.
    **fwd_options
        Method-specific options (see method section below).

    Returns
    -------
    torch.Tensor
        The tensor :math:`\mathbf{X}` that satisfies
        :math:`\mathbf{AX-MXE=B}`.
    """
    assert_runtime(A.shape[-1] == A.shape[-2],
                   "The linear operator A must have a square shape")
    assert_runtime(
        A.shape[-1] == B.shape[-2],
        "Mismatch shape of A & B (A: %s, B: %s)" % (A.shape, B.shape))
    assert_runtime(
        not torch.is_grad_enabled() or A.is_getparamnames_implemented,
        "The _getparamnames(self, prefix) of linear operator A must be "
        "implemented if using solve with grad enabled")
    if M is not None:
        assert_runtime(M.shape[-1] == M.shape[-2],
                       "The linear operator M must have a square shape")
        assert_runtime(
            M.shape[-1] == A.shape[-1],
            "The shape of A & M must match (A: %s, M: %s)" % (A.shape, M.shape))
        assert_runtime(M.is_hermitian,
                       "The linear operator M must be a Hermitian matrix")
        assert_runtime(
            not torch.is_grad_enabled() or M.is_getparamnames_implemented,
            "The _getparamnames(self, prefix) of linear operator M must be "
            "implemented if using solve with grad enabled")
    if E is not None:
        assert_runtime(
            E.shape[-1] == B.shape[-1],
            "The last dimension of E & B must match (E: %s, B: %s)" %
            (E.shape, B.shape))
    if E is None and M is not None:
        warnings.warn(
            "M is supplied but will be ignored because E is not supplied")

    # perform expensive check if debug mode is enabled
    if is_debug_enabled():
        A.check()
        if M is not None:
            M.check()

    if method is None:
        if isinstance(A, MatrixLinearOperator) and \
           (M is None or isinstance(M, MatrixLinearOperator)):
            method = "exactsolve"
        else:
            is_hermit = A.is_hermitian and (M is None or M.is_hermitian)
            method = "cg" if is_hermit else "bicgstab"

    if method == "exactsolve":
        return exactsolve(A, B, E, M)
    else:
        # get the unique parameters of A
        params = A.getlinopparams()
        mparams = M.getlinopparams() if M is not None else []
        na = len(params)
        return solve_torchfcn.apply(A, B, E, M, method, fwd_options,
                                    bck_options, na, *params, *mparams)
def symeig(A: LinearOperator, neig: Optional[int] = None,
           mode: str = "lowest", M: Optional[LinearOperator] = None,
           bck_options: Mapping[str, Any] = {},
           method: Union[str, Callable, None] = None,
           **fwd_options) -> Tuple[torch.Tensor, torch.Tensor]:
    r"""
    Obtain ``neig`` lowest eigenvalues and eigenvectors of a linear operator,

    .. math::

        \mathbf{AX = MXE}

    where :math:`\mathbf{A}, \mathbf{M}` are linear operators,
    :math:`\mathbf{E}` is a diagonal matrix containing the eigenvalues, and
    :math:`\mathbf{X}` is a matrix containing the eigenvectors.

    Arguments
    ---------
    A: xitorch.LinearOperator
        The linear operator object on which the eigenpairs are constructed.
        It must be a Hermitian linear operator with shape ``(*BA, q, q)``.
    neig: int or None
        The number of eigenpairs to be retrieved. If ``None``, all eigenpairs
        are retrieved.
    mode: str
        ``"lowest"`` or ``"uppermost"``/``"uppest"``. If ``"lowest"``,
        it will take the lowest ``neig`` eigenpairs.
        If ``"uppest"``, it will take the uppermost ``neig``.
    M: xitorch.LinearOperator
        The transformation on the right hand side. If ``None``, then ``M=I``.
        If specified, it must be Hermitian with shape ``(*BM, q, q)``.
    bck_options: dict
        Method-specific options for :func:`solve` which are used in the
        backpropagation calculation.
    method: str or callable or None
        Method for the eigendecomposition. If ``None``, it will choose
        ``"exacteig"``.
    **fwd_options
        Method-specific options (see method section below).

    Returns
    -------
    tuple of tensors (eigenvalues, eigenvectors)
        It will return eigenvalues and eigenvectors with shapes respectively
        ``(*BAM, neig)`` and ``(*BAM, na, neig)``, where ``*BAM`` is the
        broadcasted shape of ``*BA`` and ``*BM``.
    """
    assert_runtime(A.is_hermitian, "The linear operator A must be Hermitian")
    assert_runtime(
        not torch.is_grad_enabled() or A.is_getparamnames_implemented,
        "The _getparamnames(self, prefix) of linear operator A must be "
        "implemented if using symeig with grad enabled")
    if M is not None:
        assert_runtime(M.is_hermitian, "The linear operator M must be Hermitian")
        assert_runtime(
            M.shape[-1] == A.shape[-1],
            "The shape of A & M must match (A: %s, M: %s)" % (A.shape, M.shape))
        assert_runtime(
            not torch.is_grad_enabled() or M.is_getparamnames_implemented,
            "The _getparamnames(self, prefix) of linear operator M must be "
            "implemented if using symeig with grad enabled")
    mode = mode.lower()
    if mode == "uppermost":
        mode = "uppest"
    if method is None:
        if isinstance(A, MatrixLinearOperator) and \
           (M is None or isinstance(M, MatrixLinearOperator)):
            method = "exacteig"
        else:
            # TODO: implement robust LOBPCG and put it here
            method = "exacteig"
    if neig is None:
        neig = A.shape[-1]

    # perform expensive check if debug mode is enabled
    if is_debug_enabled():
        A.check()
        if M is not None:
            M.check()

    if method == "exacteig":
        return exacteig(A, neig, mode, M)
    else:
        fwd_options["method"] = method
        # get the unique parameters of A & M
        params = A.getlinopparams()
        mparams = M.getlinopparams() if M is not None else []
        na = len(params)
        return symeig_torchfcn.apply(A, neig, mode, M, fwd_options,
                                     bck_options, na, *params, *mparams)
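# Usage sketch for the symeig variant above: lowest eigenpairs of a dense
# symmetric matrix (xitorch.LinearOperator.m is assumed to wrap a dense matrix):
def _example_symeig():
    import torch
    import xitorch

    mat = torch.randn(5, 5, dtype=torch.float64)
    mat = (mat + mat.transpose(-2, -1)) * 0.5
    A = xitorch.LinearOperator.m(mat, is_hermitian=True)
    eigvals, eigvecs = symeig(A, neig=3, mode="lowest")   # defaults to "exacteig"
    # eigvals: (3,), eigvecs: (5, 3), with mat @ eigvecs approx eigvecs * eigvals
    return eigvals, eigvecs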
def solve(A: LinearOperator, B: torch.Tensor,
          E: Union[torch.Tensor, None] = None,
          M: Union[LinearOperator, None] = None,
          posdef=False,
          fwd_options: Mapping[str, Any] = {},
          bck_options: Mapping[str, Any] = {}):
    """
    Perform an iterative method to solve the equation AX=B or AX-MXE=B, where
    E is a diagonal matrix.
    This function can also solve batched multiple inverse equations at the
    same time by applying A to a tensor X with shape (...,na,ncols).
    The applied E are not necessarily identical for each column.

    Arguments
    ---------
    * A: xitorch.LinearOperator instance with shape (*BA, na, na)
        A function that takes an input X and produces the vectors in the same
        space as B.
    * B: torch.tensor (*BB, na, ncols)
        The tensor on the right hand side.
    * E: torch.tensor (*BE, ncols) or None
        If not None, it will solve AX-MXE = B. Otherwise, it just solves
        AX = B and M is ignored. E would be applied to every column.
    * M: xitorch.LinearOperator instance (*BM, na, na) or None
        The transformation on the E side. If E is None, then this argument is
        ignored. If E is not None and M is None, then M=I.
        This LinearOperator must be Hermitian.
    * fwd_options: dict
        Options of the iterative solver in the forward calculation.
    * bck_options: dict
        Options of the iterative solver in the backward calculation.
    """
    assert_runtime(A.shape[-1] == A.shape[-2],
                   "The linear operator A must have a square shape")
    assert_runtime(
        A.shape[-1] == B.shape[-2],
        "Mismatch shape of A & B (A: %s, B: %s)" % (A.shape, B.shape))
    if M is not None:
        assert_runtime(M.shape[-1] == M.shape[-2],
                       "The linear operator M must have a square shape")
        assert_runtime(
            M.shape[-1] == A.shape[-1],
            "The shape of A & M must match (A: %s, M: %s)" % (A.shape, M.shape))
        assert_runtime(M.is_hermitian,
                       "The linear operator M must be a Hermitian matrix")
    if E is not None:
        assert_runtime(
            E.shape[-1] == B.shape[-1],
            "The last dimension of E & B must match (E: %s, B: %s)" %
            (E.shape, B.shape))
    if E is None and M is not None:
        warnings.warn(
            "M is supplied but will be ignored because E is not supplied")

    # perform expensive check if debug mode is enabled
    if is_debug_enabled():
        A.check()
        if M is not None:
            M.check()

    if "method" not in fwd_options or fwd_options["method"].lower() == "exactsolve":
        return exactsolve(A, B, E, M)
    else:
        # get the unique parameters of A
        params = A.getlinopparams()
        mparams = M.getlinopparams() if M is not None else []
        na = len(params)
        return solve_torchfcn.apply(A, B, E, M, posdef, fwd_options,
                                    bck_options, na, *params, *mparams)
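# Usage sketch for the solve variant above with the generalized form
# AX - MXE = B, where E holds one diagonal shift per column of B
# (xitorch.LinearOperator.m is an assumed wrapper for dense matrices):
def _example_solve_shifted():
    import torch
    import xitorch

    amat = torch.randn(4, 4, dtype=torch.float64)
    amat = amat @ amat.transpose(-2, -1) + 4 * torch.eye(4, dtype=torch.float64)
    A = xitorch.LinearOperator.m(amat, is_hermitian=True)
    M = xitorch.LinearOperator.m(torch.eye(4, dtype=torch.float64), is_hermitian=True)
    B = torch.randn(4, 2, dtype=torch.float64)
    E = torch.tensor([0.1, 0.2], dtype=torch.float64)  # one shift per column of B
    # column i of X solves (A - E[i] * M) x_i = b_i; the default branch above
    # uses exactsolve since no iterative method is requested
    X = solve(A, B, E=E, M=M)
    return X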
def quad(fcn: Union[Callable[..., torch.Tensor],
                    Callable[..., List[torch.Tensor]]],
         xl: Union[float, int, torch.Tensor],
         xu: Union[float, int, torch.Tensor],
         params: Sequence[Any] = [],
         fwd_options: Mapping[str, Any] = {},
         bck_options: Mapping[str, Any] = {}):
    """
    Calculate the quadrature of the function `fcn` from `xl` to `xu`:

        y = int_xl^xu fcn(x, *params) dx

    Arguments
    ---------
    * fcn: callable with output tensor with shape (*nout) or list of tensors
        The function to be integrated.
    * xl, xu: float, int, or 1-element torch.Tensor
        The lower and upper bound of the integration.
    * params: list
        List of any other parameters for the function `fcn`.
    * fwd_options: dict
        Options for the forward quadrature method.
    * bck_options: dict
        Options for the backward quadrature method.

    Returns
    -------
    * y: torch.tensor with shape (*nout) or list of tensors
        The quadrature results.
    """
    # perform implementation check if debug mode is enabled
    if is_debug_enabled():
        assert_fcn_params(fcn, (xl, *params))
    if isinstance(xl, torch.Tensor):
        assert_runtime(torch.numel(xl) == 1, "xl must be a 1-element tensor")
    if isinstance(xu, torch.Tensor):
        assert_runtime(torch.numel(xu) == 1, "xu must be a 1-element tensor")

    out = fcn(xl, *params)
    is_tuple_out = not isinstance(out, torch.Tensor)
    if not is_tuple_out:
        dtype = out.dtype
        device = out.device
    elif len(out) > 0:
        dtype = out[0].dtype
        device = out[0].device
    else:
        raise RuntimeError("The output of the fcn must be non-empty")

    pfunc = get_pure_function(fcn)
    nparams = len(params)
    if is_tuple_out:
        packer = TensorPacker(out)

        @make_sibling(pfunc)
        def pfunc2(x, *params):
            y = fcn(x, *params)
            return packer.flatten(y)

        res = _Quadrature.apply(pfunc2, xl, xu, fwd_options, bck_options,
                                nparams, dtype, device,
                                *params, *pfunc.objparams())
        return packer.pack(res)
    else:
        return _Quadrature.apply(pfunc, xl, xu, fwd_options, bck_options,
                                 nparams, dtype, device,
                                 *params, *pfunc.objparams())
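# Usage sketch for the quad variant above with a function returning a list of
# tensors, exercising the TensorPacker path (default forward options are
# assumed to pick a sensible quadrature rule):
def _example_quad_multi():
    import torch

    w = torch.tensor([1.0, 2.0], requires_grad=True)

    def integrand(x, w):
        # returns two tensors; both are integrated over x
        return [torch.sin(w * x), torch.cos(w * x)]

    xl = torch.tensor(0.0)
    xu = torch.tensor(1.0)
    ys = quad(integrand, xl, xu, params=(w,))
    # ys is a list of two tensors, each with shape (2,)
    return ys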
def symeig(A: LinearOperator, neig: Optional[int] = None,
           mode: str = "lowest", M: Optional[LinearOperator] = None,
           bck_options: Mapping[str, Any] = {},
           method: Union[str, Callable, None] = None,
           **fwd_options) -> Tuple[torch.Tensor, torch.Tensor]:
    r"""
    Obtain ``neig`` lowest eigenvalues and eigenvectors of a linear operator,

    .. math::

        \mathbf{AX = MXE}

    where :math:`\mathbf{A}, \mathbf{M}` are linear operators,
    :math:`\mathbf{E}` is a diagonal matrix containing the eigenvalues, and
    :math:`\mathbf{X}` is a matrix containing the eigenvectors.
    This function can handle derivatives for degenerate cases by setting
    non-zero ``degen_atol`` and ``degen_rtol`` in the backward option using
    the expressions in [1]_.

    Arguments
    ---------
    A: xitorch.LinearOperator
        The linear operator object on which the eigenpairs are constructed.
        It must be a Hermitian linear operator with shape ``(*BA, q, q)``.
    neig: int or None
        The number of eigenpairs to be retrieved. If ``None``, all eigenpairs
        are retrieved.
    mode: str
        ``"lowest"`` or ``"uppermost"``/``"uppest"``. If ``"lowest"``,
        it will take the lowest ``neig`` eigenpairs.
        If ``"uppest"``, it will take the uppermost ``neig``.
    M: xitorch.LinearOperator
        The transformation on the right hand side. If ``None``, then ``M=I``.
        If specified, it must be Hermitian with shape ``(*BM, q, q)``.
    bck_options: dict
        Method-specific options for :func:`solve` which are used in the
        backpropagation calculation, with some additional arguments for
        computing the backward derivatives:

        * ``degen_atol`` (``float`` or None): Minimum absolute difference
          between two eigenvalues to be treated as degenerate. If None, it is
          ``torch.finfo(dtype).eps**0.6``. If 0.0, no special treatment on
          degeneracy is applied. (default: None)
        * ``degen_rtol`` (``float`` or None): Minimum relative difference
          between two eigenvalues to be treated as degenerate. If None, it is
          ``torch.finfo(dtype).eps**0.4``. If 0.0, no special treatment on
          degeneracy is applied. (default: None)

        Note: the default values of ``degen_atol`` and ``degen_rtol`` are
        going to change in the future. So, for future compatibility, please
        specify the specific values.
    method: str or callable or None
        Method for the eigendecomposition. If ``None``, it will choose
        ``"exacteig"``.
    **fwd_options
        Method-specific options (see method section below).

    Returns
    -------
    tuple of tensors (eigenvalues, eigenvectors)
        It will return eigenvalues and eigenvectors with shapes respectively
        ``(*BAM, neig)`` and ``(*BAM, na, neig)``, where ``*BAM`` is the
        broadcasted shape of ``*BA`` and ``*BM``.

    References
    ----------
    .. [1] Muhammad F. Kasim,
           "Derivatives of partial eigendecomposition of a real symmetric
           matrix for degenerate cases", arXiv:2011.04366 (2020),
           `https://arxiv.org/abs/2011.04366 <https://arxiv.org/abs/2011.04366>`_
    """
    assert_runtime(A.is_hermitian, "The linear operator A must be Hermitian")
    assert_runtime(
        not torch.is_grad_enabled() or A.is_getparamnames_implemented,
        "The _getparamnames(self, prefix) of linear operator A must be "
        "implemented if using symeig with grad enabled")
    if M is not None:
        assert_runtime(M.is_hermitian, "The linear operator M must be Hermitian")
        assert_runtime(
            M.shape[-1] == A.shape[-1],
            "The shape of A & M must match (A: %s, M: %s)" % (A.shape, M.shape))
        assert_runtime(
            not torch.is_grad_enabled() or M.is_getparamnames_implemented,
            "The _getparamnames(self, prefix) of linear operator M must be "
            "implemented if using symeig with grad enabled")
    mode = mode.lower()
    if mode == "uppermost":
        mode = "uppest"
    if method is None:
        if isinstance(A, MatrixLinearOperator) and \
           (M is None or isinstance(M, MatrixLinearOperator)):
            method = "exacteig"
        else:
            # TODO: implement robust LOBPCG and put it here
            method = "exacteig"
    if neig is None:
        neig = A.shape[-1]

    # perform expensive check if debug mode is enabled
    if is_debug_enabled():
        A.check()
        if M is not None:
            M.check()

    if method == "exacteig":
        return exacteig(A, neig, mode, M)
    else:
        fwd_options["method"] = method
        # get the unique parameters of A & M
        params = A.getlinopparams()
        mparams = M.getlinopparams() if M is not None else []
        na = len(params)
        return symeig_torchfcn.apply(A, neig, mode, M, fwd_options,
                                     bck_options, na, *params, *mparams)
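# Sketch of passing the degenerate-aware backward options documented above.
# Assumptions: "davidson" is one of the available iterative eigendecomposition
# methods (so that the custom backward, which reads degen_atol/degen_rtol, is
# used rather than the exacteig shortcut), and the tolerance values are
# illustrative only:
def _example_symeig_degen():
    import torch
    import xitorch

    mat = torch.randn(6, 6, dtype=torch.float64)
    mat = (mat + mat.transpose(-2, -1)) * 0.5
    mat.requires_grad_()
    A = xitorch.LinearOperator.m(mat, is_hermitian=True)
    # eigenvalues closer than these tolerances are treated as degenerate in backward
    eigvals, eigvecs = symeig(A, neig=2, mode="lowest", method="davidson",
                              bck_options={"degen_atol": 1e-10,
                                           "degen_rtol": 1e-7})
    eigvals.sum().backward()
    return mat.grad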
def solve_ivp(fcn: Callable[..., torch.Tensor],
              ts: torch.Tensor,
              y0: torch.Tensor,
              params: Sequence[Any] = [],
              fwd_options: Mapping[str, Any] = {},
              bck_options: Mapping[str, Any] = {}) -> torch.Tensor:
    """
    Solve the initial value problem (IVP): given the initial value `y0`, it
    solves

        y(t) = y0 + int_t0^t f(t', y, *params) dt'

    Arguments
    ---------
    * fcn: callable with output a tensor with shape (*ny) or a list of tensors
        The function that represents dy/dt. The function takes an input of a
        single time `t` and `y` with shape (*ny) and produces dydt with shape
        (*ny).
    * ts: torch.tensor with shape (nt,)
        The time points where the value of `y` is returned.
        It must be monotonically increasing or decreasing.
    * y0: torch.tensor with shape (*ny) or a list of tensors
        The initial value of y, i.e. y(t[0]) == y0.
    * params: list
        List of other parameters required in the function.
    * fwd_options: dict
        Options for the forward solve_ivp method.
    * bck_options: dict
        Options for the backward solve_ivp method.

    Returns
    -------
    * yt: torch.tensor with shape (nt,*ny) or a list of tensors
        The values of `y` for each time step in `ts`.
    """
    if is_debug_enabled():
        assert_fcn_params(fcn, (ts[0], y0, *params))
    assert_runtime(len(ts.shape) == 1, "Argument ts must be a 1D tensor")

    # run once to see if the output is a tuple or a single tensor
    is_y0_list = isinstance(y0, list) or isinstance(y0, tuple)
    dydt = fcn(ts[0], y0, *params)
    is_dydt_list = isinstance(dydt, list) or isinstance(dydt, tuple)
    if is_y0_list != is_dydt_list:
        raise RuntimeError(
            "The y0 and output of fcn must both be a tuple or a tensor")

    pfcn = get_pure_function(fcn)
    if is_y0_list:
        nt = len(ts)
        roller = TensorPacker(y0)

        @make_sibling(pfcn)
        def pfcn2(t, ytensor, *params):
            ylist = roller.pack(ytensor)
            res_list = pfcn(t, ylist, *params)
            res = roller.flatten(res_list)
            return res

        y0 = roller.flatten(y0)
        res = _SolveIVP.apply(pfcn2, ts, fwd_options, bck_options, len(params),
                              y0, *params, *pfcn.objparams())
        return roller.pack(res)
    else:
        return _SolveIVP.apply(pfcn, ts, fwd_options, bck_options, len(params),
                               y0, *params, *pfcn.objparams())
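# Usage sketch for the solve_ivp variant above with y0 given as a list of
# tensors, exercising the TensorPacker pack/flatten path (the harmonic
# oscillator split below is illustrative only):
def _example_solve_ivp_list():
    import torch

    k = torch.tensor(1.0, requires_grad=True)

    def dstate_dt(t, state, k):
        pos, vel = state
        # harmonic oscillator written as two first-order equations
        return [vel, -k * pos]

    ts = torch.linspace(0.0, 1.0, 11)
    y0 = [torch.tensor([1.0]), torch.tensor([0.0])]   # [position, velocity]
    pos_t, vel_t = solve_ivp(dstate_dt, ts, y0, params=(k,))
    # each returned tensor has shape (nt, 1)
    return pos_t, vel_t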
def quad(fcn: Union[Callable[..., torch.Tensor],
                    Callable[..., Sequence[torch.Tensor]]],
         xl: Union[float, int, torch.Tensor],
         xu: Union[float, int, torch.Tensor],
         params: Sequence[Any] = [],
         bck_options: Mapping[str, Any] = {},
         method: Union[str, Callable, None] = None,
         **fwd_options) -> Union[torch.Tensor, Sequence[torch.Tensor]]:
    r"""
    Calculate the quadrature:

    .. math::

        y = \int_{x_l}^{x_u} f(x, \theta)\ \mathrm{d}x

    Arguments
    ---------
    fcn: callable
        The function to be integrated. Its output must be a tensor with
        shape ``(*nout)`` or a list of tensors.
    xl: float, int or 1-element torch.Tensor
        The lower bound of the integration.
    xu: float, int or 1-element torch.Tensor
        The upper bound of the integration.
    params: list
        Sequence of any other parameters for the function ``fcn``.
    bck_options: dict
        Options for the backward quadrature method.
    method: str or callable or None
        Quadrature method. If None, it will choose ``"leggauss"``.
    **fwd_options
        Method-specific options (see method section).

    Returns
    -------
    torch.tensor or a list of tensors
        The quadrature results with shape ``(*nout)`` or a list of tensors.
    """
    # perform implementation check if debug mode is enabled
    if is_debug_enabled():
        assert_fcn_params(fcn, (xl, *params))
    if isinstance(xl, torch.Tensor):
        assert_runtime(torch.numel(xl) == 1, "xl must be a 1-element tensor")
    if isinstance(xu, torch.Tensor):
        assert_runtime(torch.numel(xu) == 1, "xu must be a 1-element tensor")
    if method is None:
        method = "leggauss"
    fwd_options["method"] = method

    out = fcn(xl, *params)
    if isinstance(out, torch.Tensor):
        dtype = out.dtype
        device = out.device
        is_tuple_out = False
    elif len(out) > 0:
        dtype = out[0].dtype
        device = out[0].device
        is_tuple_out = True
    else:
        raise RuntimeError("The output of the fcn must be non-empty")

    pfunc = get_pure_function(fcn)
    nparams = len(params)
    if is_tuple_out:
        packer = TensorPacker(out)

        @make_sibling(pfunc)
        def pfunc2(x, *params):
            y = fcn(x, *params)
            return packer.flatten(y)

        res = _Quadrature.apply(pfunc2, xl, xu, fwd_options, bck_options,
                                nparams, dtype, device,
                                *params, *pfunc.objparams())
        return packer.pack(res)
    else:
        return _Quadrature.apply(pfunc, xl, xu, fwd_options, bck_options,
                                 nparams, dtype, device,
                                 *params, *pfunc.objparams())
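# Minimal usage sketch for the quad signature above (Gauss-Legendre by
# default; the option name `n` for the number of sample points is an
# assumption passed through **fwd_options):
def _example_quad():
    import torch

    a = torch.tensor(2.0, requires_grad=True)

    def integrand(x, a):
        return torch.exp(-a * x * x)

    y = quad(integrand, 0.0, 1.0, params=(a,), method="leggauss", n=100)
    # y approximates int_0^1 exp(-a x^2) dx and is differentiable w.r.t. a
    return y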
def solve(A: LinearOperator, B: torch.Tensor,
          E: Union[torch.Tensor, None] = None,
          M: Union[LinearOperator, None] = None,
          posdef=False,
          bck_options: Mapping[str, Any] = {},
          method: Union[str, None] = None,
          **fwd_options):
    r"""
    Perform an iterative method to solve the equation

    .. math::

        \mathbf{AX=B}

    or

    .. math::

        \mathbf{AX-MXE=B}

    where :math:`\mathbf{E}` is a diagonal matrix.
    This function can also solve batched multiple inverse equations at the
    same time by applying :math:`\mathbf{A}` to a tensor :math:`\mathbf{X}`
    with shape ``(...,na,ncols)``.
    The applied :math:`\mathbf{E}` are not necessarily identical for each
    column.

    Arguments
    ---------
    A: xitorch.LinearOperator
        A linear operator that takes an input ``X`` and produces the vectors
        in the same space as ``B``.
        It should have the shape of ``(*BA, na, na)``.
    B: torch.tensor
        The tensor on the right hand side with shape ``(*BB, na, ncols)``.
    E: torch.tensor or None
        If a tensor, it will solve :math:`\mathbf{AX-MXE = B}`.
        It will be regarded as the diagonal of the matrix.
        Otherwise, it just solves :math:`\mathbf{AX = B}` and ``M`` is ignored.
        If it is a tensor, it should have shape of ``(*BE, ncols)``.
    M: xitorch.LinearOperator or None
        The transformation on the ``E`` side. If ``E`` is ``None``,
        then this argument is ignored.
        If ``E`` is not ``None`` and ``M`` is ``None``, then ``M=I``.
        If LinearOperator, it must be Hermitian with shape ``(*BM, na, na)``.
    bck_options: dict
        Options of the iterative solver in the backward calculation.
    method: str or None
        Indicating the method of solve. If None, it will select
        ``"exactsolve"``.
    **fwd_options
        Method-specific options (see method below).
    """
    assert_runtime(A.shape[-1] == A.shape[-2],
                   "The linear operator A must have a square shape")
    assert_runtime(
        A.shape[-1] == B.shape[-2],
        "Mismatch shape of A & B (A: %s, B: %s)" % (A.shape, B.shape))
    if M is not None:
        assert_runtime(M.shape[-1] == M.shape[-2],
                       "The linear operator M must have a square shape")
        assert_runtime(
            M.shape[-1] == A.shape[-1],
            "The shape of A & M must match (A: %s, M: %s)" % (A.shape, M.shape))
        assert_runtime(M.is_hermitian,
                       "The linear operator M must be a Hermitian matrix")
    if E is not None:
        assert_runtime(
            E.shape[-1] == B.shape[-1],
            "The last dimension of E & B must match (E: %s, B: %s)" %
            (E.shape, B.shape))
    if E is None and M is not None:
        warnings.warn(
            "M is supplied but will be ignored because E is not supplied")

    # perform expensive check if debug mode is enabled
    if is_debug_enabled():
        A.check()
        if M is not None:
            M.check()

    if method is None:
        method = "exactsolve"  # TODO: do a proper method selection based on the size

    if method == "exactsolve":
        return exactsolve(A, B, E, M)
    else:
        fwd_options["method"] = method
        # get the unique parameters of A
        params = A.getlinopparams()
        mparams = M.getlinopparams() if M is not None else []
        na = len(params)
        return solve_torchfcn.apply(A, B, E, M, posdef, fwd_options,
                                    bck_options, na, *params, *mparams)
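# Usage sketch for the solve variant above: backpropagating through the solve
# with respect to the matrix entries (xitorch.LinearOperator.m is an assumed
# wrapper; method=None takes the "exactsolve" branch, which is differentiable):
def _example_solve_grad():
    import torch
    import xitorch

    mat = torch.eye(3, dtype=torch.float64) * 2.0
    mat.requires_grad_()
    A = xitorch.LinearOperator.m(mat, is_hermitian=True)
    B = torch.randn(3, 1, dtype=torch.float64)
    X = solve(A, B)
    X.sum().backward()
    return mat.grad   # gradient of sum(X) w.r.t. the matrix entries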