def __init__(self, space):
    """Initialize an instance.

    Parameters
    ----------
    space : `FnBase`
        The domain of the operator.

    Notes
    -----
    ``name``, ``nargin``, ``nargout`` and ``LINEAR_UFUNCS`` come from
    the enclosing scope (presumably a ufunc-operator factory) --
    TODO confirm against the surrounding file.
    """
    if not isinstance(space, LinearSpace):
        raise TypeError('`space` {!r} not a `LinearSpace`'.format(space))

    # Integer-only ufuncs (e.g. bitwise ops) cannot act on float spaces.
    if _is_integer_only_ufunc(name) and not is_int_dtype(space.dtype):
        raise ValueError("ufunc '{}' only defined with integral dtype"
                         "".format(name))

    # Multiple inputs/outputs are modeled as power spaces of `space`.
    domain = space if nargin == 1 else ProductSpace(space, nargin)
    range = space if nargout == 1 else ProductSpace(space, nargout)

    Operator.__init__(self, domain=domain, range=range,
                      linear=(name in LINEAR_UFUNCS))
def __init__(self, sigma):
    """Initialize a new instance.

    Parameters
    ----------
    sigma : positive float
        Scaling parameter in the proximal operator.

    Notes
    -----
    ``space``, ``exponent``, ``g`` and ``impl`` are taken from the
    enclosing scope (presumably a factory function) -- TODO confirm
    against the surrounding file.
    """
    # `exponent` may be spatially varying, so it is stored as an element
    # of the (component) space rather than as a plain number.
    if isinstance(space, ProductSpace) and space.is_power_space:
        self.exponent = space[0].element(exponent)
    elif isinstance(space, DiscreteLp):
        self.exponent = space.element(exponent)
    else:
        raise TypeError('space must be a `DiscreteLp` instance or '
                        'a power space of those, got {!r}'
                        ''.format(space))

    # BUG FIX: `Operator.__init__` must run before `self.domain` is
    # accessed below -- the `domain` attribute is only set up by the
    # base-class initializer. The original called it afterwards, which
    # would make `self.domain.element(g)` fail.
    Operator.__init__(self, domain=space, range=space, linear=False)

    if g is not None:
        self.g = self.domain.element(g)
    else:
        self.g = None

    self.sigma = float(sigma)
    self.impl = impl
def __init__(self, space):
    """Initialize an instance.

    Parameters
    ----------
    space : `TensorSpace`
        The domain of the operator.

    Notes
    -----
    ``name``, ``nargin``, ``nargout``, ``dtypes_out`` and
    ``LINEAR_UFUNCS`` come from the enclosing scope (presumably a
    ufunc-operator factory) -- TODO confirm against the surrounding
    file.
    """
    if not isinstance(space, LinearSpace):
        raise TypeError('`space` {!r} not a `LinearSpace`'.format(space))

    # Determine the operator domain, the "base" space used to build the
    # range, and the list of input dtypes for the ufunc.
    if nargin == 1:
        domain, space0 = space, space
        in_dtypes = [space.dtype]
    elif nargin == len(space) == 2 and isinstance(space, ProductSpace):
        # A 2-component product space is taken as-is for binary ufuncs.
        domain, space0 = space, space[0]
        in_dtypes = [space[0].dtype, space[1].dtype]
    else:
        domain, space0 = ProductSpace(space, nargin), space
        in_dtypes = [space.dtype, space.dtype]

    out_dtypes = dtypes_out(name, in_dtypes)

    # The range uses the ufunc's result dtype(s).
    if nargout == 1:
        range = space0.astype(out_dtypes[0])
    else:
        range = ProductSpace(space0.astype(out_dtypes[0]),
                             space0.astype(out_dtypes[1]))

    Operator.__init__(self, domain=domain, range=range,
                      linear=(name in LINEAR_UFUNCS))
def __init__(self, map_type, fset, partition, dspace, linear=False,
             **kwargs):
    """Initialize a new instance.

    Parameters
    ----------
    map_type : {'sampling', 'interpolation'}
        The type of operator.
    fset : `FunctionSet`
        The non-discretized (abstract) set of functions to be
        discretized.
    partition : `RectPartition`
        Partition of (a subset of) ``fset.domain`` based on a
        `RectGrid`.
    dspace : `NtuplesBase`
        Data space providing containers for the values of a
        discretized object. Its `NtuplesBase.size` must be equal
        to the total number of grid points.
    linear : bool, optional
        Create a linear operator if ``True``, otherwise a non-linear
        operator.
    order : {'C', 'F'}, optional
        Ordering of the axes in the data storage. 'C' means the
        first axis varies slowest, the last axis fastest;
        vice versa for 'F'.
        Default: 'C'
    """
    map_type_ = str(map_type).lower()
    if map_type_ not in ('sampling', 'interpolation'):
        raise ValueError('`map_type` {} not understood'
                         ''.format(map_type))

    if not isinstance(fset, FunctionSet):
        raise TypeError('`fset` {!r} is not a `FunctionSet` '
                        'instance'.format(fset))
    if not isinstance(partition, RectPartition):
        raise TypeError('`partition` {!r} is not a `RectPartition` '
                        'instance'.format(partition))
    if not isinstance(dspace, NtuplesBase):
        raise TypeError('`dspace` {!r} is not an `NtuplesBase` instance'
                        ''.format(dspace))

    # The partition must sample a region the abstract functions are
    # actually defined on.
    if not fset.domain.contains_set(partition):
        raise ValueError('{} not contained in the domain {} '
                         'of the function set {}'
                         ''.format(partition, fset.domain, fset))

    # One data-space entry per grid point.
    if dspace.size != partition.size:
        raise ValueError('size {} of the data space {} not equal '
                         'to the size {} of the partition'
                         ''.format(dspace.size, dspace, partition.size))

    # Sampling maps functions -> data; interpolation is the reverse.
    domain = fset if map_type_ == 'sampling' else dspace
    range = dspace if map_type_ == 'sampling' else fset
    Operator.__init__(self, domain, range, linear=linear)
    self.__partition = partition

    if self.is_linear:
        # A linear mapping additionally needs vector-space structure on
        # both sides, with matching fields.
        if not isinstance(fset, FunctionSpace):
            raise TypeError('`fset` {!r} is not a `FunctionSpace` '
                            'instance'.format(fset))
        if not isinstance(dspace, FnBase):
            raise TypeError('`dspace` {!r} is not an `FnBase` instance'
                            ''.format(dspace))
        if fset.field != dspace.field:
            raise ValueError('`field` {} of the function space and `field`'
                             ' {} of the data space are not equal'
                             ''.format(fset.field, dspace.field))

    # NOTE(review): nesting of this `order` handling relative to the
    # `is_linear` branch was reconstructed from mangled whitespace --
    # confirm against upstream.
    order = str(kwargs.pop('order', 'C'))
    if str(order).upper() not in ('C', 'F'):
        raise ValueError('`order` {!r} not recognized'.format(order))
    else:
        self.__order = str(order).upper()
def __init__(self, stepsize):
    """Initialize a new instance with the given step size.

    Notes
    -----
    ``func`` comes from the enclosing scope (presumably a closure) --
    TODO confirm against the surrounding file.
    """
    # Endomorphism on the domain of `func`; not linear in general.
    Operator.__init__(self, domain=func.domain, range=func.domain,
                      linear=False)
    self.stepsize = stepsize
def __init__(self, operator, point, method='forward', step=None):
    """Initialize a new instance.

    Parameters
    ----------
    operator : `Operator`
        The operator whose derivative should be computed numerically. Its
        domain and range must be `FnBase` spaces.
    point : ``operator.domain`` `element-like`
        The point to compute the derivative in.
    method : {'backward', 'forward', 'central'}
        The method to use to compute the derivative.
    step : float
        The step length used in the derivative computation.
        Default: selects the step according to the dtype of the space.

    Examples
    --------
    Compute a numerical estimate of the derivative (Hessian) of the squared
    L2 norm:

    >>> space = odl.rn(3)
    >>> func = odl.solvers.L2NormSquared(space)
    >>> hess = NumericalDerivative(func.gradient, [1, 1, 1])
    >>> hess([0, 0, 1])
    rn(3).element([0.0, 0.0, 2.0])

    Find the Hessian matrix:

    >>> odl.matrix_representation(hess)
    array([[ 2.,  0.,  0.],
           [ 0.,  2.,  0.],
           [ 0.,  0.,  2.]])

    Notes
    -----
    If the operator is :math:`A` and step size :math:`h` is used, the
    derivative in the point :math:`x` and direction :math:`dx` is computed
    as follows.

    ``method='backward'``:

    .. math::
        \\partial A(x)(dx) =
        (A(x) - A(x - dx \\cdot h / \| dx \|)) \\cdot \\frac{\| dx \|}{h}

    ``method='forward'``:

    .. math::
        \\partial A(x)(dx) =
        (A(x + dx \\cdot h / \| dx \|) - A(x)) \\cdot \\frac{\| dx \|}{h}

    ``method='central'``:

    .. math::
        \\partial A(x)(dx) =
        (A(x + dx \\cdot h / (2 \| dx \|)) -
         A(x - dx \\cdot h / (2 \| dx \|)) \\cdot \\frac{\| dx \|}{h}

    The number of operator evaluations is ``2``, regardless of parameters.
    """
    if not isinstance(operator, Operator):
        raise TypeError('`operator` has to be an `Operator` instance')
    if not isinstance(operator.domain, FnBase):
        raise TypeError('`operator.domain` has to be an `FnBase` '
                        'instance')
    if not isinstance(operator.range, FnBase):
        raise TypeError('`operator.range` has to be an `FnBase` '
                        'instance')

    self.operator = operator
    self.point = operator.domain.element(point)

    if step is None:
        # Use half of the number of digits as machine epsilon, this
        # "usually" gives a good balance between precision and numerical
        # stability.
        self.step = np.sqrt(np.finfo(operator.domain.dtype).eps)
    else:
        self.step = float(step)

    self.method, method_in = str(method).lower(), method
    if self.method not in ('backward', 'forward', 'central'):
        # BUG FIX: `.format` must be applied to the message string, not to
        # the `ValueError` instance -- the original
        # `raise ValueError(...).format(...)` raised an `AttributeError`
        # instead of the intended `ValueError`.
        raise ValueError("`method` '{}' not understood"
                         "".format(method_in))

    Operator.__init__(self, operator.domain, operator.range, linear=True)
def __init__(self, functional, method='forward', step=None):
    """Initialize a new instance.

    Parameters
    ----------
    functional : `Functional`
        The functional whose gradient should be computed. Its domain must
        be an `FnBase` space.
    method : {'backward', 'forward', 'central'}
        The method to use to compute the gradient.
    step : float
        The step length used in the derivative computation.
        Default: selects the step according to the dtype of the space.

    Examples
    --------
    >>> space = odl.rn(3)
    >>> func = odl.solvers.L2NormSquared(space)
    >>> grad = NumericalGradient(func)
    >>> grad([1, 1, 1])
    rn(3).element([2.0, 2.0, 2.0])

    The gradient gives the correct value with sufficiently small step size:

    >>> grad([1, 1, 1]) == func.gradient([1, 1, 1])
    True

    If the step is too large the result is not correct:

    >>> grad = NumericalGradient(func, step=0.5)
    >>> grad([1, 1, 1])
    rn(3).element([2.5, 2.5, 2.5])

    But it can be improved by using the more accurate ``method='central'``:

    >>> grad = NumericalGradient(func, method='central', step=0.5)
    >>> grad([1, 1, 1])
    rn(3).element([2.0, 2.0, 2.0])

    Notes
    -----
    If the functional is :math:`f` and step size :math:`h` is used, the
    gradient is computed as follows.

    ``method='backward'``:

    .. math::
        (\\nabla f(x))_i = \\frac{f(x) - f(x - h e_i)}{h}

    ``method='forward'``:

    .. math::
        (\\nabla f(x))_i = \\frac{f(x + h e_i) - f(x)}{h}

    ``method='central'``:

    .. math::
        (\\nabla f(x))_i = \\frac{f(x + (h/2) e_i) - f(x - (h/2) e_i)}{h}

    The number of function evaluations is ``functional.domain.size + 1``
    if ``'backward'`` or ``'forward'`` is used and
    ``2 * functional.domain.size`` if ``'central'`` is used. On large
    domains this will be computationally infeasible.
    """
    if not isinstance(functional, Functional):
        raise TypeError('`functional` has to be a `Functional` instance')
    if not isinstance(functional.domain, FnBase):
        raise TypeError('`functional.domain` has to be an `FnBase` '
                        'instance')

    self.functional = functional

    if step is None:
        # Use half of the number of digits as machine epsilon, this
        # "usually" gives a good balance between precision and numerical
        # stability.
        self.step = np.sqrt(np.finfo(functional.domain.dtype).eps)
    else:
        self.step = float(step)

    self.method, method_in = str(method).lower(), method
    if self.method not in ('backward', 'forward', 'central'):
        # BUG FIX: `.format` must be applied to the message string, not to
        # the `ValueError` instance -- the original
        # `raise ValueError(...).format(...)` raised an `AttributeError`
        # instead of the intended `ValueError`.
        raise ValueError("`method` '{}' not understood"
                         "".format(method_in))

    Operator.__init__(self, functional.domain, functional.domain,
                      linear=functional.is_linear)
def __init__(self, operator, point, method='forward', step=None):
    """Initialize a new instance.

    Parameters
    ----------
    operator : `Operator`
        The operator whose derivative should be computed numerically. Its
        domain and range must be `FnBase` spaces.
    point : ``operator.domain`` `element-like`
        The point to compute the derivative in.
    method : {'backward', 'forward', 'central'}, optional
        The method to use to compute the derivative.
    step : float, optional
        The step length used in the derivative computation.
        Default: selects the step according to the dtype of the space.

    Examples
    --------
    Compute a numerical estimate of the derivative (Hessian) of the squared
    L2 norm:

    >>> space = odl.rn(3)
    >>> func = odl.solvers.L2NormSquared(space)
    >>> hess = NumericalDerivative(func.gradient, [1, 1, 1])
    >>> hess([0, 0, 1])
    rn(3).element([0.0, 0.0, 2.0])

    Find the Hessian matrix:

    >>> odl.matrix_representation(hess)
    array([[ 2.,  0.,  0.],
           [ 0.,  2.,  0.],
           [ 0.,  0.,  2.]])

    Notes
    -----
    If the operator is :math:`A` and step size :math:`h` is used, the
    derivative in the point :math:`x` and direction :math:`dx` is computed
    as follows.

    ``method='backward'``:

    .. math::
        \\partial A(x)(dx) =
        (A(x) - A(x - dx \\cdot h / \| dx \|)) \\cdot \\frac{\| dx \|}{h}

    ``method='forward'``:

    .. math::
        \\partial A(x)(dx) =
        (A(x + dx \\cdot h / \| dx \|) - A(x)) \\cdot \\frac{\| dx \|}{h}

    ``method='central'``:

    .. math::
        \\partial A(x)(dx) =
        (A(x + dx \\cdot h / (2 \| dx \|)) -
         A(x - dx \\cdot h / (2 \| dx \|)) \\cdot \\frac{\| dx \|}{h}

    The number of operator evaluations is ``2``, regardless of parameters.
    """
    if not isinstance(operator, Operator):
        raise TypeError('`operator` has to be an `Operator` instance')
    if not isinstance(operator.domain, FnBase):
        raise TypeError('`operator.domain` has to be an `FnBase` '
                        'instance')
    if not isinstance(operator.range, FnBase):
        raise TypeError('`operator.range` has to be an `FnBase` '
                        'instance')

    self.operator = operator
    self.point = operator.domain.element(point)

    if step is None:
        # Use half of the number of digits as machine epsilon, this
        # "usually" gives a good balance between precision and numerical
        # stability.
        self.step = np.sqrt(np.finfo(operator.domain.dtype).eps)
    else:
        self.step = float(step)

    self.method, method_in = str(method).lower(), method
    if self.method not in ('backward', 'forward', 'central'):
        # BUG FIX: `.format` must be applied to the message string, not to
        # the `ValueError` instance -- the original
        # `raise ValueError(...).format(...)` raised an `AttributeError`
        # instead of the intended `ValueError`.
        raise ValueError("`method` '{}' not understood"
                         "".format(method_in))

    Operator.__init__(self, operator.domain, operator.range, linear=True)
def __init__(self, functional, method='forward', step=None):
    """Initialize a new instance.

    Parameters
    ----------
    functional : `Functional`
        The functional whose gradient should be computed. Its domain must
        be an `FnBase` space.
    method : {'backward', 'forward', 'central'}, optional
        The method to use to compute the gradient.
    step : float, optional
        The step length used in the derivative computation.
        Default: selects the step according to the dtype of the space.

    Examples
    --------
    >>> space = odl.rn(3)
    >>> func = odl.solvers.L2NormSquared(space)
    >>> grad = NumericalGradient(func)
    >>> grad([1, 1, 1])
    rn(3).element([2.0, 2.0, 2.0])

    The gradient gives the correct value with sufficiently small step size:

    >>> grad([1, 1, 1]) == func.gradient([1, 1, 1])
    True

    If the step is too large the result is not correct:

    >>> grad = NumericalGradient(func, step=0.5)
    >>> grad([1, 1, 1])
    rn(3).element([2.5, 2.5, 2.5])

    But it can be improved by using the more accurate ``method='central'``:

    >>> grad = NumericalGradient(func, method='central', step=0.5)
    >>> grad([1, 1, 1])
    rn(3).element([2.0, 2.0, 2.0])

    Notes
    -----
    If the functional is :math:`f` and step size :math:`h` is used, the
    gradient is computed as follows.

    ``method='backward'``:

    .. math::
        (\\nabla f(x))_i = \\frac{f(x) - f(x - h e_i)}{h}

    ``method='forward'``:

    .. math::
        (\\nabla f(x))_i = \\frac{f(x + h e_i) - f(x)}{h}

    ``method='central'``:

    .. math::
        (\\nabla f(x))_i = \\frac{f(x + (h/2) e_i) - f(x - (h/2) e_i)}{h}

    The number of function evaluations is ``functional.domain.size + 1``
    if ``'backward'`` or ``'forward'`` is used and
    ``2 * functional.domain.size`` if ``'central'`` is used. On large
    domains this will be computationally infeasible.
    """
    if not isinstance(functional, Functional):
        raise TypeError('`functional` has to be a `Functional` instance')
    if not isinstance(functional.domain, FnBase):
        raise TypeError('`functional.domain` has to be an `FnBase` '
                        'instance')

    self.functional = functional

    if step is None:
        # Use half of the number of digits as machine epsilon, this
        # "usually" gives a good balance between precision and numerical
        # stability.
        self.step = np.sqrt(np.finfo(functional.domain.dtype).eps)
    else:
        self.step = float(step)

    self.method, method_in = str(method).lower(), method
    if self.method not in ('backward', 'forward', 'central'):
        # BUG FIX: `.format` must be applied to the message string, not to
        # the `ValueError` instance -- the original
        # `raise ValueError(...).format(...)` raised an `AttributeError`
        # instead of the intended `ValueError`.
        raise ValueError("`method` '{}' not understood"
                         "".format(method_in))

    Operator.__init__(self, functional.domain, functional.domain,
                      linear=functional.is_linear)
def __init__(self, domain, range, func, adjoint=None, linear=False):
    """Wrap a callable (and optionally its adjoint) as an operator.

    Parameters
    ----------
    domain : `Set`-like
        Domain passed on to `Operator.__init__`.
    range : `Set`-like
        Range passed on to `Operator.__init__`.
    func : callable
        Callable implementing the operator evaluation.
    adjoint : callable, optional
        Callable implementing the adjoint, stored as
        ``self.adjoint_func``.
    linear : bool, optional
        Whether the operator should be flagged as linear.
    """
    self.func = func
    self.adjoint_func = adjoint
    Operator.__init__(self, domain, range, linear)