def __init__(self, space): """Initialize an instance. Parameters ---------- space : `TensorSpace` The domain of the operator. """ if not isinstance(space, LinearSpace): raise TypeError('`space` {!r} not a `LinearSpace`'.format(space)) if nargin == 1: domain = space0 = space dtypes = [space.dtype] elif nargin == len(space) == 2 and isinstance(space, ProductSpace): domain = space space0 = space[0] dtypes = [space[0].dtype, space[1].dtype] else: domain = ProductSpace(space, nargin) space0 = space dtypes = [space.dtype, space.dtype] dts_out = dtypes_out(name, dtypes) if nargout == 1: range = space0.astype(dts_out[0]) else: range = ProductSpace(space0.astype(dts_out[0]), space0.astype(dts_out[1])) linear = name in LINEAR_UFUNCS Operator.__init__(self, domain=domain, range=range, linear=linear)
def __init__(self, space): """Initialize an instance. Parameters ---------- space : `FnBase` The domain of the operator. """ if not isinstance(space, LinearSpace): raise TypeError('`space` {!r} not a `LinearSpace`'.format(space)) if _is_integer_only_ufunc(name) and not is_int_dtype(space.dtype): raise ValueError("ufunc '{}' only defined with integral dtype" "".format(name)) if nargin == 1: domain = space else: domain = ProductSpace(space, nargin) if nargout == 1: range = space else: range = ProductSpace(space, nargout) linear = name in LINEAR_UFUNCS Operator.__init__(self, domain=domain, range=range, linear=linear)
def fitting_kernel(space, kernel): kspace = ProductSpace(space, self.dim) # Create the array of kernel values on the grid points discretized_kernel = kspace.element( [space.element(kernel) for _ in range(self.dim)]) return discretized_kernel
def IntegrateTemplateEvol(template, zeta, k0, k1):
    N = len(zeta) - 1
    inv_N = 1 / N
    series_image_space_integration = ProductSpace(template.space, N + 1)
    I = series_image_space_integration.element()
    I[0] = template.copy()
    for i in range(k0, k1):
        I[i + 1] = I[i] + inv_N * zeta[i]
    return I
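# Minimal usage sketch of IntegrateTemplateEvol as defined above (assumes the
# function is in scope; the space, size N and the constant source term are
# illustrative assumptions). With a constant source term the forward-Euler
# accumulation gives I[k] = template + (k / N) * zeta[0].
import odl
from odl.space import ProductSpace

space = odl.uniform_discr([0, 0], [1, 1], (16, 16))
template = space.zero()
N = 5
# Constant source term zeta_i == 1 at every time point
zeta = ProductSpace(space, N + 1).element([space.one()] * (N + 1))

I = IntegrateTemplateEvol(template, zeta, 0, N)
print(I[N])  # forward-Euler sum: 0 + N * (1/N) * 1 = constant image 1.0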
def ShootTemplateFromVectorFields(vector_field_list, template):
    N = len(vector_field_list) - 1
    inv_N = 1 / N
    series_image_space_integration = ProductSpace(template.space, N + 1)
    I = series_image_space_integration.element()
    I[0] = template.copy()
    for i in range(0, N):
        I[i + 1] = template.space.element(
            linear_deform(I[i], -inv_N * vector_field_list[i])).copy()
    return I
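# Minimal usage sketch of ShootTemplateFromVectorFields as defined above
# (assumes the function and odl.deform.linear_deform are in scope; the space,
# phantom and number of time steps are illustrative assumptions).
import odl
from odl.deform import linear_deform
from odl.space import ProductSpace

space = odl.uniform_discr([-1, -1], [1, 1], (32, 32))
template = odl.phantom.shepp_logan(space, modified=True)
N = 10
# Zero velocity fields: the flow is the identity map
vector_field_list = ProductSpace(space.tangent_bundle, N + 1).zero()

I = ShootTemplateFromVectorFields(vector_field_list, template)
print((I[N] - template).norm())  # ~0: the template is transported unchanged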
def __init__(self, N, kernel, space): self.N = N self.kernel = kernel self.space = space self.dim = space.ndim from odl.space import ProductSpace # Compute the FT of kernel in fitting term def fitting_kernel(space, kernel): kspace = ProductSpace(space, self.dim) # Create the array of kernel values on the grid points discretized_kernel = kspace.element( [space.element(kernel) for _ in range(self.dim)]) return discretized_kernel def padded_ft_op(space, padded_size): """Create zero-padding fft setting Parameters ---------- space : the space needs to do FT padding_size : the percent for zero padding """ padded_op = ResizingOperator( space, ran_shp=[padded_size for _ in range(space.ndim)]) shifts = [not s % 2 for s in space.shape] ft_op = FourierTransform(padded_op.range, halfcomplex=False, shift=shifts) return ft_op * padded_op # FFT setting for data matching term, 1 means 100% padding padded_size = 2 * space.shape[0] padded_ft_fit_op = padded_ft_op(space, padded_size) vectorial_ft_fit_op = DiagonalOperator(*([padded_ft_fit_op] * self.dim)) discretized_kernel = fitting_kernel(space, kernel) ft_kernel_fitting = vectorial_ft_fit_op(discretized_kernel) self.vectorial_ft_fit_op = vectorial_ft_fit_op self.ft_kernel_fitting = ft_kernel_fitting super().__init__( domain=ProductSpace(space, 2), #range=ProductSpace(space,self.N+1), range=ProductSpace(ProductSpace(space, self.N + 1), ProductSpace(space, self.N + 1), ProductSpace(space.tangent_bundle, self.N + 1)), linear=False)
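# The nested helpers above (fitting_kernel, padded_ft_op) set up convolution
# with the kernel via zero-padded FFTs. The sketch below reproduces that
# pattern outside the class; the space size, the Gaussian kernel width and
# the constant momentum field are illustrative assumptions, while
# ResizingOperator, FourierTransform and DiagonalOperator are the same ODL
# operators used in the code above.
import numpy as np
import odl
from odl.space import ProductSpace

space = odl.uniform_discr([-1, -1], [1, 1], (32, 32))
dim = space.ndim

def kernel(x):
    sigma = 0.2  # arbitrary kernel width for illustration
    return np.exp(-sum(xi ** 2 for xi in x) / (2 * sigma ** 2))

# Zero-padded Fourier transform, as in padded_ft_op above (100% padding)
padded_op = odl.ResizingOperator(
    space, ran_shp=[2 * n for n in space.shape])
shifts = [not s % 2 for s in space.shape]
ft_op = odl.trafos.FourierTransform(
    padded_op.range, halfcomplex=False, shift=shifts) * padded_op
vectorial_ft_op = odl.DiagonalOperator(*([ft_op] * dim))

# Discretized kernel on each component, as in fitting_kernel above
discretized_kernel = ProductSpace(space, dim).element(
    [space.element(kernel)] * dim)
ft_kernel = vectorial_ft_op(discretized_kernel)

# Velocity field from a momentum field m: v = (2*pi)^(dim/2) * F^-1(F(m) F(K))
m = space.tangent_bundle.element([space.one()] * dim)
v = (2 * np.pi) ** (dim / 2.0) * vectorial_ft_op.inverse(
    vectorial_ft_op(m) * ft_kernel)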
def ShootSourceTermBackwardlist(vector_field_list, zeta):
    N = len(vector_field_list) - 1
    inv_N = 1 / N
    series_image_space_integration = ProductSpace(zeta[0].space, N + 1)
    zeta_transp = series_image_space_integration.element()
    space_zeta = zeta[0].space
    for i in range(0, N + 1):
        temp = zeta[i].copy()
        for j in range(i):
            temp = space_zeta.element(
                linear_deform(temp,
                              inv_N * vector_field_list[i - 1 - j])).copy()
        zeta_transp[i] = temp.copy()
    return zeta_transp
def __pow__(self, shape): """Return ``self ** shape``. Notes ----- This can be overridden by subclasses in order to give better memory coherence or otherwise a better interface. Examples -------- Create simple power space: >>> r2 = odl.rn(2) >>> r2 ** 4 ProductSpace(rn(2), 4) Multiple powers work as expected: >>> r2 ** (4, 2) ProductSpace(ProductSpace(rn(2), 4), 2) """ from odl.space import ProductSpace try: shape = (int(shape),) except TypeError: shape = tuple(shape) pspace = self for n in shape: pspace = ProductSpace(pspace, n) return pspace
def __init__(self, space, a, b): """Initialize a new instance. Parameters ---------- space : `LinearSpace` Space of elements which the operator is acting on. a, b : ``space.field`` elements Scalars to multiply ``x[0]`` and ``x[1]`` with, respectively. Examples -------- >>> r3 = odl.rn(3) >>> r3xr3 = odl.ProductSpace(r3, r3) >>> xy = r3xr3.element([[1, 2, 3], [1, 2, 3]]) >>> z = r3.element() >>> op = LinCombOperator(r3, 1.0, 1.0) >>> op(xy, out=z) # Returns z rn(3).element([ 2., 4., 6.]) >>> z rn(3).element([ 2., 4., 6.]) """ domain = ProductSpace(space, space) super(LinCombOperator, self).__init__(domain, space, linear=True) self.a = a self.b = b
def __init__(self, alphas, control_points, discr_space, ft_kernel):
    """Initialize a new instance.

    Parameters
    ----------
    alphas : `ProductSpaceElement`
        Displacement parameters in which the derivative is evaluated.
    control_points : `TensorGrid` or `array-like`
        The points ``x_j`` controlling the deformation. They can
        be given either as a tensor grid or as a point array. In
        the latter case, its shape must be ``(N, n)``, where ``n``
        is the dimension of the template space, and ``N`` the
        number of ``alpha_j``, i.e. the size of (each component
        of) ``par_space``.
    discr_space : `DiscreteSpace`
        Space of the image grid of the template.
    ft_kernel : `callable`
        Function determining the FT of the kernel at the control
        points ``K(y_j)``. The function must accept a real variable
        and return a real number.
    """
    super().__init__(alphas, control_points, discr_space, ft_kernel)

    # Switch domain and range
    self.discr_space = discr_space
    self.range_space = ProductSpace(self.discr_space,
                                    self.discr_space.ndim)
    Operator.__init__(self, self.range_space, alphas.space, linear=True)
def _vectorized_kernel(space, kernel): """Compute the vectorized discrete kernel ``K``. Parameters ---------- space : the space used to define kernel ``K``. kernel : the used kernel function for data fitting term. Returns ------- discretized_kernel : `ProductSpaceElement` The vectorized discrete kernel with the space dimension """ kspace = ProductSpace(space, space.ndim) # Create the array of kernel values on the grid points discretized_kernel = kspace.element( [space.element(kernel) for _ in range(space.ndim)]) return discretized_kernel
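# Minimal sketch of calling _vectorized_kernel as defined above (the space
# size and the Gaussian kernel are illustrative assumptions).
import numpy as np
import odl

space = odl.uniform_discr([-1, -1], [1, 1], (64, 64))
gaussian = lambda x: np.exp(-(x[0] ** 2 + x[1] ** 2) / (2 * 0.1 ** 2))

K = _vectorized_kernel(space, gaussian)
print(K.space)      # ProductSpace(space, 2): one kernel copy per axis
print(K[0].norm())  # each component holds the same discretized kernel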
def ComputeMetamorphosisListInt(self, vector_field_list, zeta_list):
    image_list = ProductSpace(self.template.space, self.N + 1).element()
    zeta_transp = ShootSourceTermBackwardlist(vector_field_list,
                                              zeta_list).copy()
    template_evolution = IntegrateTemplateEvol(self.template, zeta_transp,
                                               0, self.N)
    # We build, for each j, image_list[j] = template_t_j \circ \phi_{t_j}^{-1}
    for k in range(self.N + 1):
        image_list[k] = ShootTemplateFromVectorFieldsFinal(
            vector_field_list, template_evolution[k], 0, k).copy()
    return image_list
def _call(self, X):
    """Shooting equations and integration.

    Args:
        X = [I0, p] with I0 the template image and p a scalar-valued
            momentum

    Returns:
        I: deformed image over time (N+1 time steps)
        P: scalar-valued momentum over time
        U: vector fields over time
    """
    template = X[0]
    P0 = X[1]
    series_image_space_integration = ProductSpace(template.space,
                                                  self.N + 1)
    series_vector_space_integration = ProductSpace(
        template.space.tangent_bundle, self.N + 1)
    inv_N = 1 / self.N
    I = series_image_space_integration.element()
    I[0] = template.copy()
    P = series_image_space_integration.element()
    P[0] = P0.copy()
    U = series_vector_space_integration.element()

    # Create the gradient op
    grad_op = Gradient(domain=self.space, method='forward',
                       pad_mode='symmetric')
    # Create the divergence op
    div_op = -grad_op.adjoint

    for i in range(self.N):
        # Compute vector-valued momentum from scalar-valued momentum
        gradI = grad_op(I[i])
        m = self.space.tangent_bundle.element(-P[i] * gradI)

        # Kernel convolution to obtain velocity fields from momentum
        # u = K*m
        U[i] = (2 * np.pi)**(self.dim / 2.0) * \
            self.vectorial_ft_fit_op.inverse(
                self.vectorial_ft_fit_op(m) * self.ft_kernel_fitting)

        # Integration step
        dtI = -sum(gradI * U[i])
        dtP = -div_op(P[i] * U[i])
        I[i + 1] = I[i] + inv_N * dtI
        P[i + 1] = P[i] + inv_N * dtP

    U[self.N] = (2 * np.pi)**(self.dim / 2.0) * \
        self.vectorial_ft_fit_op.inverse(
            self.vectorial_ft_fit_op(
                self.space.tangent_bundle.element(-P[self.N] * gradI)) *
            self.ft_kernel_fitting)

    return I, P, U
def __init__(self, Shooting, forwardOp, norm, data, space, kernel, reg_param): self.Shooting = Shooting self.forwardOp = forwardOp self.norm = norm self.data = data self.space = space self.kernel = kernel self.reg_param = reg_param self.attach = self.norm*(self.data - self.forwardOp) super().__init__(domain=ProductSpace(self.space, self.space.tangent_bundle), range=norm.range, linear=False)
def __init__(self, sspace, vecfield, vfspace=None, weighting=None):
    """Initialize a new instance.

    Parameters
    ----------
    sspace : `LinearSpace`
        "Scalar" space on which the operator acts
    vecfield : `range` `element-like`
        Vector field of the point-wise inner product operator
    vfspace : `ProductSpace`, optional
        Space of vector fields to which the operator maps. It must
        be a power space with ``sspace`` as base space.
        This option is intended to enforce an operator range
        with a certain weighting.
        Default: ``ProductSpace(space, len(vecfield), weighting=weighting)``
    weighting : `array-like` or float, optional
        Weighting array or constant of the inner product operator.
        If an array is given, its length must be equal to
        ``len(vecfield)``.
        By default, the weights are taken from ``range.weighting``
        if applicable. Note that this excludes unusual weightings
        with custom inner product, norm or dist.
    """
    if vfspace is None:
        vfspace = ProductSpace(sspace, len(vecfield), weighting=weighting)
    else:
        if not isinstance(vfspace, ProductSpace):
            raise TypeError('`vfspace` {!r} is not a '
                            'ProductSpace instance'.format(vfspace))
        if vfspace[0] != sspace:
            raise ValueError('base space of the range is different from '
                             'the given scalar space ({!r} != {!r})'
                             ''.format(vfspace[0], sspace))
    super().__init__(adjoint=True, vfspace=vfspace, vecfield=vecfield,
                     weighting=weighting)

    # Get weighting from range
    if hasattr(self.range.weighting, 'array'):
        self.__ran_weights = self.range.weighting.array
    elif hasattr(self.range.weighting, 'const'):
        self.__ran_weights = (self.range.weighting.const *
                              np.ones(len(self.range)))
    else:
        raise ValueError('weighting scheme {!r} of the range does '
                         'not define a weighting array or constant'
                         ''.format(self.range.weighting))
def ComputeMetamorphosisFromZetaTransp(self, vector_field_list, zeta_transp):
    image_list = ProductSpace(self.template.space, self.nb_data).element()
    template_evolution = IntegrateTemplateEvol(self.template, zeta_transp,
                                               0, self.N)
    # We build, for each j, image_list[j] = template_t_j \circ \phi_{t_j}^{-1}
    for j in range(self.nb_data):
        delta0 = (self.data_time_points[j] - (self.k_j_list[j] / self.N))
        template_t_j = self.image_domain.element(
            linear_deform(template_evolution[self.k_j_list[j]],
                          -delta0 *
                          vector_field_list[self.k_j_list[j]])).copy()
        # image_t_j_k_j = template_t_j \circ \phi_{\tau_{k_j}}^{-1}
        image_t_j_k_j = ShootTemplateFromVectorFieldsFinal(
            vector_field_list, template_t_j, 0, self.k_j_list[j]).copy()
        image_list[j] = self.image_domain.element(
            linear_deform(image_t_j_k_j,
                          -delta0 *
                          vector_field_list[self.k_j_list[j]])).copy()
    return image_list
def __init__(self, par_space, control_points, discr_space, ft_kernel): """Initialize a new instance. Parameters ---------- par_space : `ProductSpace` or `Rn` Space of the parameters. For one-dimensional deformations, `Rn` can be used. Otherwise, a `ProductSpace` with ``n`` components is expected. control_points : `TensorGrid` or `array-like` The points ``x_j`` controlling the deformation. They can be given either as a tensor grid or as a point array. In the latter case, its shape must be ``(N, n)``, where ``n`` is the dimension of the template space, and ``N`` the number of ``alpha_j``, i.e. the size of (each component of) ``par_space``. discr_space : `DiscreteSpace` Space of the image grid of the template. ft_kernel : `callable` Function to determine the FT of kernel at the control points ``K(y_j)`` The function must accept a real variable and return a real number. """ if par_space.size != discr_space.ndim: raise ValueError('dimensions of product space and image grid space' ' do not match ({} != {})' ''.format(par_space.size, discr_space.ndim)) self.discr_space = discr_space self.range_space = ProductSpace(self.discr_space, self.discr_space.ndim) super().__init__(par_space, self.range_space, linear=True) self.ft_kernel = ft_kernel if not isinstance(control_points, RectGrid): self._control_pts = np.asarray(control_points) if self._control_pts.shape != (self.num_contr_pts, self.ndim): raise ValueError( 'expected control point array of shape {}, got {}.' ''.format((self.num_contr_pts, self.ndim), self.control_points.shape)) else: self._control_pts = control_points
def __init__(self, operator): self.operator = operator # Kernel convolution v = K*m is done through Fouriertransform def fitting_kernel(space, kernel): kspace = ProductSpace(space, dim) # Create the array of kernel values on the grid points discretized_kernel = kspace.element( [space.element(kernel) for _ in range(dim)]) return discretized_kernel def padded_ft_op(space, padded_size): """Create zero-padding fft setting Parameters ---------- space : the space needs to do FT padding_size : the percent for zero padding """ padded_op = ResizingOperator( space, ran_shp=[padded_size for _ in range(space.ndim)]) shifts = [not s % 2 for s in space.shape] ft_op = FourierTransform( padded_op.range, halfcomplex=False, shift=shifts) return ft_op * padded_op # FFT setting for data matching term, 1 means 100% padding dim = 2 padded_size = 2 * operator.space.shape[0] padded_ft_fit_op = padded_ft_op(operator.space, padded_size) vectorial_ft_fit_op = DiagonalOperator(*([padded_ft_fit_op] * dim)) discretized_kernel = fitting_kernel(operator.space, operator.kernel) ft_kernel_fitting = vectorial_ft_fit_op(discretized_kernel) self.vectorial_ft_fit_op = vectorial_ft_fit_op self.ft_kernel_fitting = ft_kernel_fitting super().__init__(domain=ProductSpace(operator.space, operator.space.tangent_bundle), range=operator.space.tangent_bundle, linear=False)
def __mul__(self, other): """Return ``self * other``. Notes ----- This can be overridden by subclasses in order to give better memory coherence or otherwise a better interface. Examples -------- Create simple product space: >>> r2 = odl.rn(2) >>> r3 = odl.rn(3) >>> r2 * r3 ProductSpace(rn(2), rn(3)) """ from odl.space import ProductSpace if not isinstance(other, LinearSpace): raise TypeError('Can only multiply with `LinearSpace`, got {!r}' ''.format(other)) return ProductSpace(self, other)
def LDDMM_gradient_descent_solver_spatiotemporal(forward_op, noise_proj_data,
                                                 template, vector_fields,
                                                 gate_pts, discr_deg, niter,
                                                 in_niter1, in_inter2,
                                                 stepsize1, stepsize2,
                                                 mu_1, mu_2, lamb, kernel,
                                                 impl1='geom',
                                                 impl2='least_square',
                                                 callback=None):
    """Solver for spatiotemporal image reconstruction using LDDMM.

    Notes
    -----
    The model is:

    .. math:: \min_{v} \lambda \int_0^1 \|v(t)\|_V^2 dt +
        \|T(\phi_1.I) - g\|_2^2,

    where :math:`\phi_1.I := |D\phi_1^{-1}| I(\phi_1^{-1})` is for
    mass-preserving deformation, while :math:`\phi_1.I := I(\phi_1^{-1})`
    is for geometric deformation. :math:`\phi_1^{-1}` is the inverse of
    the solution at :math:`t=1` of the flow of diffeomorphisms.
    :math:`|D\phi_1^{-1}|` is the Jacobian determinant of
    :math:`\phi_1^{-1}`. :math:`T` is the forward operator. If :math:`T`
    is an identity operator, the above model reduces to image matching.
    If :math:`T` is a non-identity forward operator, the above model is
    for shape-based image reconstruction. :math:`g` is the detected data,
    `noise_proj_data`. :math:`I` is the `template`. :math:`v(t)` is the
    velocity vector. :math:`V` is a reproducing kernel Hilbert space for
    the velocity vector. :math:`\lambda` (``lamb``) is the regularization
    parameter.

    Parameters
    ----------
    forward_op : sequence of `Operator`
        The forward operators of imaging, one per gate.
    noise_proj_data : sequence of `DiscreteLpElement`
        The given (noisy) projection data, one per gate.
    template : `DiscreteLpElement`
        Fixed template deformed by the deformation.
    vector_fields : `ProductSpaceElement`
        Initial guess for the velocity fields.
    gate_pts : `int`
        The number of gates (time intervals).
    discr_deg : `int`
        The number of discretization points per gate.
    niter : `int`
        The given maximum number of outer iterations.
    in_niter1, in_inter2 : `int`
        The numbers of inner iterations for the template update and the
        velocity-field update, respectively.
    stepsize1, stepsize2 : `float`
        The given step sizes for the template update and the
        velocity-field update, respectively.
    mu_1, mu_2 : `float`
        Regularization parameters for the total-variation term on the
        template and for the velocity fields, respectively.
    lamb : `float`
        The given regularization parameter. It's a weighted value on the
        regularization-term side.
    kernel : `function`
        Kernel function in reproducing kernel Hilbert space.
    impl1 : {'geom', 'mp'}, optional
        The given implementation method for the group action, where 'mp'
        means the mass-preserving method and 'geom' the
        non-mass-preserving geometric method. Its default choice is
        'geom'.
    impl2 : {'least_square'}, optional
        The given implementation method for the data matching term. Here
        the implementation only supports the case of least squares.
    callback : `class`, optional
        Show the intermediate results of iteration.

    Returns
    -------
    template : `DiscreteLpElement`
        The reconstructed template.
    vector_fields : `ProductSpaceElement`
        The optimized velocity fields.
    image_MN0 : `ProductSpaceElement`
        The series of images produced by the template and the velocity
        fields.
    """
    # Max index of discretized points
    N = gate_pts
    # Discretized degree
    M = discr_deg
    # Compute the max index of discretized points
    MN = M * N
    MN1 = MN + 1
    # Get the inverse of the number of discretized points
    inv_MN = 1.0 / MN
    N1 = N + 1
    N2 = 2. / N
    ss1 = stepsize1 * N2
    ss2 = stepsize2 * N2
    ss3 = stepsize1 * mu_1

    # Create the gradient operator for the squared L2 functional
    if impl2 == 'least_square':
        gradS = [forward_op[0].adjoint *
                 (forward_op[0] - noise_proj_data[0])] * N1
        for i in range(N):
            j = i + 1
            gradS[j] = forward_op[j].adjoint * (forward_op[j] -
                                                noise_proj_data[j])
    else:
        raise NotImplementedError('now only support least square')

    # Create the space of images
    image_space = template.space

    # Get the dimension of the space of images
    dim = image_space.ndim

    # Fourier transform setting for data matching term
    # The padded_size is the size of the padded domain
    padded_size = 2 * image_space.shape[0]
    # The pad_ft_op is the operator of Fourier transform
    # composing with padded operator
    pad_ft_op = padded_ft_op(image_space, padded_size)
    # The vectorial_ft_op is a vectorial Fourier transform operator,
    # which constructs the diagonal element of a matrix.
    vectorial_ft_op = DiagonalOperator(*([pad_ft_op] * dim))

    # Compute the FT of kernel in fitting term
    discretized_kernel = fitting_kernel(image_space, kernel)
    ft_kernel_fitting = vectorial_ft_op(discretized_kernel)

    # Create the space for series deformations and series Jacobian determinant
    series_image_space = ProductSpace(image_space, MN1)
    series_backprojection_space = ProductSpace(image_space, N1)
    series_bp_all = [image_space.element()] * N1
    for i in range(N):
        j = i + 1
        series_bp_all[j] = [image_space.element()] * (j * M + 1)

    # Initialize vector fields at different time points
    vector_fields = vector_fields

    # Initialize two series deformations and series Jacobian determinant
    image_MN0 = series_image_space.element()
    if impl1 == 'geom':
        eta_tt = series_backprojection_space.element()
    else:
        raise NotImplementedError('unknown group action')

    for j in range(MN1):
        image_MN0[j] = image_space.element(template)

    eta_tt[0] = gradS[0](image_MN0[0])
    series_bp_all[0] = eta_tt[0]
    for i in range(1, N1):
        iM = i * M
        eta_tt[i] = gradS[i](image_MN0[iM])
        for j in range(iM + 1):
            series_bp_all[i][j] = eta_tt[i]

    # Create the gradient operator
    grad_op = Gradient(domain=image_space, method='forward',
                       pad_mode='symmetric')
    # Create the divergence operator, which can be obtained from
    # the adjoint of gradient operator
    # div_op = Divergence(domain=pspace, method='forward',
    #                     pad_mode='symmetric')
    grad_op_adjoint = grad_op.adjoint
    div_op = -grad_op.adjoint

    # Begin iteration for non-mass-preserving case
    if impl1 == 'geom':
        print(impl1)
        # Outer iteration
        for k in range(niter):
            print('iter = {!r}'.format(k))

            # %%% Setting for getting a proper initial template
            # Inner iteration for updating template
            if k == 0:
                niter1 = 50
                # niter1 = in_niter1
            else:
                niter1 = in_niter1

            # %%% Solving TV-L2 by gradient descent
            # Store energy
            E = []
            E = np.hstack((E, np.zeros(niter1)))

            for k1 in range(niter1):
                image_MN0[0] = template
                # Update partial of template
                grad_template = grad_op(template)
                grad_template_norm = np.sqrt(grad_template[0]**2 +
                                             grad_template[1]**2 + 1.0e-12)
                E[k1] += mu_1 * np.asarray(
                    grad_template_norm).sum() * template.space.cell_volume
                for i in range(1, N1):
                    E[k1] += 1. / N * np.asarray(
                        (forward_op[i](image_MN0[i * M]) -
                         noise_proj_data[i])**2).sum() * \
                        noise_proj_data[0].space.cell_volume
                template = template - \
                    ss3 * grad_op_adjoint(grad_template / grad_template_norm)
                for j in range(MN):
                    temp1 = j + 1
                    # Update image_MN0
                    image_MN0[temp1] = image_space.element(
                        _linear_deform(image_MN0[j],
                                       -inv_MN * vector_fields[temp1]))
                    if temp1 % M == 0:
                        temp2 = temp1 // M
                        # print(temp1)
                        # print(temp2)
                        # Update eta_tt
                        eta_tt[temp2] = gradS[temp2](image_MN0[temp1])
                        # eta_tt[temp2].show('eta_tt[{!r}]'.format(temp2))
                        series_bp_all[temp2][temp1] = eta_tt[temp2]
                        # the above two lines can be combined into one
                        # series_bp_all[temp2][temp1] = \
                        #     gradS[temp2](image_MN0[temp1])
                        for l in range(temp1):
                            jacobian_det = image_space.element(
                                1.0 + inv_MN *
                                div_op(vector_fields[temp1 - l - 1]))
                            # Update eta_tau_tnp
                            series_bp_all[temp2][temp1 - l - 1] = \
                                jacobian_det * image_space.element(
                                    _linear_deform(
                                        series_bp_all[temp2][temp1 - l],
                                        inv_MN *
                                        vector_fields[temp1 - l - 1]))
                        # Update partial of template
                        template = template - \
                            ss1 * series_bp_all[temp2][0]

            for k2 in range(in_inter2):
                image_MN0[0] = template
                series_bp_all[0] = gradS[0](image_MN0[0])
                for j in range(MN):
                    temp1 = j + 1
                    # Update image_MN0
                    image_MN0[temp1] = image_space.element(
                        _linear_deform(image_MN0[j],
                                       -inv_MN * vector_fields[temp1]))
                    if temp1 % M == 0:
                        temp2 = temp1 // M
                        # Update eta_tt
                        eta_tt[temp2] = gradS[temp2](image_MN0[temp1])
                        series_bp_all[temp2][temp1] = eta_tt[temp2]
                        for l in range(temp1):
                            jacobian_det = image_space.element(
                                1.0 + inv_MN *
                                div_op(vector_fields[temp1 - l - 1]))
                            # Update eta_tau_t
                            series_bp_all[temp2][temp1 - l - 1] = \
                                jacobian_det * image_space.element(
                                    _linear_deform(
                                        series_bp_all[temp2][temp1 - l],
                                        inv_MN *
                                        vector_fields[temp1 - l - 1]))

                for j in range(MN1):
                    tmp1 = grad_op(image_MN0[j])
                    tmp2 = int(np.ceil(j * 1. / M))
                    tmp0 = tmp2 + 1
                    # print(tmp2)
                    if tmp2 == 0:
                        tmp3 = image_space.zero()
                        tmp4 = image_space.tangent_bundle.zero()
                    else:
                        tmp3 = series_bp_all[tmp2][j]
                        tmp4 = vector_fields[j]
                    for i in range(tmp0, N1):
                        tmp3 = tmp3 + series_bp_all[i][j]
                        tmp4 = tmp4 + vector_fields[j]
                    for i in range(dim):
                        tmp1[i] *= tmp3
                    tmp5 = (2 * np.pi)**(dim / 2.0) * vectorial_ft_op.inverse(
                        vectorial_ft_op(tmp1) * ft_kernel_fitting)
                    # Update vector_fields
                    vector_fields[j] = vector_fields[j] + \
                        ss2 * (tmp5 - mu_2 * tmp4)

        return template, vector_fields, image_MN0
    else:
        raise NotImplementedError('unknown group action')
def ufunc_class_factory(name, nargin, nargout, docstring): """Create a Ufunc `Operator` from a given specification.""" assert 0 <= nargin <= 2 def __init__(self, space): """Initialize an instance. Parameters ---------- space : `TensorSpace` The domain of the operator. """ if not isinstance(space, LinearSpace): raise TypeError('`space` {!r} not a `LinearSpace`'.format(space)) if nargin == 1: domain = space0 = space dtypes = [space.dtype] elif nargin == len(space) == 2 and isinstance(space, ProductSpace): domain = space space0 = space[0] dtypes = [space[0].dtype, space[1].dtype] else: domain = ProductSpace(space, nargin) space0 = space dtypes = [space.dtype, space.dtype] dts_out = dtypes_out(name, dtypes) if nargout == 1: range = space0.astype(dts_out[0]) else: range = ProductSpace(space0.astype(dts_out[0]), space0.astype(dts_out[1])) linear = name in LINEAR_UFUNCS Operator.__init__(self, domain=domain, range=range, linear=linear) def _call(self, x, out=None): """Return ``self(x)``.""" # TODO: use `__array_ufunc__` when implemented on `ProductSpace`, # or try both if out is None: if nargin == 1: return getattr(x.ufuncs, name)() else: return getattr(x[0].ufuncs, name)(*x[1:]) else: if nargin == 1: return getattr(x.ufuncs, name)(out=out) else: return getattr(x[0].ufuncs, name)(*x[1:], out=out) def __repr__(self): """Return ``repr(self)``.""" return '{}({!r})'.format(name, self.domain) # Create example (also functions as doctest) if 'shift' in name or 'bitwise' in name or name == 'invert': dtype = int else: dtype = float space = tensor_space(3, dtype=dtype) if nargin == 1: vec = space.element([-1, 1, 2]) arg = '{}'.format(vec) with np.errstate(all='ignore'): result = getattr(vec.ufuncs, name)() else: vec = space.element([-1, 1, 2]) vec2 = space.element([3, 4, 5]) arg = '[{}, {}]'.format(vec, vec2) with np.errstate(all='ignore'): result = getattr(vec.ufuncs, name)(vec2) if nargout == 2: result_space = ProductSpace(vec.space, 2) result = repr(result_space.element(result)) examples_docstring = RAW_EXAMPLES_DOCSTRING.format(space=space, name=name, arg=arg, result=result) full_docstring = docstring + examples_docstring attributes = { "__init__": __init__, "_call": _call, "derivative": derivative_factory(name), "__repr__": __repr__, "__doc__": full_docstring } full_name = name + '_op' return type(full_name, (Operator, ), attributes)
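# The factory above is what generates the per-ufunc Operator classes; in
# recent ODL versions they are exposed in the odl.ufunc_ops namespace (an
# assumption about the installed version). A short usage sketch:
import odl

r3 = odl.rn(3)

sin_op = odl.ufunc_ops.sin(r3)       # nargin == 1: domain == range == rn(3)
print(sin_op(r3.element([0, 1, 2])))

add_op = odl.ufunc_ops.add(r3)       # nargin == 2: domain == rn(3) ** 2
print(add_op([[1, 2, 3], [4, 5, 6]]))  # -> rn(3).element([5., 7., 9.])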
def LDDMM_Beg_solver(template, reference, time_pts, niter, eps, lamb,
                     kernel, callback=None):
    """Solver for the shape-based reconstruction using LDDMM_Beg.

    Notes
    -----
    The model is:

    .. math:: \min_{v(t) \in V, \phi_{0,1}^v \in G_v}
        \lambda \int_0^1 \|v(t)\|_V^2 dt +
        \int_\Omega |(\phi_{0,1}^v.I_0) - I_1|^2 dx

    Here :math:`I_0` is the template. :math:`v(t)` is the velocity
    vector. :math:`V` is a reproducing kernel Hilbert space for the
    velocity vector. :math:`I_1` is the reference. :math:`\lambda` is the
    regularization parameter. :math:`\phi_{0, 1}^v.I := I \circ
    \phi_{1, 0}^v` is for geometric deformation, where
    :math:`\phi_{1, 0}^v` is the inverse of :math:`\phi_{0, 1}^v`, i.e.
    the solution at :math:`t=1` of the flow of diffeomorphisms.
    :math:`|D\phi_{1, 0}^v|` is the Jacobian determinant of
    :math:`\phi_{1, 0}^v`.

    Parameters
    ----------
    template : `DiscreteLpElement`
        Fixed template.
    reference : `DiscreteLpElement`
        Fixed reference.
    time_pts : `int`
        The number of time intervals.
    niter : `int`
        The given maximum iteration number.
    eps : `float`
        The given step size.
    lamb : `float`
        The given regularization parameter. It's a weighted value on the
        regularization-term side.
    kernel : `function`
        Kernel function in reproducing kernel Hilbert space.
    callback : `class`, optional
        Show the intermediate results of iteration.

    Returns
    -------
    image_N0 : `ProductSpaceElement`
        The series of images produced by template and velocity field.
    E : `numpy.array`
        Storage of the energy values for iterations.
    """
    # Give the number of time intervals
    N = time_pts
    # Get the inverse of time intervals
    inv_N = 1.0 / N

    # Create the space of images
    image_space = template.space
    # image_space = rec_space

    # Get the dimension of the space of images
    dim = image_space.ndim

    # Fourier transform setting for data matching term
    # The padded_size is the size of the padded domain
    padded_size = 2 * image_space.shape[0]
    # Create operator of Fourier transform composing with padded operator
    pad_ft_op = _padded_ft_op(image_space, padded_size)
    # Create vectorial Fourier transform operator
    # Construct the diagonal element of a matrix operator
    vectorial_ft_op = DiagonalOperator(*([pad_ft_op] * dim))

    # Compute the FT of kernel in fitting term
    discretized_kernel = _vectorized_kernel(image_space, kernel)
    ft_kernel_fitting = vectorial_ft_op(discretized_kernel)

    # Create the space for series deformations and series Jacobian determinant
    pspace = image_space.tangent_bundle
    series_pspace = ProductSpace(pspace, N + 1)
    series_image_space = ProductSpace(image_space, N + 1)

    # Initialize vector fields at different time points
    vector_fields = series_pspace.zero()

    # Give the initial two series deformations and series Jacobian determinant
    image_N0 = series_image_space.element()
    image_N1 = series_image_space.element()
    detDphi_N1 = series_image_space.element()
    for i in range(N + 1):
        image_N0[i] = image_space.element(template)
        image_N1[i] = image_space.element(reference)
        detDphi_N1[i] = image_space.one()

    # Create the gradient operator
    grad_op = Gradient(domain=image_space, method='forward',
                       pad_mode='symmetric')
    # Create the divergence operator, which can be obtained from
    # the adjoint of gradient operator
    # div_op = Divergence(domain=pspace, method='forward',
    #                     pad_mode='symmetric')
    div_op = -grad_op.adjoint

    # Store energy
    E = []
    kE = len(E)
    E = np.hstack((E, np.zeros(niter)))

    # Begin iteration
    for k in range(niter):
        # Update the velocity field
        for i in range(N + 1):
            # First term
            term1_tmp1 = 2 * np.abs(image_N0[i] - image_N1[i]) * detDphi_N1[i]
            term1_tmp = grad_op(image_N0[i])
            for j in range(dim):
                term1_tmp[j] *= term1_tmp1
            tmp3 = (2 * np.pi)**(dim / 2.0) * vectorial_ft_op.inverse(
                vectorial_ft_op(term1_tmp) * ft_kernel_fitting)
            vector_fields[i] = (vector_fields[i] -
                                eps * (lamb * vector_fields[i] - tmp3))

        # Update image_N0, image_N1 and detDphi_N1
        for i in range(N):
            # Update image_N0[i+1] by image_N0[i] and vector_fields[i+1]
            image_N0[i + 1] = image_space.element(
                _linear_deform(image_N0[i], -inv_N * vector_fields[i + 1]))
            # Update image_N1[N-i-1] by image_N1[N-i] and vector_fields[N-i-1]
            image_N1[N - i - 1] = image_space.element(
                _linear_deform(image_N1[N - i],
                               inv_N * vector_fields[N - i - 1]))
            # Update detDphi_N1[N-i-1] by detDphi_N1[N-i]
            # jacobian_det = image_domain.element(
            #     np.exp(inv_N * div_op(vector_fields[N-i-1])))
            jacobian_det = image_space.element(
                1.0 + inv_N * div_op(vector_fields[N - i - 1]))
            detDphi_N1[N - i - 1] = (jacobian_det * image_space.element(
                _linear_deform(detDphi_N1[N - i],
                               inv_N * vector_fields[N - i - 1])))

        # Update the deformed template
        PhiStarI = image_N0[N]

        # Show intermediate result
        if callback is not None:
            callback(PhiStarI)

        # Compute the energy of the data fitting term
        E[k + kE] += np.asarray((PhiStarI - reference)**2).sum()

    return image_N0, E
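# Hypothetical call sketch for LDDMM_Beg_solver as defined above (assumes the
# module-level helpers it uses, e.g. _padded_ft_op, _vectorized_kernel and
# _linear_deform, are in scope; the space, phantom, kernel width and solver
# parameters are illustrative assumptions).
import numpy as np
import odl

space = odl.uniform_discr([-1, -1], [1, 1], (64, 64))
template = odl.phantom.shepp_logan(space, modified=True)
# A slightly shifted copy of the template serves as registration target
reference = space.element(np.roll(np.asarray(template), 3, axis=0))

def kernel(x):
    sigma = 0.1
    return np.exp(-sum(xi ** 2 for xi in x) / (2 * sigma ** 2))

image_N0, E = LDDMM_Beg_solver(template, reference, time_pts=10, niter=50,
                               eps=0.02, lamb=1e-6, kernel=kernel)
deformed = image_N0[10]  # solution at t=1, i.e. the last of the N+1 steps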
def __init__(self, operators, domain=None, range=None): """Initialize a new instance. Parameters ---------- operators : `array-like` An array of `Operator`'s domain : `ProductSpace`, optional Domain of the operator. If not provided, it is tried to be inferred from the operators. This requires each **column** to contain at least one operator. range : `ProductSpace`, optional Range of the operator. If not provided, it is tried to be inferred from the operators. This requires each **row** to contain at least one operator. Examples -------- >>> r3 = odl.rn(3) >>> X = odl.ProductSpace(r3, r3) >>> I = odl.IdentityOperator(r3) >>> x = X.element([[1, 2, 3], [4, 5, 6]]) Sum of elements: >>> prod_op = ProductSpaceOperator([I, I]) >>> prod_op(x) ProductSpace(rn(3), 1).element([ [5.0, 7.0, 9.0] ]) Diagonal operator -- 0 or ``None`` means ignore, or the implicit zero operator: >>> prod_op = ProductSpaceOperator([[I, 0], [0, I]]) >>> prod_op(x) ProductSpace(rn(3), 2).element([ [1.0, 2.0, 3.0], [4.0, 5.0, 6.0] ]) Complicated combinations: >>> prod_op = ProductSpaceOperator([[I, I], [I, 0]]) >>> prod_op(x) ProductSpace(rn(3), 2).element([ [5.0, 7.0, 9.0], [1.0, 2.0, 3.0] ]) """ # Lazy import to improve `import odl` time import scipy.sparse # Validate input data if domain is not None: if not isinstance(domain, ProductSpace): raise TypeError('`domain` {!r} not a ProductSpace instance' ''.format(domain)) if domain.is_weighted: raise NotImplementedError('weighted spaces not supported') if range is not None: if not isinstance(range, ProductSpace): raise TypeError('`range` {!r} not a ProductSpace instance' ''.format(range)) if range.is_weighted: raise NotImplementedError('weighted spaces not supported') # Convert ops to sparse representation self.ops = scipy.sparse.coo_matrix(operators) if not all(isinstance(op, Operator) for op in self.ops.data): raise TypeError('`operators` {!r} must be a matrix of Operators' ''.format(operators)) # Set domain and range (or verify if given) if domain is None: domains = [None] * self.ops.shape[1] else: domains = domain if range is None: ranges = [None] * self.ops.shape[0] else: ranges = range for row, col, op in zip(self.ops.row, self.ops.col, self.ops.data): if domains[col] is None: domains[col] = op.domain elif domains[col] != op.domain: raise ValueError('column {}, has inconsistent domains, ' 'got {} and {}' ''.format(col, domains[col], op.domain)) if ranges[row] is None: ranges[row] = op.range elif ranges[row] != op.range: raise ValueError('row {}, has inconsistent ranges, ' 'got {} and {}' ''.format(row, ranges[row], op.range)) if domain is None: for col, sub_domain in enumerate(domains): if sub_domain is None: raise ValueError('col {} empty, unable to determine ' 'domain, please use `domain` parameter' ''.format(col)) domain = ProductSpace(*domains) if range is None: for row, sub_range in enumerate(ranges): if sub_range is None: raise ValueError('row {} empty, unable to determine ' 'range, please use `range` parameter' ''.format(row)) range = ProductSpace(*ranges) # Set linearity linear = all(op.is_linear for op in self.ops.data) super(ProductSpaceOperator, self).__init__( domain=domain, range=range, linear=linear)
def __init__(self, domain=None, range=None, method='forward', pad_mode='constant', pad_const=0): """Initialize a new instance. Zero padding is assumed for the adjoint of the `Gradient` operator to match negative `Divergence` operator. Parameters ---------- domain : `DiscreteLp`, optional Space of elements which the operator acts on. This is required if ``range`` is not given. range : power space of `DiscreteLp`, optional Space of elements to which the operator maps. This is required if ``domain`` is not given. method : {'forward', 'backward', 'central'}, optional Finite difference method to be used. pad_mode : string, optional The padding mode to use outside the domain. ``'constant'``: Fill with ``pad_const``. ``'symmetric'``: Reflect at the boundaries, not doubling the outmost values. ``'periodic'``: Fill in values from the other side, keeping the order. ``'order0'``: Extend constantly with the outmost values (ensures continuity). ``'order1'``: Extend with constant slope (ensures continuity of the first derivative). This requires at least 2 values along each axis where padding is applied. ``'order2'``: Extend with second order accuracy (ensures continuity of the second derivative). This requires at least 3 values along each axis. pad_const : float, optional For ``pad_mode == 'constant'``, ``f`` assumes ``pad_const`` for indices outside the domain of ``f`` Examples -------- Creating a Gradient operator: >>> dom = odl.uniform_discr([0, 0], [1, 1], (10, 20)) >>> ran = odl.ProductSpace(dom, dom.ndim) # 2-dimensional >>> grad_op = Gradient(dom) >>> grad_op.range == ran True >>> grad_op2 = Gradient(range=ran) >>> grad_op2.domain == dom True >>> grad_op3 = Gradient(domain=dom, range=ran) >>> grad_op3.domain == dom True >>> grad_op3.range == ran True Calling the operator: >>> data = np.array([[ 0., 1., 2., 3., 4.], ... 
[ 0., 2., 4., 6., 8.]]) >>> discr = odl.uniform_discr([0, 0], [2, 5], data.shape) >>> f = discr.element(data) >>> grad = Gradient(discr) >>> grad_f = grad(f) >>> grad_f[0] uniform_discr([ 0., 0.], [ 2., 5.], (2, 5)).element( [[ 0., 1., 2., 3., 4.], [ 0., -2., -4., -6., -8.]] ) >>> grad_f[1] uniform_discr([ 0., 0.], [ 2., 5.], (2, 5)).element( [[ 1., 1., 1., 1., -4.], [ 2., 2., 2., 2., -8.]] ) Verify adjoint: >>> g = grad.range.element((data, data ** 2)) >>> adj_g = grad.adjoint(g) >>> adj_g uniform_discr([ 0., 0.], [ 2., 5.], (2, 5)).element( [[ 0., -2., -5., -8., -11.], [ 0., -5., -14., -23., -32.]] ) >>> g.inner(grad_f) / f.inner(adj_g) 1.0 """ if domain is None and range is None: raise ValueError('either `domain` or `range` must be specified') if domain is None: try: domain = range[0] except TypeError: pass if range is None: range = ProductSpace(domain, domain.ndim) # Check range first since `domain` may end up to be `None` in # the case filtered out here (see above) if not isinstance(range, ProductSpace): raise TypeError('`range` {!r} is not a `ProductSpace` instance' ''.format(range)) elif not range.is_power_space: raise ValueError('`range` {!r} is not a power space' ''.format(range)) if not isinstance(domain, DiscreteLp): raise TypeError('`domain` {!r} is not a `DiscreteLp` ' 'instance'.format(domain)) if len(range) != domain.ndim: raise ValueError('`range` must be a power space of length n = {},' 'with `n == domain.ndim`, got n = {} instead' ''.format(domain.ndim, len(range))) linear = not (pad_mode == 'constant' and pad_const != 0) super(Gradient, self).__init__(domain, range, base_space=domain, linear=linear) self.method, method_in = str(method).lower(), method if method not in _SUPPORTED_DIFF_METHODS: raise ValueError('`method` {} not understood' ''.format(method_in)) self.pad_mode, pad_mode_in = str(pad_mode).lower(), pad_mode if pad_mode not in _SUPPORTED_PAD_MODES: raise ValueError('`pad_mode` {} not understood' ''.format(pad_mode_in)) self.pad_const = domain.field.element(pad_const)
def __init__(self, operators, domain=None, range=None): """Initialize a new instance. Parameters ---------- operators : `array-like` An array of `Operator`'s, must be 2-dimensional. domain : `ProductSpace`, optional Domain of the operator. If not provided, it is tried to be inferred from the operators. This requires each **column** to contain at least one operator. range : `ProductSpace`, optional Range of the operator. If not provided, it is tried to be inferred from the operators. This requires each **row** to contain at least one operator. Examples -------- >>> r3 = odl.rn(3) >>> pspace = odl.ProductSpace(r3, r3) >>> I = odl.IdentityOperator(r3) >>> x = pspace.element([[1, 2, 3], ... [4, 5, 6]]) Create an operator that sums two inputs: >>> prod_op = odl.ProductSpaceOperator([[I, I]]) >>> prod_op(x) ProductSpace(rn(3), 1).element([ [ 5., 7., 9.] ]) Diagonal operator -- 0 or ``None`` means ignore, or the implicit zero operator: >>> prod_op = odl.ProductSpaceOperator([[I, 0], ... [0, I]]) >>> prod_op(x) ProductSpace(rn(3), 2).element([ [ 1., 2., 3.], [ 4., 5., 6.] ]) If a column is empty, the operator domain must be specified. The same holds for an empty row and the range of the operator: >>> prod_op = odl.ProductSpaceOperator([[I, 0], ... [I, 0]], domain=r3 ** 2) >>> prod_op(x) ProductSpace(rn(3), 2).element([ [ 1., 2., 3.], [ 1., 2., 3.] ]) >>> prod_op = odl.ProductSpaceOperator([[I, I], ... [0, 0]], range=r3 ** 2) >>> prod_op(x) ProductSpace(rn(3), 2).element([ [ 5., 7., 9.], [ 0., 0., 0.] ]) """ # Lazy import to improve `import odl` time import scipy.sparse # Validate input data if domain is not None: if not isinstance(domain, ProductSpace): raise TypeError('`domain` {!r} not a ProductSpace instance' ''.format(domain)) if domain.is_weighted: raise NotImplementedError('weighted spaces not supported') if range is not None: if not isinstance(range, ProductSpace): raise TypeError('`range` {!r} not a ProductSpace instance' ''.format(range)) if range.is_weighted: raise NotImplementedError('weighted spaces not supported') if isinstance(operators, scipy.sparse.spmatrix): if not all(isinstance(op, Operator) for op in operators.data): raise ValueError('sparse matrix `operator` contains non-' '`Operator` entries') self.__ops = operators else: self.__ops = self._convert_to_spmatrix(operators) # Set domain and range (or verify if given) if domain is None: domains = [None] * self.__ops.shape[1] else: domains = domain if range is None: ranges = [None] * self.__ops.shape[0] else: ranges = range for row, col, op in zip(self.__ops.row, self.__ops.col, self.__ops.data): if domains[col] is None: domains[col] = op.domain elif domains[col] != op.domain: raise ValueError('column {}, has inconsistent domains, ' 'got {} and {}' ''.format(col, domains[col], op.domain)) if ranges[row] is None: ranges[row] = op.range elif ranges[row] != op.range: raise ValueError('row {}, has inconsistent ranges, ' 'got {} and {}' ''.format(row, ranges[row], op.range)) if domain is None: for col, sub_domain in enumerate(domains): if sub_domain is None: raise ValueError('col {} empty, unable to determine ' 'domain, please use `domain` parameter' ''.format(col)) domain = ProductSpace(*domains) if range is None: for row, sub_range in enumerate(ranges): if sub_range is None: raise ValueError('row {} empty, unable to determine ' 'range, please use `range` parameter' ''.format(row)) range = ProductSpace(*ranges) # Set linearity linear = all(op.is_linear for op in self.__ops.data) super(ProductSpaceOperator, 
self).__init__(domain=domain, range=range, linear=linear)
def __init__(self, domain=None, range=None, method='forward',
             pad_mode='constant', pad_const=0):
    """Initialize a new instance.

    Zero padding is assumed for the adjoint of the `Divergence`
    operator to match the negative `Gradient` operator.

    Parameters
    ----------
    domain : power space of `DiscreteLp`, optional
        Space of elements which the operator acts on.
        This is required if ``range`` is not given.
    range : `DiscreteLp`, optional
        Space of elements to which the operator maps.
        This is required if ``domain`` is not given.
    method : {'central', 'forward', 'backward'}, optional
        Finite difference method to be used.
    pad_mode : string, optional
        The padding mode to use outside the domain.

        ``'constant'``: Fill with ``pad_const``.

        ``'symmetric'``: Reflect at the boundaries, not doubling the
        outmost values.

        ``'periodic'``: Fill in values from the other side, keeping
        the order.

        ``'order0'``: Extend constantly with the outmost values
        (ensures continuity).

        ``'order1'``: Extend with constant slope (ensures continuity of
        the first derivative). This requires at least 2 values along
        each axis.

        ``'order2'``: Extend with second order accuracy (ensures
        continuity of the second derivative). This requires at least
        3 values along each axis.
    pad_const : float, optional
        For ``pad_mode == 'constant'``, ``f`` assumes ``pad_const`` for
        indices outside the domain of ``f``.

    Examples
    --------
    Initialize a Divergence operator:

    >>> ran = odl.uniform_discr([0, 0], [3, 5], (3, 5))
    >>> dom = odl.ProductSpace(ran, ran.ndim)  # 2-dimensional
    >>> div = Divergence(dom)
    >>> div.range == ran
    True
    >>> div2 = Divergence(range=ran)
    >>> div2.domain == dom
    True
    >>> div3 = Divergence(domain=dom, range=ran)
    >>> div3.domain == dom
    True
    >>> div3.range == ran
    True

    Call the operator:

    >>> data = np.array([[0., 1., 2., 3., 4.],
    ...                  [1., 2., 3., 4., 5.],
    ...                  [2., 3., 4., 5., 6.]])
    >>> f = div.domain.element([data, data])
    >>> div_f = div(f)
    >>> print(div_f)
    [[2.0, 2.0, 2.0, 2.0, -3.0],
     [2.0, 2.0, 2.0, 2.0, -4.0],
     [-1.0, -2.0, -3.0, -4.0, -12.0]]

    Verify adjoint:

    >>> g = div.range.element(data ** 2)
    >>> adj_div_g = div.adjoint(g)
    >>> g.inner(div_f) / f.inner(adj_div_g)
    1.0
    """
    if domain is None and range is None:
        raise ValueError('either `domain` or `range` must be specified')

    if domain is None:
        if not isinstance(range, DiscreteLp):
            raise TypeError('`range` {!r} is not a DiscreteLp instance'
                            ''.format(range))
        domain = ProductSpace(range, range.ndim)

    if range is None:
        if not isinstance(domain, ProductSpace):
            raise TypeError('`domain` {!r} is not a ProductSpace instance'
                            ''.format(domain))
        range = domain[0]

    linear = not (pad_mode == 'constant' and pad_const != 0)
    super(Divergence, self).__init__(domain, range, base_space=range,
                                     linear=linear)

    self.method, method_in = str(method).lower(), method
    if method not in _SUPPORTED_DIFF_METHODS:
        raise ValueError('`method` {} not understood'
                         ''.format(method_in))

    self.pad_mode, pad_mode_in = str(pad_mode).lower(), pad_mode
    if pad_mode not in _SUPPORTED_PAD_MODES:
        raise ValueError('`pad_mode` {} not understood'
                         ''.format(pad_mode_in))

    self.pad_const = range.field.element(pad_const)
def _call(self, X): """ Shooting equations and Integration Args: X = [I0, m] with I0 template image and m vectorvalued momentum Returns: I: deformed Image over time (N+1 timesteps) M: vectorvalued momentum in eulerian coordinates over time V: vectorfields over time """ template = X[0].copy() m0 = X[1].copy() # create spaces for time-dependent Image and vectorfields series_image_space_integration = ProductSpace(template.space, self.N + 1) series_vector_space_integration = ProductSpace(m0.space, self.N + 1) inv_N = 1 / self.N I = series_image_space_integration.element() I[0] = template.copy() M = series_vector_space_integration.element() M[0] = m0.copy() U = series_vector_space_integration.element() # Create the gradient op grad_op = Gradient(domain=self.space, method='forward', pad_mode='symmetric') # Create the divergence op div_op = -grad_op.adjoint for i in range(self.N): m = M[i] # Kernel convolution to obtain vectorfields from momentum # v = K*m U[i] = (2 * np.pi)**( self.dim / 2.0) * self.vectorial_ft_fit_op.inverse( self.vectorial_ft_fit_op(m) * self.ft_kernel_fitting) u = U[i] # shooting equation # d/dt m = - ad*_v m gradu = [grad_op(u[0]), grad_op(u[1])] dtM0 = sum(grad_op(m[0]) * u) + sum( [gradu[i][0] * m[i] for i in range(self.dim)]) + div_op(u) * m[0] dtM1 = sum(grad_op(m[1]) * u) + sum( [gradu[i][1] * m[i] for i in range(self.dim)]) + div_op(u) * m[1] # Integration step M[i + 1] = M[i] - inv_N * m0.space.element([dtM0, dtM1]) I[i + 1] = odl.deform.linear_deform(I[i], -inv_N * u) U[self.N] = (2 * np.pi)**( self.dim / 2.0) * self.vectorial_ft_fit_op.inverse( self.vectorial_ft_fit_op(M[self.N]) * self.ft_kernel_fitting) return I, M, U
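# In coordinates, the coadjoint transport equation integrated above,
# d/dt m = -ad*_v m, reads
#
#     .. math:: \partial_t m = -\bigl((v \cdot \nabla) m + (Dv)^{\top} m +
#         (\nabla \cdot v)\, m\bigr),
#
# which is exactly what dtM0 and dtM1 compute component-wise (transport term,
# transposed-Jacobian term and divergence term) before the forward-Euler step
# M[i + 1] = M[i] - inv_N * dtM.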