Code example #1
    def test_callable(self):
        self.assertAlmostEqual(self.term(self.camera.ravel()), 0)

        F = MriDft(self.camera.shape)
        data = F * self.camera.ravel()
        indicator = DatanormL2(operator=F,
                               image_size=self.camera.shape,
                               data=data)
        self.assertAlmostEqual(indicator(self.camera.ravel()), 0)
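
Pieced together from the test above, the basic call pattern is: build the forward operator, simulate data, instantiate DatanormL2 and evaluate it at a candidate image. A minimal sketch, assuming hypothetical import paths (the repository's actual module layout may differ):

import numpy as np
from skimage import data

# The import paths below are assumptions about the package layout of
# lucasplagwitz/recon; adjust them to wherever DatanormL2 and MriDft live.
from recon.terms import DatanormL2
from recon.operator import MriDft

camera = data.camera()[50:250, 50:250].astype(float)  # small test image, as in setUp

F = MriDft(camera.shape)      # Fourier-type forward operator used in the tests
f = F * camera.ravel()        # simulated measurements

term = DatanormL2(operator=F, image_size=camera.shape, data=f)
print(term(camera.ravel()))   # expected to be (numerically) zero at the true image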
Code example #2
File: base_interface.py  Project: lucasplagwitz/recon
    def set_up_operator(self) -> None:
        assert self.reg_mode in self.possible_reg_modes

        # every interface currently solves via a Gradient operator - this will change in future versions
        self.K = Gradient(self.domain_shape, edge=True, dtype='float64', kind='backward', sampling=1)
        #if self.local_alpha:
        #    self.K = BlockDiag([Diagonal(self.alpha.ravel())]*len(self.domain_shape)) * self.K

        if self.reg_mode == 'tv':
            self.F_star = IndicatorL2(self.domain_shape,
                                      len(self.domain_shape),
                                      prox_param=self.tau,
                                      upper_bound=self.alpha)
        elif self.reg_mode == 'tikhonov':
            self.K = self.alpha*self.K  # it is possible to rewrite DatanormL2 -> x/self.alpha and lam=self.alpha
            self.F_star = DatanormL2(image_size=self.K.shape[0], data=0, prox_param=self.tau)
        else:
            self.F_star = DatanormL2(image_size=self.domain_shape, data=0, prox_param=self.tau)
            self.K = 0
        return
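
A reading of the branches above (a summary, not repository documentation): for 'tv' the dual term F_star is the indicator of the pointwise alpha-ball, which corresponds to the primal regulariser alpha * ||grad u||_{2,1}; for 'tikhonov' the gradient is pre-scaled by alpha and paired with a zero-data squared-L2 term. Up to the exact scaling inside DatanormL2 (assumed here to be 1/2), this amounts to

    \text{tv:}\qquad F^*(p) = \iota_{\{\|p\|_2 \le \alpha\}}(p) \;\Longleftrightarrow\; F(\nabla u) = \alpha\,\|\nabla u\|_{2,1}
    \text{tikhonov:}\qquad F(K u) = \tfrac{1}{2}\,\|\alpha \nabla u\|_2^2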
Code example #3
File: satv.py  Project: lucasplagwitz/recon
    def __init__(self,
                 domain_shape: Union[np.ndarray, tuple],
                 assessment: float = 1,
                 noise_sigma: float = 0.2,
                 reg_mode: str = 'tv',
                 lam: Union[float, np.ndarray] = 0.01,
                 alpha: Union[float, tuple] = 1,
                 tau: Union[float, str] = 'calc',
                 stepsize: float = 2,
                 window_size: int = 10,
                 data_output_path: str = '',
                 plot_iteration: bool = False):
        self._reg_mode = None

        super(SATV, self).__init__(domain_shape=domain_shape,
                                   reg_mode=reg_mode,
                                   possible_reg_modes=['tv', 'tgv'],
                                   alpha=alpha,
                                   lam=lam,
                                   tau=tau)

        self.true_value = None
        self.domain_shape = domain_shape
        self.reg_mode = reg_mode
        self.solver = None
        self.plot_iteration = plot_iteration
        self.assessment = assessment
        self.noise_sigma = noise_sigma
        self.data_output_path = data_output_path
        self.norm = 1
        self.window_size = window_size
        self.bregman = False
        self.stepsize = stepsize
        self.G_template = None

        self.operator = Identity(domain_dim=domain_shape)

        self.old_lam = 1

        if isinstance(lam, float):
            self.lam = lam * np.ones(domain_shape)
        else:
            self.lam = lam

        self.G = DatanormL2(image_size=domain_shape,
                            prox_param=self.tau,
                            lam=self.lam)
Code example #4
File: recon.py  Project: lucasplagwitz/recon
    def __init__(self,
                 operator,
                 domain_shape: Union[np.ndarray, tuple],
                 reg_mode: str = '',
                 alpha: float = 0.01,
                 lam: float = 1,
                 tau: Union[float, str] = 'auto',
                 extend_pdhgm=True,
                 data=None,
                 sampling: Union[np.ndarray, None] = None):

        assert self._check_operator(operator)

        self.operator = operator

        super(Recon, self).__init__(
            domain_shape=domain_shape,
            reg_mode=reg_mode,
            lam=lam,
            alpha=alpha,
            possible_reg_modes=['tv', 'tikhonov', 'tik', 'tgv', None],
            tau=tau)

        if hasattr(operator, 'inv') and not extend_pdhgm:
            self.G = DatanormL2(operator=operator,
                                image_size=domain_shape,
                                prox_param=self.tau,
                                lam=lam,
                                sampling=sampling)
            self.extend_pdhgm = False
        else:
            self.data = data
            self.extend_pdhgm = True
            self.operator = operator

            self.norm = power_method(self.operator,
                                     self.operator.H,
                                     max_iter=100)
            self.tau = 0.99 * np.sqrt(1 / self.norm)
            #self.norm = np.abs(np.asscalar((self.operator.H * self.operator).eigs(neigs=1,
            #                                                                      symmetric=True,
            #                                                                      largest=True,
            #                                                                      uselobpcg=True)))
            #print(self.norm)
            self.breg_p = 0
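
A note on the step size set above: power_method presumably estimates the largest eigenvalue of A^H A (an assumption about its implementation, not documented in this snippet), so the assignment amounts to

    \tau = 0.99\,\sqrt{\tfrac{1}{\lambda_{\max}(A^{H}A)}} \approx \tfrac{0.99}{\|A\|_2},

which, if the dual step size is chosen equal to tau, satisfies the usual PDHG condition \sigma\tau\|A\|_2^2 < 1 with a small margin.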
Code example #5
    def test_prox_operator(self):
        F = MriDft(self.camera.shape)
        raveled_camera = self.camera.ravel()
        data = F * raveled_camera
        self.term = DatanormL2(operator=F,
                               image_size=self.camera.shape,
                               data=data,
                               prox_param=0.1)
        prox_camera = np.zeros(shape=raveled_camera.shape)

        # each prox application decreases the data term, so its value converges quickly to zero
        for i in range(150):
            prev_camera = prox_camera
            prox_camera = self.term.prox(prox_camera)
            self.assertGreaterEqual(self.term(prev_camera),
                                    self.term(prox_camera))

        np.testing.assert_almost_equal(raveled_camera, prox_camera, 3)
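
test_prox_operator exercises the proximal map of the data term. For a squared-L2 fidelity this is, by the standard definition from convex analysis (the \lambda/2 weighting is an assumption about DatanormL2's internal scaling), the penalised least-squares step

    \operatorname{prox}_{\tau G}(x) = \arg\min_{z}\; \tfrac{1}{2}\|z - x\|_2^2 + \tau\,\tfrac{\lambda}{2}\,\|F z - f\|_2^2,

so repeated application pulls the iterate toward the measured data, which is exactly what the loop above checks.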
Code example #6
    def __init__(self,
                 image_size,
                 classes: list,
                 lam: float = 100,
                 alpha: float = 1,
                 tau: Union[float, str] = None):

        super(Segmentation, self).__init__(domain_shape=image_size,
                                           reg_mode='tv',
                                           possible_reg_modes=['tv'],
                                           lam=lam,
                                           alpha=alpha,
                                           tau=tau)

        self.seg = np.zeros((np.prod(self.domain_shape), len(classes)))
        self.classes = classes
        self.multi = False
        self.G = DatanormL2(image_size=image_size,
                            prox_param=self.tau,
                            lam=self.lam)
Code example #7
    def __init__(self,
                 domain_shape: Union[np.ndarray, tuple],
                 reg_mode: str = '',
                 alpha: float = 1,
                 lam: float = 1,
                 norm: str = 'L2',
                 tau: Union[float, str] = 'calc'):

        super(Smoothing,
              self).__init__(domain_shape=domain_shape,
                             reg_mode=reg_mode,
                             possible_reg_modes=['tv', 'tikhonov', None],
                             alpha=alpha,
                             lam=lam,
                             tau=tau)

        if norm == 'L2':
            self.G = DatanormL2(domain_shape,
                                lam=self.lam,
                                prox_param=self.tau)
        elif norm == 'L1':
            self.G = DatanormL1(domain_shape,
                                lam=self.lam,
                                prox_param=self.tau)
Code example #8
    def solve(self, f: np.ndarray):
        self.k = 1
        if len(np.shape(f)) != 2:
            raise ValueError(
                "The TGV algorithm is only implemented for 2D images. Please provide input shaped (m, n)."
            )
        (primal_n, primal_m) = np.shape(f)
        grad = Gradient(dims=(primal_n, primal_m),
                        dtype='float64',
                        edge=True,
                        kind="backward")
        grad_v = BlockDiag(
            [grad, grad]
        )  # a symmetrised second-order operator (dxdy <-> dydx) is not necessary (and expensive); this simpler form works
        p, q = 0, 0
        v = v_bar = np.zeros(2 * primal_n * primal_m)
        u = u_bar = f.ravel()

        # Projections
        proj_p = IndicatorL2((primal_n, primal_m), upper_bound=self.alpha[0])
        proj_q = IndicatorL2((2 * primal_n, primal_m),
                             upper_bound=self.alpha[1])
        if self.mode == 'tv':
            dataterm = DatanormL2(image_size=f.shape,
                                  data=f.ravel(),
                                  prox_param=self.tau,
                                  lam=self.lam)
        else:
            dataterm = DatanormL2Bregman(image_size=f.shape,
                                         data=f.ravel(),
                                         prox_param=self.tau,
                                         lam=self.lam)
            dataterm.pk = self.pk
            dataterm.bregman_weight_alpha = self.alpha[0]
        sens = 100
        while (self.tol < sens or self.k == 1) and (self.k <= self.max_iter):
            p = proj_p.prox(p + self.sigma * (grad * u_bar - v_bar))
            q = proj_q.prox(q + self.sigma *
                            (grad_v * v_bar))  #self.adjoint_div(v_bar, 1)
            u_old = u
            v_old = v
            u = dataterm.prox(u - self.tau * grad.H * p)
            u_bar = 2 * u - u_old
            v = v + self.tau * (p - grad_v.H * q)
            v_bar = 2 * v - v_old

            #self.update_sensivity(u, u_old, v, v_old, grad)
            # test
            if self.k % 300 == 0:
                u_gap = u - u_old
                v_gap = v - v_old
                #sens = 1/2*(
                #        np.linalg.norm(u_gap - self.tau*grad.H*proj_p.prox(p+self.sigma*(grad*u_gap - v_gap)), 2)/
                #        np.linalg.norm(u, 2) +
                #        np.linalg.norm(v_gap-proj_p.prox(p+self.sigma*(grad*u_gap - v_gap))-grad_v.H*proj_q.prox(q+ self.sigma*(grad_v*v_gap)), 2)/
                #        np.linalg.norm(v, 2))   # not best sens
                sens = 1 / 2 * (
                    np.linalg.norm(u_gap - self.tau * grad.H * v_gap, 2) /
                    np.linalg.norm(u, 2) +
                    np.linalg.norm(v_gap - self.sigma *
                                   (grad * u_gap), 2) / np.linalg.norm(v, 2))
                print(sens)
            self.k += 1
        return np.reshape(u, (primal_n, primal_m))
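
For reference, the loop above is a standard primal-dual (Chambolle-Pock style) iteration for second-order TGV; the updates below are a direct reading of the code, writing \nabla for grad, \mathcal{E} for grad_v, \alpha_0, \alpha_1 for self.alpha[0], self.alpha[1] and \operatorname{prox}_{\tau G} for the data term's prox:

    p^{k+1} = \Pi_{\|\cdot\|_2 \le \alpha_0}\big(p^k + \sigma(\nabla \bar u^k - \bar v^k)\big)
    q^{k+1} = \Pi_{\|\cdot\|_2 \le \alpha_1}\big(q^k + \sigma\,\mathcal{E}\bar v^k\big)
    u^{k+1} = \operatorname{prox}_{\tau G}\big(u^k - \tau\,\nabla^{\top} p^{k+1}\big)
    v^{k+1} = v^k + \tau\big(p^{k+1} - \mathcal{E}^{\top} q^{k+1}\big)
    \bar u^{k+1} = 2u^{k+1} - u^k, \qquad \bar v^{k+1} = 2v^{k+1} - v^k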
Code example #9
    def setUp(self) -> None:
        filename = os.path.join(skimage.data_dir, 'camera.png')
        self.camera = io.imread(filename)[50:250, 50:250]
        self.term = DatanormL2(image_size=self.camera.shape,
                               data=self.camera.ravel())