Example #1
    def _update(self):
        with self.device:
            if self.accelerate or self.proxg is not None:
                backend.copyto(self.x_old, self.x)

            if self.accelerate:
                backend.copyto(self.x, self.z)

            # Gradient step: x <- x - alpha * gradf(x).
            gradf_x = self.gradf(self.x)
            util.axpy(self.x, -self.alpha, gradf_x)

            # Proximal step on g.
            if self.proxg is not None:
                backend.copyto(self.x, self.proxg(self.alpha, self.x))

            # Nesterov momentum: update t and extrapolate into z.
            if self.accelerate:
                t_old = self.t
                self.t = (1 + (1 + 4 * t_old**2)**0.5) / 2
                backend.copyto(
                    self.z,
                    self.x + (t_old - 1) / self.t * (self.x - self.x_old))

            xp = self.device.xp
            if self.accelerate or self.proxg is not None:
                self.resid = util.asscalar(
                    xp.linalg.norm((self.x - self.x_old) / self.alpha**0.5))
            else:
                self.resid = util.asscalar(xp.linalg.norm(gradf_x))
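All of these examples lean on two sigpy-style in-place helpers, util.axpy and util.xpay. As a reading aid, here is a minimal NumPy-only sketch of the semantics assumed throughout (simplified; the library versions also handle devices and dtypes):

import numpy as np

def axpy(y, a, x):
    # In-place y += a * x.
    y += a * x

def xpay(y, b, x):
    # In-place y = x + b * y.
    y *= b
    y += x

y = np.ones(3)
axpy(y, 2.0, np.arange(3.0))  # y is now [1., 3., 5.]
xpay(y, 0.5, np.ones(3))      # y is now [1.5, 2.5, 3.5]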
Example #2
        def gradf(x):
            with self.x_device:
                gradf_x = self.A.N(x) - AHy
                if self.lamda != 0:
                    if self.z is None:
                        util.axpy(gradf_x, self.lamda, x)
                    else:
                        util.axpy(gradf_x, self.lamda, x - self.z)

                return gradf_x
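The closure above computes the gradient of a regularized least-squares objective, gradf(x) = A^H A x - A^H y + lamda * (x - z), assuming A.N is sigpy's normal operator A.H * A. A dense sanity check with made-up shapes (illustrative only):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 3))   # stand-in for the linear operator
y = rng.standard_normal(5)
x = rng.standard_normal(3)
z = np.zeros(3)
lamda = 0.1

# gradf(x) = A^H (A x) - A^H y + lamda * (x - z)
gradf_x = A.T @ (A @ x) - A.T @ y + lamda * (x - z)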
Example #3
    def _update(self):
        self.minL(self.mu)
        if self.g is not None:
            device = backend.get_device(self.u)
            xp = device.xp
            with device:
                util.axpy(self.u, self.mu, self.g(self.x))
                backend.copyto(self.u, xp.clip(self.u, 0, np.inf))

        if self.h is not None:
            util.axpy(self.v, self.mu, self.h(self.x))
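This update performs dual ascent on an augmented Lagrangian: the inequality multipliers u move along g(x) and are then projected onto the nonnegative orthant, while the equality multipliers v move along h(x) unprojected. A one-step sketch of the projected update with hypothetical values:

import numpy as np

u = np.array([0.2, 0.0])        # current inequality multipliers
mu = 0.5
gx = np.array([0.3, -0.8])      # hypothetical constraint values g(x)

u = np.clip(u + mu * gx, 0, np.inf)   # axpy then clip, as in the snippet
# u is now [0.35, 0.]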
Example #4
    def _update(self):
        backend.copyto(self.u_old, self.u)
        backend.copyto(self.x_old, self.x)

        # Update dual.
        delta_u = self.A(self.x_ext)
        util.axpy(self.u, self.sigma, delta_u)
        backend.copyto(self.u, self.proxfc(self.sigma, self.u))

        # Update primal.
        with self.x_device:
            delta_x = self.AH(self.u)
            if self.gradh is not None:
                delta_x += self.gradh(self.x)

            util.axpy(self.x, -self.tau, delta_x)
            backend.copyto(self.x, self.proxg(self.tau, self.x))

        # Update step sizes if necessary.
        if self.gamma_primal > 0 and self.gamma_dual == 0:
            with self.x_device:
                xp = self.x_device.xp
                theta = 1 / (
                    1 + 2 * self.gamma_primal * xp.amin(xp.abs(self.tau)))**0.5
                self.tau *= theta

            with self.u_device:
                self.sigma /= theta
        elif self.gamma_primal == 0 and self.gamma_dual > 0:
            with self.u_device:
                xp = self.u_device.xp
                theta = 1 / (
                    1 + 2 * self.gamma_dual * xp.amin(xp.abs(self.sigma)))**0.5
                self.sigma *= theta

            with self.x_device:
                self.tau /= theta
        else:
            theta = self.theta

        # Extrapolate primal.
        with self.x_device:
            xp = self.x_device.xp
            x_diff = self.x - self.x_old
            backend.copyto(self.x_ext, self.x + theta * x_diff)
            x_diff_norm = xp.linalg.norm(x_diff / self.tau**0.5).item()

        with self.u_device:
            xp = self.u_device.xp
            u_diff = self.u - self.u_old
            u_diff_norm = xp.linalg.norm(u_diff / self.sigma**0.5).item()

        self.resid = x_diff_norm**2 + u_diff_norm**2
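When exactly one side of the problem is strongly convex, the branch above rebalances the step sizes with theta = 1 / sqrt(1 + 2 * gamma * min(step)). Shrinking one step by theta while growing the other by 1 / theta keeps the product tau * sigma, and hence the PDHG step-size condition, unchanged. With hypothetical numbers:

import numpy as np

tau, sigma, gamma_primal = 0.5, 2.0, 0.1
theta = 1 / (1 + 2 * gamma_primal * tau) ** 0.5
tau_new, sigma_new = tau * theta, sigma / theta
assert np.isclose(tau * sigma, tau_new * sigma_new)  # product preserved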
Example #5
    def _get_ConjugateGradient(self):
        I = linop.Identity(self.x.shape)
        AHA = self.A.H * self.A
        AHy = self.A.H(self.y)

        if self.lamda != 0:
            AHA += self.lamda * I
            if self.z is not None:
                util.axpy(AHy, self.lamda, self.z)

        self.alg = ConjugateGradient(
            AHA, AHy, self.x, P=self.P, max_iter=self.max_iter)
Example #6
        def gradf(x):
            with self.y_device:
                r = self.A(x)
                r -= self.y

            with self.x_device:
                gradf_x = self.A.H(r)
                if self.lamda != 0:
                    if self.z is None:
                        util.axpy(gradf_x, self.lamda, x)
                    else:
                        util.axpy(gradf_x, self.lamda, x - self.z)

                return gradf_x
Example #7
    def _get_ConjugateGradient(self):
        I = linop.Identity(self.x.shape)
        AHA = self.A.H * self.A
        AHy = self.A.H(self.y)

        if self.lamda != 0:
            if self.R is None:
                AHA += self.lamda * I
            else:
                AHA += self.lamda * self.R.H * self.R

        if self.mu != 0:
            AHA += self.mu * I
            util.axpy(AHy, self.mu, self.z)

        self.alg = ConjugateGradient(
            AHA, AHy, self.x, P=self.P, max_iter=self.max_iter)
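Examples #5 and #7 both assemble regularized normal equations for CG; in Example #7 the system is (A^H A + lamda * R^H R + mu * I) x = A^H y + mu * z. A dense NumPy check of the same assembly, with made-up sizes, showing the solution the CG iterations converge toward:

import numpy as np

n, lamda, mu = 4, 0.1, 0.05
rng = np.random.default_rng(0)
A = rng.standard_normal((6, n))
R = np.eye(n)                      # stand-in regularization operator
y = rng.standard_normal(6)
z = rng.standard_normal(n)

AHA = A.T @ A + lamda * (R.T @ R) + mu * np.eye(n)
AHy = A.T @ y + mu * z
x = np.linalg.solve(AHA, AHy)      # what ConjugateGradient iterates toward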
Example #8
    def _update(self):
        xp = self.device.xp
        with self.device:
            x_old = self.x.copy()

            if self.accelerate:
                backend.copyto(self.x, self.z)

            # Gradient step, then proximal step.
            util.axpy(self.x, -self.alpha, self.gradf(self.x))
            if self.proxg is not None:
                backend.copyto(self.x, self.proxg(self.alpha, self.x))

            if self.accelerate:
                t_old = self.t
                self.t = (1 + (1 + 4 * t_old**2)**0.5) / 2
                backend.copyto(
                    self.z, self.x + ((t_old - 1) / self.t) * (self.x - x_old))

            self.resid = xp.linalg.norm(self.x - x_old).item() / self.alpha
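The accelerated branch uses the Nesterov momentum sequence t_{k+1} = (1 + sqrt(1 + 4 t_k^2)) / 2 with extrapolation weight (t_k - 1) / t_{k+1}; the weight starts at zero and grows toward one. A quick illustration:

t = 1.0
for _ in range(5):
    t_old = t
    t = (1 + (1 + 4 * t**2) ** 0.5) / 2
    print((t_old - 1) / t)   # 0.0, 0.28..., 0.43..., 0.53..., 0.60...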
Example #9
    def _update(self):
        x_old = self.x.copy()

        # Update dual.
        util.axpy(self.u, self.sigma, self.A(self.x_ext))
        backend.copyto(self.u, self.proxfc(self.sigma, self.u))

        # Update primal.
        with self.x_device:
            util.axpy(self.x, -self.tau, self.AH(self.u))
            backend.copyto(self.x, self.proxg(self.tau, self.x))

        # Update step sizes if necessary.
        if self.gamma_primal > 0 and self.gamma_dual == 0:
            with self.x_device:
                xp = self.x_device.xp
                theta = 1 / (1 + 2 * self.gamma_primal * self.tau_min)**0.5
                self.tau *= theta
                self.tau_min *= theta

            with self.u_device:
                self.sigma /= theta
        elif self.gamma_primal == 0 and self.gamma_dual > 0:
            with self.u_device:
                xp = self.u_device.xp
                theta = 1 / (1 + 2 * self.gamma_dual * self.sigma_min)**0.5
                self.sigma *= theta
                self.sigma_min *= theta

            with self.x_device:
                self.tau /= theta
        else:
            theta = self.theta

        # Extrapolate primal.
        with self.x_device:
            xp = self.x_device.xp
            x_diff = self.x - x_old
            self.resid = xp.linalg.norm(x_diff / self.tau**0.5).item()
            backend.copyto(self.x_ext, self.x + theta * x_diff)
Example #10
    def _update(self):
        with self.device:
            xp = self.device.xp
            Ap = self.A(self.p)
            pAp = xp.real(xp.vdot(self.p, Ap)).item()
            if pAp <= 0:
                self.not_positive_definite = True
                return

            self.alpha = self.rzold / pAp
            util.axpy(self.x, self.alpha, self.p)
            if self.iter < self.max_iter - 1:
                util.axpy(self.r, -self.alpha, Ap)
                if self.P is not None:
                    z = self.P(self.r)
                else:
                    z = self.r

                rznew = xp.real(xp.vdot(self.r, z))
                beta = rznew / self.rzold
                util.xpay(self.p, beta, z)
                self.rzold = rznew

            self.resid = self.rzold.item()**0.5
Example #11
    def _update(self):
        with self.device:
            xp = self.device.xp
            Ap = self.A(self.p)
            pAp = xp.real(xp.vdot(self.p, Ap))
            if pAp == 0:
                self.zero_gradient = True
                return

            self.alpha = self.rzold / pAp
            util.axpy(self.x, self.alpha, self.p)
            if self.iter < self.max_iter - 1:
                util.axpy(self.r, -self.alpha, Ap)
                if self.P is not None:
                    z = self.P(self.r)
                else:
                    z = self.r

                rznew = xp.real(xp.vdot(self.r, z))
                beta = rznew / self.rzold
                util.xpay(self.p, beta, z)
                self.rzold = rznew

            self.resid = util.asscalar(self.rzold)**0.5
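Examples #10 and #11 are the same preconditioned conjugate-gradient step with different guards on p^H A p. A self-contained NumPy run of the identical recurrence on a small SPD system (no preconditioner, so z = r) makes the variables concrete:

import numpy as np

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])
b = np.array([1.0, 2.0])
x = np.zeros(2)
r = b - A @ x
p = r.copy()
rzold = r @ r
for _ in range(10):
    Ap = A @ p
    alpha = rzold / (p @ Ap)
    x += alpha * p                 # util.axpy(self.x, self.alpha, self.p)
    r -= alpha * Ap                # util.axpy(self.r, -self.alpha, Ap)
    rznew = r @ r
    p = r + (rznew / rzold) * p    # util.xpay(self.p, beta, z)
    rzold = rznew
    if rzold ** 0.5 < 1e-12:       # mirrors self.resid = rzold**0.5
        break
print(x)  # close to [1/11, 7/11] = [0.0909..., 0.6363...]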
Example #12
    def run(self):
        with self.device:
            xp = self.device.xp
            img = self.x
            step_size = self.init_step_size

            fnorm = []
            tnorm = []
            cost = []
            for iter in range(self.max_iter):
                # calculate gradient of fidelity and regularization
                f_new = self._update_fidelity(img)
                util.axpy(f_new, self.lambda_t,
                          xp.squeeze(self._update_temporal_fd(img)))

                f2_new = xp.vdot(f_new, f_new)

                if iter == 0:
                    f2_old = f2_new
                    f_old = f_new

                # conjugate gradient
                beta = f2_new / (f2_old + xp.finfo(float).eps)
                util.axpy(f_new, beta, f_old)
                f2_old = f2_new
                f_old = f_new

                # update image
                fnorm_t = self._calculate_fnorm(img)
                tnorm_t = self._calculate_tnorm(img)
                cost_t = fnorm_t + tnorm_t

                step_size = self._line_search(img, f_new, cost_t, step_size)
                util.axpy(img, step_size, f_old)

                # TODO: stopping criterion
                # if abs(np.vdot(update_old.flatten(), update_old.flatten())) * step_size < 1e-6:
                #    break
                if step_size < 2e-3:
                    break

                fnorm.append(fnorm_t)
                tnorm.append(tnorm_t)
                cost.append(cost_t)

                print("Iter[%d/%d]\tStep:%.5f\tCost:%.3f" %
                      (iter + 1, self.max_iter, step_size, cost_t))

            return img, fnorm, tnorm, cost
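The direction update in this loop is Fletcher-Reeves-style nonlinear CG: beta = <g_new, g_new> / <g_old, g_old> and d_new = g_new + beta * d_old, followed by a line search along the combined direction. A tiny standalone illustration with made-up gradients:

import numpy as np

g_old = np.array([1.0, 0.0])       # previous (negative) gradient
d_old = g_old.copy()               # previous search direction
g_new = np.array([0.5, 0.5])       # current (negative) gradient

beta = np.vdot(g_new, g_new) / np.vdot(g_old, g_old)
d_new = g_new + beta * d_old       # util.axpy(f_new, beta, f_old)
# d_new is [1., 0.5]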
Example #13
    def _update(self):
        self.min_lagrangian(self.x, self.u, self.mu)
        util.axpy(self.u, -self.mu, self.constraints(self.x))
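The final example is the classical multiplier update of the method of multipliers: after minimizing the Lagrangian in x, the estimate u moves along the constraint residual, u <- u - mu * constraints(x) (sign convention taken from the snippet). A one-step sketch with a hypothetical constraint:

import numpy as np

def constraints(x):                # hypothetical equality constraint c(x) = 0
    return np.array([x[0] + x[1] - 1.0])

x = np.array([0.6, 0.6])
u = np.zeros(1)
mu = 1.0
u -= mu * constraints(x)           # util.axpy(self.u, -self.mu, ...)
# u is now [-0.2]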