Example #1
    def test_base(self):
        # Check that the NumPy and OpenCL backends agree on basic operations
        assert np.abs(bops.norm(self.x_np_ones) -
                      bops.norm(self.x_ocl_ones)) < 1e-8
        assert np.abs(
            np.linalg.norm(self.x_np_ones) - bops.norm(self.x_ocl_ones)) < 1e-8
        assert np.sum(
            np.abs(
                bops.angle(self.x_np_randn) -
                np.asarray(bops.angle(self.x_ocl_randn)))) < 1e-4
        assert np.sum(
            np.abs(
                bops.abs(self.x_np_randn) -
                np.asarray(bops.abs(self.x_ocl_randn)))) < 1e-4
        assert np.sum(
            np.abs(
                bops.exp(self.x_np_randn) -
                np.asarray(bops.exp(self.x_ocl_randn)))) < 1e-4
        assert np.sum(
            np.abs(
                bops.conj(self.x_np_randn) -
                np.asarray(bops.conj(self.x_ocl_randn)))) < 1e-4
        assert np.sum(
            np.abs(
                bops.flip(self.x_np_randn) -
                np.asarray(bops.flip(self.x_ocl_randn)))) < 1e-4
        assert np.sum(
            np.abs(
                bops.transpose(self.x_np_randn) -
                np.asarray(bops.transpose(self.x_ocl_randn)))) < 1e-4

        assert np.sum(np.abs(self.A_np - np.asarray(self.A_ocl))) < 1e-4
        assert np.sum(
            np.abs(self.x_np_randn - np.asarray(self.x_ocl_randn))) < 1e-4
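
A note on the pattern: each assertion above compares a NumPy-backend result against the OpenCL-backend result via a summed absolute difference. The same check can be written with NumPy's testing helpers, which report the offending elements on failure. Below is a minimal, self-contained sketch of the idea; plain NumPy stands in for both backends, so none of these names come from the library itself.

import numpy as np

# Sketch of the backend-parity test pattern, assuming two backends that
# expose the same elementwise ops. Plain NumPy plays both roles here; in
# the real tests one side would come from the OpenCL backend.
rng = np.random.default_rng(0)
x = rng.standard_normal(16) + 1j * rng.standard_normal(16)

reference = np.angle(x)          # "NumPy backend" result
candidate = np.angle(x.copy())   # stand-in for the other backend's result

# assert_allclose reports per-element mismatches, unlike a bare sum-of-abs
np.testing.assert_allclose(np.asarray(candidate), reference, atol=1e-4)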
Example #2
    def test_operator_norm_l2(self):
        L2 = ops.L2Norm(image_size[0] * image_size[1],
                        dtype=global_dtype, backend=global_backend)

        # Check forward operator: applying L2 should return 0.5 * ||x||^2
        forward_error = np.sum(np.abs(L2 * yp.vec(self.x)
                                      - 0.5 * yp.norm(yp.vec(self.x)) ** 2))
        assert forward_error < eps, '%f' % forward_error

        # Check gradient
        L2.gradient_check()
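
The `gradient_check()` call above presumably validates the operator's analytic gradient against finite differences. As a point of reference, here is a minimal sketch of that idea for the L2-norm objective f(x) = 0.5 * ||x||^2, whose gradient is simply x. The function names below are illustrative, not the library's API.

import numpy as np

def f(x):
    # L2-norm objective: f(x) = 0.5 * ||x||^2, so grad f(x) = x
    return 0.5 * np.linalg.norm(x) ** 2

def grad_f(x):
    return x

def gradient_check(f, grad_f, x, h=1e-6, tol=1e-4):
    # Compare the analytic gradient against central finite differences
    g = grad_f(x)
    for i in range(x.size):
        e = np.zeros_like(x)
        e[i] = h
        fd = (f(x + e) - f(x - e)) / (2 * h)
        assert abs(fd - g[i]) < tol, 'gradient mismatch at index %d' % i

gradient_check(f, grad_f, np.random.randn(8))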
Example #3
    def _iteration_function(self, x, iteration_number, step_size):
        # Perform gradient step
        # TODO: check gradient shape
        if step_size is not None:
            if callable(step_size):
                # Step size is a schedule: a function of the iteration number
                x[:] -= step_size(iteration_number) * self.objective.gradient(x)
            else:
                # Explicit scalar step size is provided
                x[:] -= step_size * self.objective.gradient(x)
        else:
            # No explicit step size is provided: use the optimal step size if
            # the objective is convex, or a backtracking line search if not.
            if self.objective.convex:
                g = self.objective.grad(x)
                step_size = yp.norm(g) ** 2 / (yp.norm(g) ** 2 + eps)
                x[:] -= step_size * g
            else:
                x[:], _ = backTrackingStep(
                    x.reshape(-1), lambda x: yp.scalar(self.objective(x)),
                    self.objective.grad(x).reshape(-1))

        return x
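
The non-convex branch defers to `backTrackingStep`, whose implementation is not shown here. For context, a textbook Armijo backtracking line search looks roughly like the sketch below; this is the standard method under assumed conventions, not necessarily the library's implementation.

import numpy as np

def backtracking_step(x, objective, g, t0=1.0, beta=0.5, alpha=1e-4):
    # Armijo backtracking: shrink t until the sufficient-decrease condition
    #   objective(x - t * g) <= objective(x) - alpha * t * ||g||^2
    fx = objective(x)
    t = t0
    while objective(x - t * g) > fx - alpha * t * np.dot(g, g):
        t *= beta
        if t < 1e-12:  # give up if no descent is found along -g
            break
    return x - t * g, t

# Example on f(x) = 0.5 * ||x||^2, whose gradient at x is x itself
x = np.ones(4)
x_next, t = backtracking_step(x, lambda v: 0.5 * np.dot(v, v), x.copy())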
Example #4
    def solve(self,
              initialization=None,
              iteration_count=10,
              display_iteration_delta=None,
              **kwargs):

        # Process display iteration delta (guard against zero when
        # iteration_count < 10, which would break the modulo check below)
        if display_iteration_delta is None:
            display_iteration_delta = max(1, iteration_count // 10)

        # Try to import arrayfire and call garbage collection to free memory
        try:
            import arrayfire
            arrayfire.device_gc()
        except ImportError:
            pass

        # Initialize solver if it hasn't been already
        if not self.initialized:
            self._initialize(initialization, **kwargs)

        cost = []
        # Run Algorithm
        for iteration in range(iteration_count):

            # Record the norm of x before this iteration (to compute step norm)
            if self.multi_objective:
                x_prev_norm = sum([yp.norm(x) for x in self.x])
            else:
                x_prev_norm = yp.norm(self.x)

            # Perform iteration
            self.x = self._iteration_function(self.x, iteration,
                                              self.step_size)

            # Apply nesterov acceleration if desired
            if self.use_nesterov_acceleration:
                self.x = self.nesterov.iterate(self.x)

            # Store cost (for multi-objective solves, track the first objective)
            if self.multi_objective:
                objective_value = self.objective[0](self.x[0])
            else:
                objective_value = self.objective(self.x)
            cost.append(abs(yp.scalar(objective_value)))

            # Determine step norm
            if self.multi_objective:
                step_norm = abs(
                    sum([yp.norm(x) for x in self.x]) - x_prev_norm)
            else:
                step_norm = abs(yp.norm(self.x) - x_prev_norm)

            # Show update
            if self.display_type == 'text':
                if (iteration + 1) % display_iteration_delta == 0:
                    self.plot.update(iteration + 1, cost[-1],
                                     time.time() - self.t0, step_norm)
            elif self.display_type == 'plot':
                self.plot.update(iteration, new_cost=cost[-1])
                self.fig.canvas.draw()
            elif self.display_type is not None:
                raise ValueError('display_type %s is not defined!' %
                                 self.display_type)

            # Check if converged or diverged
            if len(cost) > 2:
                if self.convergence_tol is not None and (
                        abs(cost[-1] - cost[-2]) / max(cost[-1], 1e-10) <
                        self.convergence_tol or cost[-1] < 1e-20):
                    print(
                        "Met convergence requirement (delta < %.2E) at iteration %d"
                        % (self.convergence_tol, iteration + 1))
                    return self.x
                elif cost[-1] > cost[-2] and not self.let_diverge:
                    print("Diverged at iteration %d" % (iteration + 1))
                    return self.x
        return self.x
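
For orientation, the stopping rule above declares convergence when the relative change in cost falls below `convergence_tol`, and divergence when the cost increases (unless `let_diverge` is set). Below is a self-contained sketch of the same loop structure on a toy quadratic; all names here are illustrative, not the solver's API.

import numpy as np

def solve(x, cost, grad, step_size=0.1, iteration_count=100, tol=1e-6):
    # Gradient descent with the same relative-cost stopping rule as above
    costs = []
    for iteration in range(iteration_count):
        x = x - step_size * grad(x)
        costs.append(abs(cost(x)))
        if len(costs) > 2:
            # Converged: relative change in cost is below tol
            if abs(costs[-1] - costs[-2]) / max(costs[-1], 1e-10) < tol:
                print("Converged at iteration %d" % (iteration + 1))
                break
            # Diverged: cost increased between iterations
            if costs[-1] > costs[-2]:
                print("Diverged at iteration %d" % (iteration + 1))
                break
    return x

x = solve(np.ones(4), cost=lambda v: 0.5 * np.dot(v, v), grad=lambda v: v)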