Example #1
0
    def __init__(self, *args):
        """
        Dstack constructor.

        Args:
            *args: Operator instances and/or lists of Operator instances;
                None arguments (and None entries inside lists) are skipped.

        Raises:
            TypeError: if an argument is neither an Operator, a list, nor None.
        """
        self.ops = []
        for arg in args:
            if arg is None:
                continue
            elif isinstance(arg, Operator):
                self.ops.append(arg)
            elif isinstance(arg, list):
                # non-Operator entries (including None) inside a list are
                # silently ignored, matching the original behavior
                for op in arg:
                    if isinstance(op, Operator):
                        self.ops.append(op)
            else:
                raise TypeError(
                    'Argument must be either Operator or list of Operators')

        # build domain and range as the stack of each operator's own spaces
        self.n = len(self.ops)
        op_domain = [op.domain for op in self.ops]
        op_range = [op.range for op in self.ops]

        super(Dstack, self).__init__(domain=superVector(op_domain),
                                     range=superVector(op_range),
                                     name="Dstack")
Example #2
0
    def __init__(self, model, data, op, eps=1., reg=None,
                 minBound=None, maxBound=None, boundProj=None):
        """
        GeneralizedLasso constructor

        Args:
            model: initial domain vector
            data: data vector
            op: linear operator
            eps: regularization weight
            reg: regularizer operator (default: Identity)
            minBound: lower bound vector
            maxBound: upper bound vector
            boundProj: class with a method `apply(input_vec)` to project input_vec onto some convex set
        """
        super(GeneralizedLasso, self).__init__(model=model, data=data, minBound=minBound, maxBound=maxBound, boundProj=boundProj,
                                               name="Generalized Lasso")

        # gradient vector lives in the perturbation-model space
        self.grad = self.pert_model.clone()
        # modeling operator
        self.op = op

        # L1 regularizer: fall back to the identity when none is supplied
        if reg is None:
            reg = Identity(model)
        self.reg_op = reg
        self.eps = eps

        # one slot per objective term (data misfit, regularization)
        self.obj_terms = [None, None]
        self.linear = True
        # residual vectors used when evaluating the objective function
        self.res_data = self.op.range.clone().zero()
        self.res_reg = self.reg_op.range.clone().zero()
        # superVector built on pointers to res_data and res_reg
        self.res = superVector(self.res_data, self.res_reg)
Example #3
0
    def __init__(self, *args):
        """
        Hstack constructor.

        Args:
            *args: Operator instances and/or lists of Operator instances;
                None arguments (and None entries inside lists) are skipped.

        Raises:
            TypeError: if an argument is neither an Operator, a list, nor None.
            ValueError: if two stacked operators have incompatible ranges.
        """
        self.ops = []
        for arg in args:
            if arg is None:
                continue
            elif isinstance(arg, Operator):
                self.ops.append(arg)
            elif isinstance(arg, list):
                # non-Operator entries (including None) inside a list are
                # silently ignored, matching the original behavior
                for op in arg:
                    if isinstance(op, Operator):
                        self.ops.append(op)
            else:
                raise TypeError(
                    'Argument must be either Operator or list of Operators')

        # all horizontally-stacked operators must share the same range
        self.n = len(self.ops)
        for idx in range(self.n - 1):
            if not self.ops[idx].range.checkSame(self.ops[idx + 1].range):
                raise ValueError(
                    'Range incompatibility between Op %d and Op %d' %
                    (idx, idx + 1))
        # the domain stacks each operator's OWN domain; the original
        # appended ops[0].domain n times, which is wrong unless all
        # operator domains coincide
        domain = [op.domain for op in self.ops]
        super(Hstack, self).__init__(domain=superVector(domain),
                                     range=self.ops[0].range,
                                     name="Hstack")
Example #4
0
    def __init__(self, model, data, op, op_norm=None, lambda_value=None,
                 minBound=None, maxBound=None, boundProj=None):
        """
        Lasso constructor

        Args:
            model: initial domain vector
            data: data vector
            op: linear operator
            op_norm: operator norm that will be computed with the power method if not provided
            lambda_value: regularization weight
            minBound: lower bound vector
            maxBound: upper bound vector
            boundProj: class with a function "apply(input_vec)" to project input_vec onto some convex set
        """
        # Base-class setup: model, data and (optional) bounds/projection
        super(Lasso, self).__init__(model=model, data=data, minBound=minBound, maxBound=maxBound, boundProj=boundProj,
                                    name="Lasso")

        # Gradient lives in the perturbation-model space
        self.grad = self.pert_model.clone()

        # Modeling operator
        self.op = op

        # Residual superVector: data residual followed by model residual
        self.res = superVector(op.range.clone(), op.domain.clone()).zero()

        # No perturbation residual is needed for this inversion
        self.pert_res = None

        # Default solver settings; Lasso is a linear problem
        self.setDefaults()
        self.linear = True

        # Operator norm required by the solver: user-supplied value,
        # otherwise estimated with the power method
        self.op_norm = op_norm if op_norm is not None else self.op.powerMethod()
        self.lambda_value = lambda_value
        # One slot per objective term (useful to analyze each term)
        self.obj_terms = [None, None]
Example #5
0
 def run(self, problem, verbose=False, inner_verbose=False, restart=False):
     """
     Run the Split-Bregman solver on a GeneralizedLasso problem.

     Args:
         problem: GeneralizedLasso problem object to be minimized
         verbose: whether to print outer-iteration messages to stdout
         inner_verbose: whether to print inner linear-solver messages
             (forces verbose=True)
         restart: whether to resume a previous run from the restart folder

     Raises:
         TypeError: if problem is not a GeneralizedLasso
         ValueError: if the objective function becomes NaN
     """
     if type(problem) != GeneralizedLasso:
         raise TypeError("Input problem object must be a GeneralizedLasso")
     
     verbose = True if inner_verbose else verbose
     # build log messages only if they will be printed or written to a logger
     create_msg = verbose or self.logger
     
     # overriding save_grad variable
     self.save_grad = False
     
     # reset stopper before running the inversion
     self.stopper.reset()
     
     # initialize all the vectors and operators for Split-Bregman:
     # breg_b is the Bregman variable, breg_d the auxiliary split variable,
     # both living in the regularizer's range
     breg_b = problem.reg_op.range.clone().zero()
     breg_d = breg_b.clone()
     RL1x = breg_b.clone()  # store RegL1 * solution
     
     sb_mdl = problem.model.clone()
     # a nonzero initial model enables warm-starting the inner solver
     if sb_mdl.norm() != 0.:
         self.warm_start = True
     sb_mdl_old = problem.model.clone()
     
     # scale the regularizer by sqrt(eps) so the inner least-squares misfit
     # carries the eps weight (data for this row is scaled the same way below)
     reg_op = np.sqrt(problem.eps) * problem.reg_op  # TODO can we avoid this?
     
     # inner problem: least-squares on the vertical stack [op; sqrt(eps)*reg_op]
     linear_problem = LeastSquares(model=sb_mdl.clone(),
                                   data=superVector(problem.data, breg_d.clone()),
                                   op=Vstack(problem.op, reg_op),
                                   minBound=problem.minBound,
                                   maxBound=problem.maxBound,
                                   boundProj=problem.boundProj)
     
     if restart:
         # resume iteration counter, initial objective and model from disk
         self.restart.read_restart()
         outer_iter = self.restart.retrieve_parameter("iter")
         initial_obj_value = self.restart.retrieve_parameter("obj_initial")
         sb_mdl = self.restart.retrieve_vector("sb_mdl")
         if create_msg:
             msg = "Restarting previous solver run from: %s" % self.restart.restart_folder
             if verbose:
                 print(msg)
             if self.logger:
                 self.logger.addToLog(msg)
     
     else:
         outer_iter = 0
         if create_msg:
             # header banner for stdout / log file
             msg = 90 * '#' + '\n'
             msg += 12 * " " + "SPLIT-BREGMAN Solver log file\n"
             msg += 4 * " " + "Restart folder: %s\n" % self.restart.restart_folder
             msg += 4 * " " + "Inner iterations: %d\n" % self.niter_inner
             msg += 4 * " " + "Solver iterations: %d\n" % self.niter_solver
             msg += 4 * " " + "Problem: %s\n" % problem.name
             msg += 4 * " " + "L1 Regularizer weight: %.2e\n" % problem.eps
             msg += 4 * " " + "Bregman update weight: %.2e\n" % self.breg_weight
             if self.warm_start:
                 msg += 4 * " " + "Using warm start option for inner problem\n"
             msg += 90 * '#' + '\n'
             if verbose:
                 print(msg.replace(" log file", ""))
             if self.logger:
                 self.logger.addToLog(msg)
             if self.logger_lin_solv:
                 # separate banner for the inner-inversion log
                 msg = 90 * '#' + '\n'
                 msg += "\t\t\tSPLIT-BREGMAN Solver internal inversions log file\n"
                 msg += 90 * '#' + '\n'
                 self.logger_lin_solv.addToLog(msg)
     
     # Main iteration loop
     while True:
         # objective value at the current model
         obj0 = problem.get_obj(sb_mdl)
         # Saving previous model vector
         sb_mdl_old.copy(sb_mdl)
         
         if outer_iter == 0:
             # record the initial objective for relative stopping criteria
             initial_obj_value = obj0
             self.restart.save_parameter("obj_initial", initial_obj_value)
             if create_msg:
                 msg = self.iter_msg % (str(outer_iter).zfill(self.stopper.zfill),
                                        obj0,
                                        problem.obj_terms[0],
                                        obj0 - problem.obj_terms[0],
                                        problem.get_rnorm(sb_mdl))
                 if verbose:
                     print(msg)
                 if self.logger:
                     self.logger.addToLog("\n" + msg)
         
         if self.logger_lin_solv:
             self.logger_lin_solv.addToLog("\n" + 12 * " " + "Outer iteration: %s"
                                           % (str(outer_iter).zfill(self.stopper.zfill)))
         
         if isnan(obj0):
             raise ValueError("Objective function values NaN!")
         
         # ZERO: presumably a module-level numerical-zero threshold — defined elsewhere
         if obj0 <= ZERO:
             print("Objective function is numerically zero! Stop the inversion")
             break
         
         self.save_results(outer_iter, problem, force_save=False)
         
         # inner loop: alternate the linear solve and the shrinkage step
         for iter_inner in range(self.niter_inner):
             
             if self.logger_lin_solv:
                 msg = 8 * " " + "starting inner iter %d with d = %.2e, b = %.2e" \
                       % (iter_inner, breg_d.norm(), breg_b.norm())
                 self.logger_lin_solv.addToLog("\n" + msg)
             
             # resetting inversion problem variables
             if not self.warm_start:
                 linear_problem.model.zero()
             # prior = d - b
             linear_problem.data.vecs[-1].copy(breg_b)
             linear_problem.data.vecs[-1].scaleAdd(breg_d, -1., 1.)
             # scale the prior consistently with the sqrt(eps)-scaled reg_op
             linear_problem.data.vecs[-1].scale(np.sqrt(problem.eps))  # TODO can we avoid this?
             linear_problem.setDefaults()
             
             # solve inner problem
             self.linear_solver.run(linear_problem, verbose=inner_verbose)
             
             # compute RL1*x
             problem.reg_op.forward(False, linear_problem.model, RL1x)
             
             # update breg_d via soft-thresholding of RL1*x + b
             breg_d.copy(_soft_thresh(RL1x.clone() + breg_b, thresh=problem.eps))
             
             if self.logger_lin_solv:
                 msg = 8 * " " + "finished inner iter %d with sb_mdl = %.2e, RL1x = %.2e" \
                       % (iter_inner, linear_problem.model.norm(), RL1x.norm())
                 self.logger_lin_solv.addToLog(msg)
         
         # update breg_b: b += breg_weight * (RL1x - d)
         breg_b.scaleAdd(RL1x, 1.0, self.breg_weight)
         breg_b.scaleAdd(breg_d, 1., -self.breg_weight)
         
         # Update SB model
         sb_mdl.copy(linear_problem.model)
         
         # check objective function
         obj1 = problem.get_obj(sb_mdl)
         sb_mdl_norm = sb_mdl.norm()
         # NOTE: scaleAdd mutates sb_mdl_old in place; after this line it
         # holds the model change (old - new), which is fine because it is
         # re-copied from sb_mdl at the top of the next iteration
         chng_norm = sb_mdl_old.scaleAdd(sb_mdl, 1., -1.).norm()
         if chng_norm <= self.mod_tol * sb_mdl_norm:
             if create_msg:
                 msg = "Relative model change (%.4e) norm smaller than given tolerance (%.4e)" \
                       % (chng_norm, self.mod_tol * sb_mdl_norm)
                 if verbose:
                     print(msg)
                 if self.logger:
                     self.logger.addToLog(msg)
             break
         
         # iteration info
         outer_iter += 1
         if create_msg:
             msg = self.iter_msg % (str(outer_iter).zfill(self.stopper.zfill),
                                    obj1,
                                    problem.obj_terms[0],
                                    obj1 - problem.obj_terms[0],
                                    problem.get_rnorm(sb_mdl))
             if verbose:
                 print(msg)
             if self.logger:
                 self.logger.addToLog("\n" + msg)
         
         # saving in case of restart
         self.restart.save_parameter("iter", outer_iter)
         self.restart.save_vector("sb_mdl", sb_mdl)
         
         # stopper decides whether to terminate (iteration count / tolerance)
         if self.stopper.run(problem=problem, iiter=outer_iter, verbose=verbose, initial_obj_value=initial_obj_value):
             break
     
     # writing last inverted model
     self.save_results(outer_iter, problem, force_save=True, force_write=True)
     
     # ending message and log file
     if create_msg:
         msg = 90 * '#' + '\n'
         msg += 12 * " " + "SPLIT-BREGMAN Solver log file end\n"
         msg += 90 * '#'
         if verbose:
             print(msg.replace(" log file", ""))
         if self.logger:
             self.logger.addToLog("\n" + msg)
     
     # Clear restart object
     self.restart.clear_restart()