# Example #1
    def _update_with_backtracking(self, d, c, gradient_d, gradient_c,
                                  step_d, step_c):
        """Proximal update of dictionary ``d`` and coefficients ``c`` with a
        sufficient-decrease acceptance test (backtracking-style).

        NOTE(review): the candidate matrices ``d_1``/``c_1`` do not depend
        on the loop index ``i`` (steps and gradients are never shrunk), so
        every pass recomputes the same candidates while only the acceptance
        threshold ``second_part`` changes -- confirm this is the intended
        line search.
        """
        x = self.X
        # starting points for the proximal steps
        d_0 = d
        c_0 = c
        # data-fit term 0.5 * ||x - c_0.d_0||^2 at the starting point
        error = 0.5 * np.linalg.norm(x - c_0.dot(d_0))**2
        sigma = 0.9
        for i in range(1, 1000):
            # compute new matrices
            d_1 = self.dict_penalty. \
                apply_prox_operator(d_0 - step_d * gradient_d, gamma=step_d)
            d_1 = non_negative_projection(d_1, self.non_negativity, 'dict')
            difference_d = np.linalg.norm(d_1 - d_0)

            c_1 = self.coeff_penalty. \
                apply_prox_operator(c_0 - step_c * gradient_c, gamma=step_c)
            c_1 = non_negative_projection(c_1, self.non_negativity, 'coeff')
            difference_c = np.linalg.norm(c_1 - c_0)

            # penalized objective at the candidate point
            first_part = self.objective_function_value(d=d_1, c=c_1)

            # surrogate bound; the sigma**i terms shrink it each iteration
            second_part = (error +
                           self.dict_penalty.value(d_1) +
                           self.coeff_penalty.value(c_1) +
                           (difference_c*gradient_c).trace() +
                           (difference_d*gradient_d).trace() +
                           step_c*sigma**i * difference_c +
                           step_d*sigma**i * difference_d)

            # accept the candidates once the decrease condition holds
            if first_part <= second_part:
                break

        return d_1, c_1
# Example #2
    def _simple_update(self, d, c, gradient_d, gradient_c, step_d, step_c):
        """One forward-backward step on both matrices.

        Takes a gradient step on each matrix, applies the corresponding
        proximal operator, then projects onto the non-negative set as
        dictated by ``self.non_negativity``.
        """
        dict_forward = d - step_d * gradient_d
        new_d = self.dict_penalty.apply_prox_operator(dict_forward,
                                                      gamma=step_d)
        new_d = non_negative_projection(new_d, self.non_negativity, 'dict')

        coeff_forward = c - step_c * gradient_c
        new_c = self.coeff_penalty.apply_prox_operator(coeff_forward,
                                                       gamma=step_c)
        new_c = non_negative_projection(new_c, self.non_negativity, 'coeff')
        return new_d, new_c
    def _proximal_gradient_minimization(self,
                                        random_state,
                                        backtracking=0,
                                        n_iter=20000):
        """Minimize the objective w.r.t. the coefficients ``c`` with
        proximal gradient descent, keeping the dictionary ``self.D`` fixed.

        Parameters
        ----------
        random_state : RandomState
            Source of randomness for the coefficient initialization.
        backtracking : int, optional
            If truthy, use the backtracking line-search update instead of
            the plain proximal step.
        n_iter : int, optional
            Maximum number of iterations.

        Returns
        -------
        c : ndarray
            The optimized coefficient matrix.
        """
        x = self.X
        d = self.D
        n, p = x.shape
        k, _ = d.shape
        # random init in [-5, 5), projected if non-negativity is required
        c = non_negative_projection(
            random_state.rand(n, k) * 10 - 5, self.non_negativity)

        gamma = 1.1
        # d is fixed throughout this minimization, so the Lipschitz step
        # size is constant and is computed once (the previous version
        # recomputed the identical value on every iteration).
        step = _step_lipschitz(d, gamma)
        epsilon = 1e-4
        objective = self.objective_function_value(d=d, c=c)

        c_old = c
        logging.debug("Starting optimization")
        # BUG FIX: the previous version ignored the ``n_iter`` parameter
        # and looped over a hard-coded ``iters = 20000``.
        for i in range(n_iter):
            gradient = (c.dot(d) - x).dot(d.T)

            if backtracking:
                c = self._update_with_backtracking(x, d, c, gradient, step)
            else:
                c = self._simple_update(c, gradient, step)

            new_objective = self.objective_function_value(d=d, c=c)
            difference_objective = new_objective - objective
            objective = new_objective
            difference_c = np.linalg.norm(c - c_old)
            c_old = c

            # lazy %-style args: messages are only built when DEBUG is on
            logging.debug("Iteration: %i", i)
            logging.debug("Objective function: %f", objective)
            logging.debug("Difference between new objective and "
                          "previous one: %f", difference_objective)
            logging.debug("Difference between previous and new coefficients: "
                          "%f", difference_c)
            logging.debug("\n")

            # guard against numerical divergence
            assert ((not np.isnan(difference_objective))
                    and (not np.isinf(difference_objective))
                    and abs(difference_objective) < 1e+20)

            # stop when both objective and iterate have stabilized
            if (abs(difference_objective) <= epsilon
                    and difference_c <= epsilon):
                break

        return c
    def _update_with_backtracking(self, x, d, c, gradient, step):
        """Proximal update of the coefficients ``c`` (dictionary fixed)
        with a sufficient-decrease acceptance test.

        NOTE(review): ``step`` is never shrunk inside the loop, so the
        candidate ``c_1`` is identical at every iteration; only the
        threshold ``second_part`` changes. Also the prox uses
        ``self.penalty`` while the penalty value uses
        ``self.coeff_penalty`` -- confirm these refer to the same object.
        """
        c_0 = c
        # data-fit term 0.5 * ||x - c_0.d||^2 at the starting point
        error = 0.5 * np.linalg.norm(x - c_0.dot(d))**2
        sigma = 0.9
        for i in range(1, 1000):
            # compute new matrices
            c_1 = self.penalty. \
                apply_prox_operator(c_0 - step * gradient, gamma=step)
            c_1 = non_negative_projection(c_1, self.non_negativity)
            difference_c = np.linalg.norm(c_1 - c_0)

            # penalized objective at the candidate point
            first_part = self.objective_function_value(d=d, c=c_1)

            # surrogate bound; the sigma**i term shrinks it each iteration
            second_part = (error + self.coeff_penalty.value(c_1) +
                           (difference_c * gradient).trace() +
                           step * sigma**i * difference_c)

            # accept the candidate once the decrease condition holds
            if first_part <= second_part:
                break

        return c_1
 def _simple_update(self, c, gradient, step):
     """Single forward-backward step on the coefficient matrix."""
     forward = c - step * gradient
     updated = self.penalty.apply_prox_operator(forward, gamma=step)
     return non_negative_projection(updated, self.non_negativity)
# Example #6
    def _alternating_proximal_gradient_minimization(self, random_state,
                                                    n_iter=20000,
                                                    backtracking=0):
        """Jointly learn dictionary ``d`` and coefficients ``c`` by
        alternating proximal gradient steps on the two matrices.

        Parameters
        ----------
        random_state : RandomState
            Source of randomness for the initialization of ``d`` and ``c``.
        n_iter : int, optional
            Maximum number of alternating iterations.
        backtracking : int, optional
            If truthy, use the backtracking update instead of the plain
            proximal step.

        Returns
        -------
        (d, c) : tuple of ndarray
            The learnt dictionary and coefficient matrices.
        """
        x = self.X
        n, p = x.shape
        # random init in [-5, 5), projected if non-negativity is required
        d = non_negative_projection(random_state.rand(self.k, p)*10-5,
                                    self.non_negativity, 'dict')
        c = non_negative_projection(random_state.rand(n, self.k)*10-5,
                                    self.non_negativity, 'coeff')

        gamma_c = 1.1
        gamma_d = gamma_c
        step_d, step_c = _step_lipschitz(d, c,
                                         gamma_d=gamma_d, gamma_c=gamma_c)
        epsilon = 1e-4
        objective = self.objective_function_value(d=d, c=c)

        d_old = d
        c_old = c
        logging.debug("Starting optimization")
        for i in range(n_iter):
            # gradients of the data-fit term w.r.t. d and c
            gradient_d = c.T.dot(c.dot(d) - x)
            gradient_c = (c.dot(d) - x).dot(d.T)

            if backtracking:
                d, c = self._update_with_backtracking(d, c, gradient_d,
                                                      gradient_c, step_d,
                                                      step_c)
            else:
                d, c = self._simple_update(d, c, gradient_d, gradient_c,
                                           step_d, step_c)

            new_objective = self.objective_function_value(d=d, c=c)
            difference_objective = new_objective - objective
            objective = new_objective
            difference_d = np.linalg.norm(d - d_old)
            difference_c = np.linalg.norm(c - c_old)
            d_old = d
            c_old = c

            # both matrices changed, so the steps must be refreshed
            step_d, step_c = _step_lipschitz(d, c,
                                             gamma_c=gamma_c, gamma_d=gamma_d)

            # lazy %-style args: messages are only built when DEBUG is on.
            # BUG FIX: the last message previously contained a stray
            # backtick and lacked a space before the value.
            logging.debug("Iteration: %i", i)
            logging.debug("Objective function: %f", objective)
            logging.debug("Difference between new objective and "
                          "previous one: %f", difference_objective)
            logging.debug("Difference between previous and new dictionary: "
                          "%f", difference_d)
            logging.debug("Difference between previous and new coefficients: "
                          "%f\n\n", difference_c)

            # guard against numerical divergence
            assert ((not np.isnan(difference_objective)) and
                    (not np.isinf(difference_objective)) and
                    (abs(difference_objective) < 1e+20))

            # stop when objective and both iterates have stabilized
            if (abs(difference_objective) <= epsilon and
                    difference_d <= epsilon and
                    difference_c <= epsilon):
                break

        return d, c