Example #1
    def loss(self):
        Y = self.Y.copy()
        if self.k0 > 0:
            Y -= np.dot(self.D0, self.X0)
        cost = 0.5*normF2(Y - np.dot(self.D, self.X)) + \
                0.5*self._fidelity() + \
                0.5*self.lambd2*self._discriminative() + \
                self.lambd*norm1(self.X)

        if self.k0 > 0:
            cost += self.lambd*norm1(self.X0) + \
                    0.5*self.lambd2*normF2(self.X0 - build_mean_matrix(self.X0)) \
                    + self.eta*nuclearnorm(self.D0)
        return cost
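The loss above leans on a handful of small helpers (norm1, normF2, nuclearnorm, build_mean_matrix) that are not shown on this page. A minimal sketch of what they are assumed to compute:

import numpy as np

def norm1(X):
    # entry-wise l1 norm: sum of absolute values
    return np.sum(np.abs(X))

def normF2(X):
    # squared Frobenius norm: sum of squared entries
    return np.sum(X**2)

def nuclearnorm(X):
    # nuclear norm: sum of singular values
    return np.linalg.norm(X, 'nuc')

def build_mean_matrix(X):
    # matrix of the same shape as X whose columns are all the column mean of X
    return np.tile(X.mean(axis=1, keepdims=True), (1, X.shape[1]))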
Example #2
    def loss(self):
        """
        cost = COPAR_cost(Y, Y_range, D, D_range_ext, X, opts):
        Calculating cost function of COPAR with parameters lambda and eta are
        stored in `opts.lambda` and `opts.rho`.
        `f(D, X) = 0.5*sum_{c=1}^C 05*||Y - DX||_F^2 +
                      sum_{c=1}^C ( ||Y_c - D_Cp1 X^Cp1_c - D_c X_c^c||F^2 +
                  sum_{i != c}||X^i_c||_F^2) + lambda*||X||_1 +
                  0.5*eta*sum_{i \neq c}||Di^T*Dc||_F^2`
        -----------------------------------------------
        Author: Tiep Vu, [email protected], 5/11/2016
                (http://www.personal.psu.edu/thv102/)
        -----------------------------------------------
        """
        cost = self.lambd * utils.norm1(self.X)
        cost1 = utils.normF2(self.Y - np.dot(self.D, self.X))
        DCp1 = self._getDc(self.nclass)
        for c in range(self.nclass):
            Dc = self._getDc(c)
            Yc = self._getYc(c)
            Xc = utils.get_block_col(self.X, c, self.Y_range)
            Xcc = utils.get_block_row(Xc, c, self.D_range_ext)
            XCp1c = utils.get_block_row(Xc, self.nclass, self.D_range_ext)

            cost1 += utils.normF2(Yc - np.dot(Dc, Xcc) - np.dot(DCp1, XCp1c))
            XX = Xc[:self.D_range_ext[-2], :]
            XX = np.delete(XX,
                           range(self.D_range_ext[c], self.D_range_ext[c + 1]),
                           axis=0)
            cost1 += utils.normF2(XX)

        cost += cost1 + .5*self.eta*utils.normF2(\
                utils.erase_diagonal_blocks(np.dot(self.D.T, self.D), \
                self.D_range_ext, self.D_range_ext))
        return cost
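COPAR addresses class-specific blocks of Y, D and X through range vectors (Y_range, D_range_ext) holding cumulative block boundaries. The block helpers used above are assumed to behave roughly as follows:

def get_block_col(M, c, col_range):
    # columns of block c: col_range[c] .. col_range[c+1]
    return M[:, col_range[c]:col_range[c + 1]]

def get_block_row(M, c, row_range):
    # rows of block c: row_range[c] .. row_range[c+1]
    return M[row_range[c]:row_range[c + 1], :]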
Example #3
 def solve(self, Xinit=None, iterations=100, tol=1e-8, verbose=False):
     if Xinit is None:
         Xinit = np.zeros((self.D.shape[1], self.Y.shape[1]))
     Linv = 1 / self.L
     lambdaLinv = self.lambd / self.L
     x_old = Xinit.copy()
     y_old = Xinit.copy()
     t_old = 1
     it = 0
     # cost_old = float("inf")
     for it in range(iterations):
         # proximal gradient step: gradient descent on y_old, then soft-thresholding
         x_new = np.real(
             utils.shrinkage(y_old - Linv * self._grad(y_old), lambdaLinv))
         # FISTA momentum update of the auxiliary point y
         t_new = .5 * (1 + math.sqrt(1 + 4 * t_old**2))
         y_new = x_new + (t_old - 1) / t_new * (x_new - x_old)
         # stop when the mean absolute change per entry drops below tol
         e = utils.norm1(x_new - x_old) / x_new.size
         if e < tol:
             break
         x_old = x_new.copy()
         t_old = t_new
         y_old = y_new.copy()
         if verbose:
             print('iter \t%d/%d, loss \t %4.4f' %
                   (it + 1, iterations, self.lossF(x_new)))
     return x_new
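The only non-obvious primitive in this FISTA solver is utils.shrinkage, the proximal operator of the weighted l1 norm. A minimal sketch, assuming the standard element-wise soft-thresholding:

import numpy as np

def shrinkage(U, alpha):
    # soft-thresholding: sign(U) * max(|U| - alpha, 0), applied element-wise
    return np.sign(U) * np.maximum(np.abs(U) - alpha, 0)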
Example #4
 def loss(self):
     cost = 0
     for c in range(self.nclass):
         Yc = utils.get_block_col(self.Y, c, self.Y_range)
         Xc = self.X[c]
         Dc = utils.get_block_col(self.D, c, self.D_range)
         cost += 0.5 * utils.normF2(
             Yc - np.dot(Dc, Xc)) + self.lambd * utils.norm1(Xc)
     cost += 0.5*self.eta*utils.normF2(\
             utils.erase_diagonal_blocks(np.dot(self.D.T, self.D), self.D_range, self.D_range))
     return cost
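As in Example #2, the last term penalizes coherence between different class sub-dictionaries, 0.5*eta*sum_{i != c} ||D_i^T D_c||_F^2. utils.erase_diagonal_blocks is assumed to zero the diagonal blocks of D^T D so that only the cross-class products contribute; a sketch:

import numpy as np

def erase_diagonal_blocks(M, row_range, col_range):
    # zero the diagonal blocks M[row_range[c]:row_range[c+1], col_range[c]:col_range[c+1]]
    M = M.copy()
    for c in range(len(row_range) - 1):
        M[row_range[c]:row_range[c + 1], col_range[c]:col_range[c + 1]] = 0
    return M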
Example #5
    def train(self, fs, ls):
        # first half of the data estimates the sampling distribution q,
        # the second half drives the online updates
        e_fs = fs[:len(fs) // 2]
        e_ls = ls[:len(fs) // 2]
        t_fs = fs[len(fs) // 2:]
        t_ls = ls[len(fs) // 2:]
        self.estimate_q(e_fs, e_ls)

        for index, f in enumerate(t_fs):
            y = t_ls[index]
            # current weight vector: (z_p - z_n) scaled by B and normalized by ||z_p||_1 + ||z_n||_1
            W = [zp - zn for zp, zn in zip(self.z_p, self.z_n)]
            W = [w * self.B for w in W]
            d = utils.norm1(self.z_p) + utils.norm1(self.z_n)
            W = [w / d for w in W]
            self.ws.append(W)
            # sample k coordinates of f with probabilities q, reweighted by 1/q
            X = [0.0 for _ in range(self.w_dim)]
            for i in range(self.k):
                x_index = utils.sample_with_percents(self.q)
                X[x_index] += 1 / self.q[x_index] * f[x_index]
            X = [x / self.w_dim for x in X]
            # sample one coordinate of W in proportion to its magnitude
            WW = [abs(w) for w in W]
            sWW = sum(WW)
            if sWW != 0:
                percents = [ww / sWW for ww in WW]
                try:
                    w_index = utils.sample_with_percents(percents)
                except Exception:
                    print(W)
                    raise
            else:
                w_index = random.choice(range(self.w_dim))
            sign = 1.0 if W[w_index] > 0 else -1.0
            d = utils.norm1(W) * sign * f[w_index] - y
            # clip the gradient, then apply a multiplicative (exponentiated-gradient) update to z_p, z_n
            gs = [x * d for x in X]
            for j, g in enumerate(gs):
                final_g = max(min(g, 1.0 / self.lr), -1.0 / self.lr)
                self.z_p[j] = self.z_p[j] * math.exp(-self.lr * final_g)
                self.z_n[j] = self.z_n[j] * math.exp(self.lr * final_g)
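The loop above samples coordinate indices in proportion to a weight vector via utils.sample_with_percents, which is not shown here. A plausible stand-in (hypothetical, name and behavior assumed) that draws an index according to the given probabilities:

import random

def sample_with_percents(percents):
    # draw index i with probability percents[i]; weights are assumed to sum to ~1
    r = random.random()
    acc = 0.0
    for i, p in enumerate(percents):
        acc += p
        if r <= acc:
            return i
    return len(percents) - 1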
Example #6
def fista_l1(data, K, Kadj, Lambda, Lip=None, n_it=100, return_all=True):
    '''
    Beck-Teboulle's forward-backward algorithm to minimize the objective function
        0.5*||K*x - d||_2^2 + Lambda*||x||_1
    where K is a linear operator.

    K : forward operator
    Kadj : backward (adjoint) operator
    Lambda : weight of the regularization (the larger Lambda, the sparser the solution)
    Lip : largest eigenvalue of Kadj*K
    n_it : number of iterations
    return_all: if True, an array containing the values of the objective function will be returned
    '''

    if Lip is None:
        print("Warn: fista_l1(): Lipschitz constant not provided, computing it with 20 iterations")
        Lip = power_method(K, Kadj, data, 20)**2 * 1.2
        print("Lip = %e" % Lip)

    if return_all: en = np.zeros(n_it)
    x = np.zeros_like(Kadj(data))
    y = np.zeros_like(x)
    for k in range(0, n_it):
        grad_y = Kadj(K(y) - data)
        x_old = x
        w = y - (1.0/Lip)*grad_y
        w = _soft_thresh(w, Lambda/Lip)
        x = w
        y = x + (k/(k+10.1))*(x - x_old)
        # Calculate norms
        if return_all:
            fidelity = 0.5*norm2sq(K(x)-data)
            l1 = norm1(w)
            energy = fidelity + Lambda*l1
            en[k] = energy
            if (k%10 == 0):
                print("[%d] : energy %e \t fidelity %e \t L1 %e" % (k, energy, fidelity, l1))
        #~ elif (k%10 == 0): print("Iteration %d" % k)
    if return_all: return en, x
    else: return x
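fista_l1 relies on two small helpers, _soft_thresh and norm2sq (power_method, which estimates the largest singular value of K, is omitted here). Assuming the usual definitions:

import numpy as np

def _soft_thresh(x, beta):
    # proximal operator of beta*||.||_1: element-wise soft-thresholding
    return np.sign(x) * np.maximum(np.abs(x) - beta, 0)

def norm2sq(x):
    # squared l2 norm of the flattened array
    return np.dot(x.ravel(), x.ravel())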
Example #7
 def lossF(self, Xc):
     return self._calc_f(Xc) + utils.norm1(Xc)
Example #8
 def lossF(self, X):
     return self._calc_f(X) + self.lambd * utils.norm1(X)
Example #9
 def check_grad(self, X):
     grad1 = self._grad(X)
     grad2 = num_grad(self._calc_f, X)
     dif = utils.norm1(grad1 - grad2) / grad1.size
     print('grad difference = %.7f' % dif)
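check_grad compares the analytic gradient with a numerical one. num_grad is assumed to be a central-difference approximation taken entry by entry over X, along these lines:

import numpy as np

def num_grad(func, X, eps=1e-4):
    # central differences: (f(X + eps*e_i) - f(X - eps*e_i)) / (2*eps) for each entry i
    # X is assumed to be a float array; entries are perturbed in place and restored
    grad = np.zeros_like(X)
    for idx in np.ndindex(X.shape):
        orig = X[idx]
        X[idx] = orig + eps
        f_plus = func(X)
        X[idx] = orig - eps
        f_minus = func(X)
        X[idx] = orig
        grad[idx] = (f_plus - f_minus) / (2 * eps)
    return grad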
Example #10
 def lossF(self, X1):
     return self._calc_f(X1) + self.lambd * norm1(X1)
Example #11
File: ODL.py Project: hengee/DICTOL_python
 def loss(self):
     l = 0.5*utils.normF2(self.Y - np.dot(self.D, self.X)) + \
             self.lambd*utils.norm1(self.X)
     return l