def f_Y(Y_in):
    """Evaluate the full augmented-Lagrangian cost as a function of the
    flattened coefficient matrix Y.

    The cost is the sum of a data-fit term, the Lagrange-multiplier term for
    the structure constraint, and the quadratic penalty on the deviation of
    L = U Y from its projection onto the Hankel-structured set.

    NOTE(review): relies on closure variables (U, X, Lambda, rho, k, m, n,
    grslra, hankel, innerprod) from the enclosing scope.
    """
    coeffs = np.reshape(Y_in, (k, n))
    approx = np.dot(U, coeffs)
    # Deviation of the low-rank estimate from the structured set
    struct_residual = approx - hankel.orth_proj(approx)
    term_data = grslra.lpnorm(X - approx)
    term_multiplier = -innerprod(Lambda, struct_residual) / (m * n)
    term_penalty = rho / 2.0 * np.linalg.norm(struct_residual, 'fro') ** 2 / (m * n)
    return term_data + term_multiplier + term_penalty
def prepare(self, iteration):
    """Set up the conjugate-gradient search direction H and the inner
    product <G, H> for the current iteration.

    Uses the Hestenes-Stiefel update (clipped at zero, which gives an
    automatic restart), resets the direction to the negative gradient at a
    fixed rate, and falls back to the negative gradient whenever the
    computed direction is not a descent direction (<G, H> > 0).
    """
    prev_G = self.G
    prev_H = self.H
    self.prepare_grad()
    if iteration % self.params["direction_reset_rate"]:
        # Transport the previous gradient and direction to the current point
        transported_G = self.space.transport(prev_G, self.X_old, prev_H, self.t)
        transported_H = self.space.transport(prev_H, self.X_old, prev_H, self.t)
        # Hestenes-Stiefel coefficient; the small constant guards against a
        # zero denominator. (Dai-Yuan, Fletcher-Reeves and Polak-Ribiere are
        # possible alternative update rules.)
        grad_diff = self.G - transported_G
        gamma = innerprod(grad_diff, self.G) / (innerprod(grad_diff, transported_H) + 1E-16)
        gamma = np.maximum(gamma, 0)
        self.H = self.space.get_H(self.G, gamma, transported_H)
    else:
        # Initialization or scheduled reset: steepest-descent direction
        self.H = self.space.get_H(self.G, 0.0, 0.0)
    self.GH = innerprod(self.G, self.H)
    if self.GH > 0:
        # Not a descent direction -> restart with the negative gradient
        self.H = self.space.get_H(self.G, 0.0, 0.0)
        self.GH = innerprod(self.G, self.H)
def f_Lambda_U(U_in):
    """Evaluate only the Lagrange-multiplier term of the cost as a function
    of the flattened basis matrix U.

    NOTE(review): relies on closure variables (Y, Lambda, k, m, n, hankel,
    innerprod) from the enclosing scope.
    """
    basis = np.reshape(U_in, (m, k))
    approx = np.dot(basis, Y)
    # Deviation of the low-rank estimate from the structured set
    struct_residual = approx - hankel.orth_proj(approx)
    return -innerprod(Lambda, struct_residual) / (m * n)
def f_Lambda_Y(Y_in):
    """Evaluate only the Lagrange-multiplier term of the cost as a function
    of the flattened coefficient matrix Y.

    NOTE(review): relies on closure variables (U, Lambda, k, m, n, hankel,
    innerprod) from the enclosing scope.
    """
    coeffs = np.reshape(Y_in, (k, n))
    approx = np.dot(U, coeffs)
    # Deviation of the low-rank estimate from the structured set
    struct_residual = approx - hankel.orth_proj(approx)
    return -innerprod(Lambda, struct_residual) / (m * n)
def prepare(self, iteration):
    """Refresh the gradient and use the steepest-descent direction.

    The `iteration` argument is accepted for interface compatibility with
    other `prepare` implementations but does not influence the direction.
    """
    self.prepare_grad()
    direction = self.space.get_H(self.G, 0.0, 0.0)
    self.H = direction
    self.GH = innerprod(self.G, direction)