def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`."""

    # The y-update is a weighted l2 prox (group shrinkage) of the split
    # residual AX + U, applied jointly over the spatial axes.
    shrink_wt = (self.lmbda / self.rho) * self.Wtvna
    self.Y = sp.prox_l2(self.AX + self.U, shrink_wt, axis=self.saxes)
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`."""

    # Shrink the residual AX + U with a weighted l2 prox over the
    # spatial axes; the threshold is scaled by lmbda / rho.
    self.Y = prox_l2(self.AX + self.U,
                     (self.lmbda / self.rho) * self.Wtvna,
                     axis=self.saxes)
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`."""

    residual = self.AX + self.U
    # All channels except the last: group (l2) shrinkage with weight mu/rho.
    self.Y[..., :-1] = sp.prox_l2(residual[..., :-1], self.mu / self.rho)
    # Final channel: weighted soft thresholding (l1 prox).
    self.Y[..., -1] = sp.prox_l1(residual[..., -1],
                                 (self.lmbda / self.rho) * self.Wl1)
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`."""

    AXU = self.AX + self.U
    # Leading channels take a joint l2 prox (group shrinkage).
    self.Y[..., 0:-1] = sp.prox_l2(AXU[..., 0:-1], self.mu / self.rho)
    # Last channel takes a weighted l1 prox (soft thresholding).
    l1_weight = (self.lmbda / self.rho) * self.Wl1
    self.Y[..., -1] = sp.prox_l1(AXU[..., -1], l1_weight)
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`."""

    axu = self.AX + self.U
    # Block 0: weighted l1 prox (soft thresholding) written in place.
    self.block_sep0(self.Y)[:] = sp.prox_l1(
        self.block_sep0(axu), (self.lmbda / self.rho) * self.Wl1)
    # Block 1: joint l2 prox over the channel axis and the final axis.
    self.block_sep1(self.Y)[:] = sp.prox_l2(
        self.block_sep1(axu), self.mu / self.rho,
        axis=(self.cri.axisC, -1))
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`."""

    # TV gradient channels: weighted l2 prox over the spatial axes.
    grad_resid = self.AX[..., 0:-1] + self.U[..., 0:-1]
    self.Y[..., 0:-1] = prox_l2(grad_resid,
                                (self.lmbda / self.rho) * self.Wtvna,
                                axis=self.saxes)
    # Data-fidelity channel: weighted l1 prox on the S-offset residual.
    fid_resid = self.AX[..., -1] + self.U[..., -1] - self.S
    self.Y[..., -1] = prox_l1(fid_resid, (1.0 / self.rho) * self.Wdf)
def resolvent_d_l2(d, y, s, rho, N, M, d_size):
    """Apply the resolvent step to the stacked (d, y) variable.

    The dictionary part ``d`` is projected onto the constraint set via
    ``cnvrep.Pcn``; the auxiliary part ``y`` takes a reflected l2 prox
    shifted by the signal ``s``.  Returns the two parts re-concatenated.
    """
    # Spatial side length; assumes N is a perfect square — TODO confirm.
    side = int(np.sqrt(N))
    # Rearrange the flat dictionary into (side, side, M) spatial layout.
    d_spatial = d.reshape(M, side, side).transpose(1, 2, 0)
    # Constraint projection (support-limited, normalised filters).
    projected = cnvrep.Pcn(d_spatial.reshape(side, side, 1, 1, M),
                           (d_size, d_size, M), Nv=(side, side)).squeeze()
    # Flatten back to the filter-major vector form.
    d_flat = projected.transpose(2, 0, 1).reshape(N * M)
    # Reflected l2 prox for y, centred on the signal s.
    y_new = y - (pr.prox_l2(y - s, 1 / rho) + s)
    return np.concatenate([d_flat, y_new], 0)
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`."""

    AXU = self.AX + self.U
    # Gradient channels: joint weighted l2 shrinkage over spatial axes.
    self.Y[..., 0:-1] = sp.prox_l2(AXU[..., 0:-1],
                                   (self.lmbda / self.rho) * self.Wtvna,
                                   axis=self.saxes)
    # Data-fidelity channel: weighted l1 prox on the S-offset residual.
    self.Y[..., -1] = sp.prox_l1(AXU[..., -1] - self.S,
                                 (1.0 / self.rho) * self.Wdf)
def coefficient_learning_l2(D, x, y, s, N, M, rho, lamd, d_size, ite=200):
    """Learn sparse coefficients per signal via a splitting iteration
    with an l1 sparsity prox and a reflected l2 data-fidelity prox.

    Parameters
    ----------
    D : dictionary (spatial form), converted to frequency form by D_to_df
    x : list of flattened coefficient vectors (length N * M each); updated
        in place
    y : list of auxiliary variables; updated in place
    s : list of observed signals, one per entry of x
    N : signal length
    M : number of filters
    rho : penalty / step parameter
    lamd : l1 regularisation weight
    d_size : filter support size
    ite : iterations per signal (also caps the returned logs)

    Returns
    -------
    tuple
        (thresholded coefficients per signal, y, x-move log, y-move log,
        mean nonzero count of the thresholded coefficients)
    """
    df = D_to_df(D, N, d_size)
    reX = []
    xlog = []
    ylog = []
    zero = []
    for j in range(len(x)):
        count = 0
        while True:
            # Prox steps: soft-threshold x; reflected l2 prox for y about s.
            pr_x = pr.prox_l1(x[j], lamd / rho)
            pr_y = y[j] - (pr.prox_l2(y[j] - s[j], 1 / rho) + s[j])
            # Reflection (2*prox - identity) feeding the linear resolvent.
            bx = 2 * pr_x - x[j]
            by = 2 * pr_y - y[j]
            # Per-filter FFT of the reflected x variable.
            # FIX: np.complex was removed in NumPy 1.24; the builtin
            # `complex` is its exact equivalent (complex128).
            bxf = np.zeros(bx.shape, dtype=complex)
            for i in range(M):
                bxf[i * N:(i + 1) * N] = np.fft.fft(bx[i * N:(i + 1) * N])
            byf = np.fft.fft(by)
            # Frequency-domain linear-system resolvent (project helpers).
            IDDt = make_IDDt(df, N, M, rho)
            hidari = make_hidari(IDDt, df, bxf, N, M, rho)
            migi = make_migi(IDDt, df, byf, N, M, rho)
            xy = np.concatenate([x[j], y[j]], 0)
            pr_xy = np.concatenate([pr_x, pr_y], 0)
            xy = xy + migi + hidari - pr_xy
            x[j] = xy[:N * M]
            y[j] = xy[N * M:]
            # Diagnostics: change in the prox outputs between iterations.
            move = pr.prox_l1(x[j], lamd / rho) - pr_x
            xlog.append(np.linalg.norm(move))
            # NOTE(review): pr_y above used prox_l2, but this residual
            # recomputes with prox_l1 — possibly a bug; preserved as-is.
            move = y[j] - (pr.prox_l1(y[j] - s[j], 1 / rho) + s[j]) - pr_y
            ylog.append(np.linalg.norm(move))
            h = pr.prox_l1(x[j], lamd / rho)
            # ord=0 counts nonzero entries of the thresholded estimate.
            hizero = np.linalg.norm(h.astype(np.float64), ord=0)
            count = count + 1
            print("count :", count, "move[" + str(j + 1) + "]",
                  np.linalg.norm(move), "hizero", hizero)
            if count >= ite:
                break
        zero.append(hizero)
        print("final coefcount :", count)
        reX.append(h)
    return reX, y, xlog[:ite], ylog[:ite], sum(zero) / len(zero)