import numpy as np
import numpy.linalg as la


def cond(x, p=None):
    """Condition number of a (stacked) matrix x in the norm given by p."""
    _assertNoEmpty2d(x)  # internal shape-check helper: rejects empty 2d arrays
    if p in (None, 2):
        # 2-norm condition number: ratio of largest to smallest singular value.
        s = la.svd(x, compute_uv=False)
        return s[..., 0] / s[..., -1]
    elif p == -2:
        # Reciprocal case: smallest over largest singular value.
        s = la.svd(x, compute_uv=False)
        r = s[..., -1] / s[..., 0]
    else:
        # General norms need an explicit inverse, so x must be square.
        _assertRankAtLeast2(x)   # internal helper: x must have ndim >= 2
        _assertNdSquareness(x)   # internal helper: last two dims must be equal
        invx = la.inv(x)
        r = la.norm(x, ord=p, axis=(-2, -1)) * la.norm(invx, ord=p, axis=(-2, -1))

    # Convert NaNs to infs unless the original array had NaN entries.
    orig_nan_check = np.full_like(r, ~np.isnan(r).any())
    nan_mask = np.logical_and(np.isnan(r), ~np.isnan(x).any(axis=(-2, -1)))
    r = np.where(orig_nan_check, np.where(nan_mask, np.inf, r), r)
    return r
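
# A minimal usage sketch, assuming the internal assertion helpers above are
# defined: the condition number is near 1 for well-conditioned matrices and
# blows up as the matrix approaches singularity.
A = np.array([[1.0, 0.0],
              [0.0, 1e-6]])
print(cond(A))        # ~1e6: ratio of singular values 1 and 1e-6
print(cond(A, p=1))   # condition number in the induced 1-norm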
import numpy as np
import numpy.linalg as LA


def construct_uv(Atemp, chiM):
    """Split a four-index tensor into two three-index tensors by truncated SVD."""
    chiA = Atemp.shape[0]
    # Truncated bond dimension: at most chiM, never more than chiA**2.
    chitemp = min(chiA**2, chiM)
    # SVD of the tensor reshaped into a chiA^2 x chiA^2 matrix.
    utemp, stemp, vtemp = LA.svd(Atemp.reshape(chiA**2, chiA**2), full_matrices=False)
    # Absorb the square root of the singular values into each factor, so that
    # contracting U1 and V1 over the new bond reproduces Atemp up to truncation.
    U1 = utemp[:, :chitemp] @ np.diag(np.sqrt(stemp[:chitemp]))
    U1 = U1.reshape(chiA, chiA, chitemp)
    V1 = np.diag(np.sqrt(stemp[:chitemp])) @ vtemp[:chitemp, :]
    V1 = np.transpose(V1.reshape(chitemp, chiA, chiA), (1, 2, 0))
    return U1, V1
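
# A small sanity check of construct_uv as defined above: with no truncation
# (chiM >= chiA**2), contracting U1 and V1 over the new bond index should
# reproduce the original tensor.
chiA = 3
Atemp = np.random.rand(chiA, chiA, chiA, chiA)
U1, V1 = construct_uv(Atemp, chiM=chiA**2)
recon = np.einsum('ijk,lmk->ijlm', U1, V1)
print(np.allclose(recon, Atemp))  # True up to floating-point error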
import math

import numpy as np
import numpy.linalg as la

# TT-SVD: sequentially split the tensor A into tensor-train cores by
# truncated SVDs of its unfoldings.
d = A.ndim                                     # number of tensor modes
N = A.size                                     # total number of elements
n = A.shape
eps = 1e-12                                    # target accuracy
delta = (eps / math.sqrt(d - 1)) * la.norm(A)  # cutting parameter
C = A                                          # current unfolding, updated each step
G = []                                         # tt-cores
r = [1]                                        # tt-ranks, r[0] = 1
for k in range(1, d):
    # Unfold C into an (r[k-1]*n[k-1]) x (remaining) matrix.
    C = np.reshape(C, (r[k - 1] * n[k - 1], N // (r[k - 1] * n[k - 1])))
    # Low-rank approximation of the unfolding.
    u, s, v = la.svd(C, full_matrices=False)
    # Pick the smallest rank whose discarded tail of squared singular
    # values stays below delta**2.
    nsize = np.size(s)
    rres = nsize
    for rk in range(nsize):
        tail = np.sum(s[rk + 1:] ** 2)
        if tail <= delta ** 2:
            rres = rk + 1
            break
    r.append(rres)
    G.append(np.reshape(u[:, :r[k]], (r[k - 1], n[k - 1], r[k])))
    # Carry the remainder (singular values times right factors) forward.
    C = np.diag(s[:r[k]]) @ v[:r[k], :]
    N = (N * r[k]) // (n[k - 1] * r[k - 1])
# The remaining matrix is the last core.
G.append(np.reshape(C, (r[d - 1], n[d - 1], 1)))
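
# A quick sanity check, assuming the fragment above has been run with a
# random tensor, e.g. A = np.random.rand(4, 5, 6): contracting the cores
# back together should reproduce A to within the requested accuracy eps.
full = G[0]
for core in G[1:]:
    full = np.tensordot(full, core, axes=([-1], [0]))  # contract shared tt-rank index
full = full.reshape(A.shape)
print(np.allclose(full, A))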
from jax.numpy import linalg  # assumed backend; JaxArray is the wrapper type defined elsewhere


def svd(a, full_matrices=True, compute_uv=True):
    """SVD wrapper that unwraps and re-wraps JaxArray values."""
    if isinstance(a, JaxArray):
        a = a.value
    if compute_uv:
        u, s, vh = linalg.svd(a, full_matrices=full_matrices, compute_uv=True)
        return JaxArray(u), JaxArray(s), JaxArray(vh)
    # With compute_uv=False the backend returns only the singular values,
    # so unpacking into three results would fail.
    s = linalg.svd(a, full_matrices=full_matrices, compute_uv=False)
    return JaxArray(s)
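
# A minimal usage sketch, assuming JaxArray simply wraps an array in a
# .value attribute as the code above implies.
import jax.numpy as jnp

m = JaxArray(jnp.arange(6.0).reshape(2, 3))
u, s, vh = svd(m, full_matrices=False)
print(s.value)       # singular values, wrapped as a JaxArray
s_only = svd(m, compute_uv=False)
print(s_only.value)  # same values via the compute_uv=False path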