import torch


def feature_morph(latent, boundary, effect_coef, latent_optimizer, facenet,
                  n_iters=100, identity_correction=True, identity_coef=2):
    device = latent_optimizer.device
    latent = torch.tensor(latent, dtype=torch.float32, device=device)
    latent.requires_grad_(True)

    # embedding of the original image, used as the identity reference
    with torch.no_grad():
        original_img = latent_optimizer.model.synthesis(latent)
        original_emb = facenet(original_img)

    boundary = torch.tensor(boundary, dtype=torch.float32, device=device)
    boundary_expanded = torch.tile(boundary, (1, 18, 1)).to(device)

    path = []
    for step in range(n_iters):
        # move the latent a small step along the attribute boundary
        with torch.no_grad():
            latent += boundary * (effect_coef / n_iters)

        if identity_correction:
            if latent.grad is not None:
                latent.grad.zero_()
            new_img = latent_optimizer.model.synthesis(latent)
            new_emb = facenet(new_img)
            # similarity between the original and current face embeddings
            loss = torch.inner(original_emb, new_emb)
            loss.backward()
            with torch.no_grad():
                gradients = latent.grad
                # remove the gradient component along the boundary so the
                # identity correction does not undo the attribute edit
                projection = torch.inner(gradients.squeeze(), boundary.squeeze())
                projected_b = projection.view(-1, 1) * boundary_expanded.squeeze()
                ord_gradients = gradients - projected_b
                latent += identity_coef * ord_gradients

        path.append(latent.detach().cpu().numpy())

    result = path[-1]
    return result, path
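
# Hedged sketch (not part of the original code): isolates the orthogonal-projection
# step used in feature_morph above. The W+ latent shape (1, 18, 512) and the
# boundary shape (1, 512) are assumptions for illustration only.
import torch


def project_out_boundary(gradients, boundary):
    # gradients: (1, 18, 512); boundary: (1, 512), assumed (approximately) unit norm
    projection = torch.inner(gradients.squeeze(0), boundary.squeeze(0))  # (18,)
    projected = projection.view(-1, 1) * boundary                        # (18, 512)
    return gradients - projected                                         # component orthogonal to boundary


g = torch.randn(1, 18, 512)
b = torch.nn.functional.normalize(torch.randn(1, 512), dim=1)
orth = project_out_boundary(g, b)
# the remaining gradient has (near-)zero component along the boundary direction
print(torch.inner(orth.squeeze(0), b.squeeze(0)).abs().max())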
def forward(self, x):
    # x shape: (N, 1)
    q = self.q(x).view(-1, 1)
    k = self.k(x)
    v = self.v(x)
    alpha = q * k                  # elementwise attention scores
    alpha = self.softmax(alpha)    # normalized weights
    out = torch.inner(v, alpha)    # weighted combination of the values
    return out
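
# Hedged sketch (assumption): a self-contained version of the forward pass above,
# with q assumed to be nn.Linear(1, 1), k and v assumed to be nn.Linear(1, d), and
# softmax taken over the feature dimension. Note that torch.inner contracts only the
# last dimension, so inner((N, d), (N, d)) produces an (N, N) output here.
import torch
import torch.nn as nn


class TinyAttention(nn.Module):
    def __init__(self, d=4):
        super().__init__()
        self.q = nn.Linear(1, 1)
        self.k = nn.Linear(1, d)
        self.v = nn.Linear(1, d)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # x shape: (N, 1)
        q = self.q(x).view(-1, 1)      # (N, 1)
        k = self.k(x)                  # (N, d)
        v = self.v(x)                  # (N, d)
        alpha = self.softmax(q * k)    # (N, d), rows sum to 1
        return torch.inner(v, alpha)   # (N, N)


x = torch.randn(8, 1)
print(TinyAttention()(x).shape)  # torch.Size([8, 8])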
def blas_lapack_ops(self):
    m = torch.randn(3, 3)
    a = torch.randn(10, 3, 4)
    b = torch.randn(10, 4, 3)
    v = torch.randn(3)
    return (
        torch.addbmm(m, a, b),
        torch.addmm(torch.randn(2, 3), torch.randn(2, 3), torch.randn(3, 3)),
        torch.addmv(torch.randn(2), torch.randn(2, 3), torch.randn(3)),
        torch.addr(torch.zeros(3, 3), v, v),
        torch.baddbmm(m, a, b),
        torch.bmm(a, b),
        torch.chain_matmul(torch.randn(3, 3), torch.randn(3, 3), torch.randn(3, 3)),
        # torch.cholesky(a),  # deprecated
        torch.cholesky_inverse(torch.randn(3, 3)),
        torch.cholesky_solve(torch.randn(3, 3), torch.randn(3, 3)),
        torch.dot(v, v),
        torch.eig(m),
        torch.geqrf(a),
        torch.ger(v, v),
        torch.inner(m, m),
        torch.inverse(m),
        torch.det(m),
        torch.logdet(m),
        torch.slogdet(m),
        torch.lstsq(m, m),
        torch.lu(m),
        torch.lu_solve(m, *torch.lu(m)),
        torch.lu_unpack(*torch.lu(m)),
        torch.matmul(m, m),
        torch.matrix_power(m, 2),
        # torch.matrix_rank(m),
        torch.matrix_exp(m),
        torch.mm(m, m),
        torch.mv(m, v),
        # torch.orgqr(a, m),
        # torch.ormqr(a, m, v),
        torch.outer(v, v),
        torch.pinverse(m),
        # torch.qr(a),
        torch.solve(m, m),
        torch.svd(a),
        # torch.svd_lowrank(a),
        # torch.pca_lowrank(a),
        # torch.symeig(a),  # deprecated
        # torch.lobpcg(a, b),  # not supported
        torch.trapz(m, m),
        torch.trapezoid(m, m),
        torch.cumulative_trapezoid(m, m),
        # torch.triangular_solve(m, m),
        torch.vdot(v, v),
    )
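
# Hedged aside: torch.inner (used in the listing above) contracts only the last
# dimension, which distinguishes it from matmul for 2-D inputs. Shapes below are
# chosen purely for illustration.
import torch

v = torch.randn(3)
m = torch.randn(2, 3)
print(torch.inner(v, v).shape)                      # scalar, same as torch.dot(v, v)
print(torch.inner(m, m).shape)                      # (2, 2), equivalent to m @ m.T
print(torch.allclose(torch.inner(m, m), m @ m.T))   # True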
def test_statistic(C0, T, sigma_hat, method="CM", C1=None):
    if method == "CM":
        # CM statistic: 3 / (sigma_hat^2 * T) * <C0, C0>
        rslt = 3 / (sigma_hat ** 2 * T) * torch.inner(C0, C0)
    elif method == "KS":
        # KS statistic: largest absolute entry of C0/C1, scaled by sigma_hat
        rslt = torch.max(torch.max(torch.abs(C1)), torch.max(torch.abs(C0))) / sigma_hat
    else:
        print("no such method")
        rslt = None
    return rslt
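
# Hedged usage sketch: dummy inputs only, to show the expected call shape of
# test_statistic above. C0/C1 are assumed to be 1-D tensors of partial-sum-type
# values and sigma_hat a positive scalar; none of these values come from the
# original experiment.
import torch

T = 200
C0 = torch.randn(T)
C1 = torch.randn(T)
sigma_hat = 1.3

cm = test_statistic(C0, T, sigma_hat, method="CM")
ks = test_statistic(C0, T, sigma_hat, method="KS", C1=C1)
print(float(cm), float(ks))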
def _mult(_w):
    _out = torch.zeros((x.size()[0], L))
    for i in range(L):
        _out[:, i] = torch.inner(x[:, :, i], _w[:, i])
    return _out
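
# Hedged aside: the loop above can be written as a single einsum, assuming x has
# shape (N, D, L) and _w has shape (D, L); both forms return an (N, L) tensor.
# The sizes below are illustrative only.
import torch

N, D, L = 5, 3, 4
x_demo = torch.randn(N, D, L)
w_demo = torch.randn(D, L)

looped = torch.stack([torch.inner(x_demo[:, :, i], w_demo[:, i]) for i in range(L)], dim=1)
vectorized = torch.einsum("ndl,dl->nl", x_demo, w_demo)
print(torch.allclose(looped, vectorized))  # True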
        self.predict = torch.nn.Linear(5, 1)

    def forward(self, x):
        x = torch.nn.functional.relu(self.f1(x))
        x = torch.nn.functional.relu(self.f2(x))
        out = self.predict(x)
        return out


M = DNN(MLP2, max_epochs=10)
x = torch.linspace(100, 100, obs).reshape(-1, 1)  # one-dim
# x = torch.normal(0., 1., size=[obs]).reshape(-1, 1)  # one-dim
y = 4 * x.squeeze() + e0  # one-dim
M.net_combine(x, y)
# plt.scatter(M.net(x).detach(), e0.detach())
res = e0 - M.net(x).detach().squeeze()
beta = torch.inner(x.squeeze(), y) / torch.inner(x.squeeze(), x.squeeze())
res0 = y - beta * x.squeeze()
C = C_resid(res0, res, res0, len(e0))
sigma_hat = compute_w(x, y, res0, res, len(e0), methods=['ols', "deep learning"])[1]
cm = test_statistic(C, obs, sigma_hat)
cm_lst += [cm]
if j % 100 == 0:
    print(j)
    print(f'{sigma_hat}, {cm}, {M.loss}, {beta}')
# %%
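
# Hedged aside: the `beta` computed above is the closed-form least-squares slope for
# a no-intercept model y ≈ beta * x. A quick self-contained check with synthetic data
# (values here are illustrative, not from the original experiment).
import torch

obs_demo = 1000
x_check = torch.linspace(-3, 3, obs_demo)
y_check = 4 * x_check + 0.1 * torch.randn(obs_demo)
beta_check = torch.inner(x_check, y_check) / torch.inner(x_check, x_check)
print(float(beta_check))  # close to 4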
models[path] = model

# calculate the patterns outputs
fc = model.model[0]
tanh = model.model[1]
output = tanh(fc(train_patterns))
first_layer_output[path] = output

# pairwise angles (in degrees) between the per-unit activation vectors
angles = {}
temp = output.clone().detach().T
for i in range(temp.shape[0]):
    for j in range(i + 1, temp.shape[0]):
        a = temp[i]
        b = temp[j]
        inner = torch.inner(a, b)
        a_norm = a.pow(2).sum().pow(0.5)
        b_norm = b.pow(2).sum().pow(0.5)
        cos = inner / (a_norm * b_norm)
        # clamp guards against NaN from acos when rounding pushes cos just outside [-1, 1]
        cos = torch.clamp(cos, -1.0, 1.0)
        angle = torch.acos(cos) * (180 / math.pi)
        angles[(i, j)] = angle

# save angles in dict
units_angles[path] = angles
print(f'number of pairs: {len(angles)}')

# print range of angles
min_a = 180
max_a = 0
for s, angle in angles.items():
    if angle.item() < min_a:
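
# Hedged aside: the pairwise-angle loop above can be vectorized. This sketch assumes
# `temp` is a 2-D tensor whose rows are the per-unit activation vectors; the sizes
# below are illustrative only.
import math
import torch

temp_demo = torch.randn(6, 32)                       # 6 units, 32 training patterns
normed = torch.nn.functional.normalize(temp_demo, dim=1)
cos_mat = normed @ normed.T                          # cosine similarity for every pair
angle_mat = torch.acos(cos_mat.clamp(-1, 1)) * (180 / math.pi)
i_idx, j_idx = torch.triu_indices(temp_demo.shape[0], temp_demo.shape[0], offset=1)
angles_demo = {(int(i), int(j)): angle_mat[i, j] for i, j in zip(i_idx, j_idx)}
print(len(angles_demo))                              # n * (n - 1) / 2 = 15 pairs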