import torch


def dottest(op, domain_tensor, range_tensor):
    """Dot-product (adjoint) test: for a linear operator, <A d, r> should match <d, A* r>."""
    d1 = torch.randn(domain_tensor.shape)
    r1 = torch.randn(range_tensor.shape)
    r2 = op.forward(d1)
    d2 = op.adjoint(r1)
    # Inner products over the flattened tensors (vdot conjugates its first argument)
    d_ = torch.vdot(d1.view(-1), d2.view(-1))
    r_ = torch.vdot(r1.view(-1), r2.view(-1))
    err_abs = d_ - r_
    err_rel = err_abs / d_
    print("Absolute error: %.6e" % abs(err_abs.item()))
    print("Relative error: %.6e \n" % abs(err_rel.item()))
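# A minimal usage sketch for the dot test above. MatMulOperator is a
# hypothetical operator class (any object exposing forward/adjoint works);
# for a real matrix A it verifies <A d, r> ~= <d, A^T r>.
class MatMulOperator:
    def __init__(self, mat):
        self.mat = mat

    def forward(self, x):
        return self.mat @ x

    def adjoint(self, y):
        return self.mat.T @ y


mat = torch.randn(5, 3)
dottest(MatMulOperator(mat), domain_tensor=torch.empty(3), range_tensor=torch.empty(5))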
def blas_lapack_ops(self):
    m = torch.randn(3, 3)
    a = torch.randn(10, 3, 4)
    b = torch.randn(10, 4, 3)
    v = torch.randn(3)
    return (
        torch.addbmm(m, a, b),
        torch.addmm(torch.randn(2, 3), torch.randn(2, 3), torch.randn(3, 3)),
        torch.addmv(torch.randn(2), torch.randn(2, 3), torch.randn(3)),
        torch.addr(torch.zeros(3, 3), v, v),
        torch.baddbmm(m, a, b),
        torch.bmm(a, b),
        torch.chain_matmul(torch.randn(3, 3), torch.randn(3, 3), torch.randn(3, 3)),
        # torch.cholesky(a),  # deprecated
        torch.cholesky_inverse(torch.randn(3, 3)),
        torch.cholesky_solve(torch.randn(3, 3), torch.randn(3, 3)),
        torch.dot(v, v),
        torch.eig(m),
        torch.geqrf(a),
        torch.ger(v, v),
        torch.inner(m, m),
        torch.inverse(m),
        torch.det(m),
        torch.logdet(m),
        torch.slogdet(m),
        torch.lstsq(m, m),
        torch.lu(m),
        torch.lu_solve(m, *torch.lu(m)),
        torch.lu_unpack(*torch.lu(m)),
        torch.matmul(m, m),
        torch.matrix_power(m, 2),
        # torch.matrix_rank(m),
        torch.matrix_exp(m),
        torch.mm(m, m),
        torch.mv(m, v),
        # torch.orgqr(a, m),
        # torch.ormqr(a, m, v),
        torch.outer(v, v),
        torch.pinverse(m),
        # torch.qr(a),
        torch.solve(m, m),
        torch.svd(a),
        # torch.svd_lowrank(a),
        # torch.pca_lowrank(a),
        # torch.symeig(a),  # deprecated
        # torch.lobpcg(a, b),  # not supported
        torch.trapz(m, m),
        torch.trapezoid(m, m),
        torch.cumulative_trapezoid(m, m),
        # torch.triangular_solve(m, m),
        torch.vdot(v, v),
    )
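# Several calls above (torch.eig, torch.lstsq, torch.solve, torch.lu,
# torch.chain_matmul, torch.ger, plus the commented-out torch.cholesky,
# torch.qr, torch.symeig, torch.matrix_rank, torch.svd) are deprecated or
# removed in recent PyTorch releases in favor of torch.linalg. A minimal
# migration sketch (hypothetical helper, assuming PyTorch >= 1.11):
def linalg_equivalents(m, a, v):
    return (
        torch.linalg.multi_dot([m, m, m]),   # replaces torch.chain_matmul
        torch.linalg.eig(m),                 # replaces torch.eig
        torch.linalg.lstsq(m, m),            # replaces torch.lstsq
        torch.linalg.solve(m, m),            # replaces torch.solve (args: A first, then B)
        torch.linalg.lu_factor(m),           # replaces torch.lu
        torch.outer(v, v),                   # replaces torch.ger
        torch.linalg.cholesky(m @ m.T + 3 * torch.eye(3)),  # replaces torch.cholesky (needs SPD input)
        torch.linalg.qr(a),                  # replaces torch.qr
        torch.linalg.eigh(m + m.T),          # replaces torch.symeig (needs symmetric input)
        torch.linalg.matrix_rank(m),         # replaces torch.matrix_rank
        torch.linalg.svd(a),                 # replaces torch.svd (returns Vh, not V)
    )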
def vdot(self, other):
    # Real inputs: vdot and dot coincide, so dispatch straight to dot.
    if not self.is_complex():
        return torch.dot(self, other)
    # Peel off conjugate views so the kernel sees plain tensors; since
    # vdot(a, b) = conj(a) . b, the identities below hold:
    if self.is_conj():
        if other.is_conj():
            return torch.vdot(other.conj(), self.conj())
        else:
            return torch.dot(self.conj(), other)
    elif other.is_conj():
        return torch.dot(self, other.conj()).conj()
    dot_check(self, other)
    return self.new_empty(())
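# A quick sanity check of the conjugation convention (hypothetical demo,
# not part of the registration above): torch.vdot conjugates its first
# argument, while torch.dot does not.
a = torch.tensor([1 + 2j, 3 - 1j])
b = torch.tensor([2 - 1j, 1 + 4j])
assert torch.allclose(torch.vdot(a, b), torch.dot(a.conj(), b))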
def dot(self, other):
    # Checking whether the input is a vector or not
    if not isinstance(other, VectorTorch):
        raise TypeError("Provided input vector not a %s!" % self.whoami)
    # Checking size (must have same number of elements)
    if self.size != other.size:
        raise ValueError("Vector size mismatching: self = %d; other = %d" % (self.size, other.size))
    # Checking dimensionality
    if not self.checkSame(other):
        raise ValueError("Dimensionality not equal: self = %s; other = %s" % (self.shape, other.shape))
    return torch.vdot(self.getNdArray().flatten(), other.getNdArray().flatten())
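# A hedged usage sketch. It assumes VectorTorch can be constructed directly
# from a torch tensor (the constructor signature is an assumption here);
# mismatched sizes or shapes would raise the errors above.
x = VectorTorch(torch.randn(4, 5))
y = VectorTorch(torch.randn(4, 5))
xy = x.dot(y)  # inner product of the flattened 20-element vectors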
a_model = A(S_model, psi_model, pos[take_ind])
loss = smooth_amplitude_loss(a_model, indices_target[take_ind], counts_target[take_ind])
loss_sum = loss.mean()
sum_loss += loss_sum.item()
loss_sum.backward()
# if i > probe_start:
#     plotAbsAngle(psi_model.grad[0].cpu().detach().numpy(), 'psi_model.grad')
#     plotAbsAngle(S_model[0].cpu().detach().numpy(), 'S_model')
optimizer.step()
optimizer.zero_grad()

# Align the global phase of the target T with the current estimate before
# measuring the error: c = <T, S_model> gives the phase offset.
c = th.vdot(T[slic].ravel(), S_model[slic].ravel())
T_hat = T * th.exp(-1j * th.angle(c))
dist = th.norm(S_model[slic] - T_hat[slic])
x_norm = th.norm(T)
err = dist / x_norm
errs.append(err)

sum_loss /= n_batches
losses.append(sum_loss)
print(f'{i:3d} loss: {sum_loss} err: {err}')
# print(f'i {i} loss {sum_loss}, C_model = {C_model[0]}, C_target = {C_target[0]}')

# %%
d = margin + M[0] // 2
d = 1
plotAbsAngle(S_model[0, d:-d, d:-d].cpu().detach().numpy(), 'S_model')
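# The error metric above removes the global phase ambiguity of the
# reconstruction: S and exp(1j*phi) * S fit the measured intensities equally
# well, so the target is rotated by the optimal phase before comparison.
# A hypothetical standalone helper (names are assumptions, logic mirrors
# the loop above):
def phase_aligned_error(estimate, target):
    # Optimal global phase is the angle of <target, estimate>
    c = th.vdot(target.ravel(), estimate.ravel())
    target_aligned = target * th.exp(-1j * th.angle(c))
    return th.norm(estimate - target_aligned) / th.norm(target)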