def test_multi_level(self, dtype, device):
    """Round-trip a constant signal through a multi-level QmfOperator.

    NOTE(review): the forward pass here uses ``basis(x)`` rather than
    ``basis.transform(x)`` as in the sibling tests — presumably the
    operator defines ``__call__``; confirm this is intentional.
    """
    num_nodes = 16
    graph = rand_udg(num_nodes, 0.2, dtype, device)
    bptG, beta, _, _, _ = harary(graph)
    basis = QmfOperator(bptG, beta, order=4, device=device)
    signal = torch.ones(num_nodes, 1, dtype=dtype, device=device)
    coeffs = basis(signal)
    recon = basis.inverse_transform(coeffs)
    # Report reconstruction quality and total absolute deviation.
    print("\nsnr: ", snr(recon.permute(-1, -2), signal.permute(-1, -2)).item(), "dB.")
    print("dis: ", (recon - signal).abs().sum())
def test_multi_level(self, dtype, device):
    """Round-trip a constant signal through a multi-level BiorthOperator."""
    num_nodes = 120
    graph = rand_udg(num_nodes, 0.4, dtype, device)
    bptG, beta, _, _, _ = harary(graph)
    basis = BiorthOperator(bptG, beta, k=2, device=device)
    signal = torch.ones(num_nodes, 1, dtype=dtype, device=device)
    coeffs = basis.transform(signal)
    recon = basis.inverse_transform(coeffs)
    # Report reconstruction quality and total absolute deviation.
    print("\nsnr: ", snr(recon.permute(-1, -2), signal.permute(-1, -2)).item(), "dB.")
    print("dis: ", (recon - signal).abs().sum())
    self.display_density(basis.operator, basis.inv_operator)
def test_transform(self, device, dtype):
    """Analyze/synthesize a random signal with a hand-built QmfCore.

    Builds M random bipartite graphs with edge-disjoint supports
    (each new graph's edges overlapping earlier ones are zeroed out),
    alternating the partition indicator between levels.
    """
    M, N = 2, 32
    K = 30  # Chebyshev approximation order for QmfCore.
    bipartite_graphs = []
    partitions = []
    subgraph, part = rand_bipartite(
        N // 2, N - N // 2, p=0.2, dtype=dtype, device=device,
        return_partition=True,
    )
    bipartite_graphs.append(subgraph)
    partitions.append(part)
    mask = bipartite_graphs[0].to_dense() != 0
    for level in range(M - 1):
        subgraph, part = rand_bipartite(
            N // 2, N - N // 2, p=0.2, dtype=dtype, device=device,
            return_partition=True,
        )
        adj = subgraph.to_dense()
        fresh_mask = adj != 0
        # Remove edges already claimed by earlier levels, then widen the mask.
        adj[mask] = 0
        mask = fresh_mask + mask
        bipartite_graphs.append(SparseTensor.from_dense(adj))
        # Alternate the partition orientation between successive levels.
        if level % 2 == 0:
            partitions.append(part)
        else:
            partitions.append(~part)
    beta = torch.stack(partitions).T
    qmf = QmfCore(bipartite_graphs=bipartite_graphs, beta=beta, order=K)
    x = torch.rand(N, dtype=dtype, device=device)
    y = qmf.analyze(x)
    assert y.shape == (2**M, N, 1)
    z = qmf.synthesize(y)
    assert z.shape == (2**M, N, 1)
    # Reconstruction is approximate (order-K polynomial), so the residual
    # is expected to be nonzero.
    assert (z.sum(0).squeeze() - x).abs().sum() != 0
    z.squeeze_()
    f_hat = z.sum(0)
    dis = (f_hat - x).abs()
    pf = snr(f_hat, x)
    ppprint(dis, pf)
def test_transform(self, dtype, device, strategy, M, N):
    """NumQmf round-trip on a random graph; requires SNR above 20 dB."""
    # ignore_reinit_error lets repeated parametrized runs share one ray session.
    ray.init(num_cpus=2, log_to_driver=False, ignore_reinit_error=True)
    graph = rand_udg(N, device=device, dtype=dtype)
    qmf = NumQmf(graph, strategy=strategy, level=M)
    f = torch.rand(N, device=device, dtype=dtype)
    coeffs = qmf.analyze(f)
    recon = qmf.synthesize(coeffs)
    recon.squeeze_()
    f_hat = recon.sum(0)
    dis = (f_hat - f).abs()
    pf = snr(f_hat, f).item()
    print(
        f"\n|----- Strategy: {strategy:8s}, M={M}, "
        f"Device: {str(device):5s}, Dtype: {str(dtype):6s} -----"
    )
    ppprint(dis, pf)
    assert pf > 20
def test_transform(self, dtype, device, strategy, Ci):
    """ColorQmf round-trip with Ci input channels; requires SNR above 20 dB."""
    N = 60
    graph = rand_udg(N, device=device, dtype=dtype)
    qmf = ColorQmf(graph, strategy=strategy, in_channels=1)
    M = qmf.num_bgraph
    f = torch.rand(N, Ci, device=device, dtype=dtype)
    coeffs = qmf.analyze(f)
    recon = qmf.synthesize(coeffs)
    recon.squeeze_()
    f_hat = recon.sum(0)
    dis = (f_hat - f).abs()
    # Note: squeeze_ mutates f in place so shapes line up for snr.
    pf = snr(f_hat, f.squeeze_()).item()
    print(
        f"\n|-- Strategy: {strategy:8s}, M={M}, "
        f"Device: {str(device):5s}, Dtype: {str(dtype):6s} ---"
    )
    ppprint(dis, pf)
    assert pf > 20
def test_transform(self, dtype, device, strategy, zero_dc):
    """ColorBiorth round-trip on a random graph; requires SNR above 20 dB.

    NOTE(review): ``zero_dc`` is accepted but unused in the body —
    presumably a pytest parametrization hook; verify against the fixtures.
    """
    N = 100
    graph = rand_udg(N, device=device, dtype=dtype)
    bio = ColorBiorth(graph, strategy=strategy)
    M = bio.num_bgraph
    # Draw the signal with the filter bank's own device/dtype.
    f = torch.rand(N, device=bio.device, dtype=bio.dtype)
    coeffs = bio.analyze(f)
    recon = bio.synthesize(coeffs)
    recon.squeeze_()
    f_hat = recon.sum(0)
    dis = (f_hat - f).abs()
    pf = snr(f_hat, f).item()
    print(
        f"\n|----- Strategy: {strategy:8s}, M:{M:4d} "
        f"Device: {str(device):5s}, Dtype: {str(dtype):6s}"
    )
    ppprint(dis, pf)
    assert pf > 20
def test_one_level(self, dtype, device):
    """Round-trip a constant signal through a single-level QmfOperator."""
    n_left, n_right = 6, 4
    bptG, beta = rand_bipartite(
        n_left, n_right, 0.5, dtype=dtype, return_partition=True
    )
    basis = QmfOperator(
        [bptG.to_scipy("csr")], beta.view(-1, 1), order=20, device=device
    )
    signal = torch.ones(n_left + n_right, 1, dtype=dtype, device=device)
    coeffs = basis.transform(signal)
    recon = basis.inverse_transform(coeffs)
    # Report reconstruction quality and total absolute deviation.
    print("\nsnr: ", snr(recon.permute(-1, -2), signal.permute(-1, -2)).item(), "dB.")
    print("dis: ", (recon - signal).abs().sum())
def test_one_level(self, dtype, device):
    """Round-trip a constant signal through a single-level BiorthOperator."""
    n_left, n_right = 60, 40
    bptG, beta = rand_bipartite(
        n_left, n_right, 0.2, dtype=dtype, return_partition=True
    )
    basis = BiorthOperator(
        [bptG.to_scipy("csr")], beta.view(-1, 1), k=2, device=device
    )
    signal = torch.ones(n_left + n_right, 1, dtype=dtype, device=device)
    coeffs = basis.transform(signal)
    recon = basis.inverse_transform(coeffs)
    # Report reconstruction quality and total absolute deviation.
    print("\nsnr: ", snr(recon.permute(-1, -2), signal.permute(-1, -2)).item(), "dB.")
    print("dis: ", (recon - signal).abs().sum())
    self.display_density(basis.operator, basis.inv_operator)
def test_transform(self, dtype, device, strategy, M, part):
    """NumBiorth round-trip on a random graph; requires SNR above 20 dB."""
    # ignore_reinit_error lets repeated parametrized runs share one ray session.
    ray.init(num_cpus=2, log_to_driver=False, ignore_reinit_error=True)
    N = 32 * 3
    print(
        f"\n|----- Strategy: {strategy:8s}, M={M}, "
        f"Device: {str(device):5s}, Dtype: {str(dtype):6s} ----"
    )
    kwargs = dict()
    if strategy == "admm":
        # The admm strategy additionally takes a partition scheme.
        print("|admm-lbga part strategy: {}".format(part))
        kwargs["part"] = part
    graph = rand_udg(N, device=device, dtype=dtype)
    bio = NumBiorth(graph, strategy=strategy, level=M, **kwargs)
    f = torch.randn(N, device=device, dtype=dtype)
    coeffs = bio.analyze(f)
    recon = bio.synthesize(coeffs)
    recon.squeeze_()
    f_hat = recon.sum(0)
    dis = (f_hat - f).abs()
    pf = snr(f_hat, f).item()
    ppprint(dis, pf)
    assert pf > 20