def test_sum_and_labels(self):
    """Plot with and without sum or labels."""
    def test(g):
        for sum in [None, True, False]:
            for labels in [None, True, False]:
                g.plot(sum=sum, labels=labels)
    test(filters.Heat(self._graph, 10))  # one filter
    test(filters.Heat(self._graph, [10, 100]))  # multiple filters
def test_eigenvalues(self):
    """Plot with and without showing the eigenvalues."""
    graph = graphs.Sensor(20, seed=42)
    graph.estimate_lmax()
    filters.Heat(graph).plot()
    filters.Heat(graph).plot(eigenvalues=False)
    graph.compute_fourier_basis()
    filters.Heat(graph).plot()
    filters.Heat(graph).plot(eigenvalues=True)
    filters.Heat(graph).plot(eigenvalues=False)
def test_heat(self):
    f = filters.Heat(self._G, normalize=False, scale=10)
    self._test_methods(f, tight=False)
    f = filters.Heat(self._G, normalize=False, scale=np.array([5, 10]))
    self._test_methods(f, tight=False)
    f = filters.Heat(self._G, normalize=True, scale=10)
    np.testing.assert_allclose(np.linalg.norm(f.evaluate(self._G.e)), 1)
    self._test_methods(f, tight=False)
    f = filters.Heat(self._G, normalize=True, scale=[5, 10])
    np.testing.assert_allclose(np.linalg.norm(f.evaluate(self._G.e)[0]), 1)
    np.testing.assert_allclose(np.linalg.norm(f.evaluate(self._G.e)[1]), 1)
    self._test_methods(f, tight=False)
def test_inverse(self, frame_bound=3):
    """The inverse frame is the pseudo-inverse of the original frame."""
    g = filters.Heat(self._G, scale=[2, 3, 4])
    h = g.inverse()
    Ag, Bg = g.estimate_frame_bounds()
    Ah, Bh = h.estimate_frame_bounds()
    np.testing.assert_allclose(Ag * Bh, 1)
    np.testing.assert_allclose(Bg * Ah, 1)
    gL = g.compute_frame(method='exact')
    hL = h.compute_frame(method='exact')
    I = np.identity(self._G.N)
    np.testing.assert_allclose(hL.T.dot(gL), I, atol=1e-10)
    pinv = np.linalg.inv(gL.T.dot(gL)).dot(gL.T)
    np.testing.assert_allclose(pinv, hL.T, atol=1e-10)
    # The reconstruction is exact for any frame (lower bound A > 0).
    y = g.filter(self._signal, method='exact')
    z = h.filter(y, method='exact')
    np.testing.assert_allclose(z, self._signal)
    # Not invertible if not a frame.
    if sys.version_info > (3, 4):
        g = filters.Expwin(self._G)
        with self.assertLogs(level='WARNING'):
            h = g.inverse()
            h.evaluate(self._G.e)
    # If the frame is tight, the inverse is h = g / A.
    g += g.complement(frame_bound)
    h = g.inverse()
    he = g(self._G.e) / frame_bound
    np.testing.assert_allclose(h(self._G.e), he, atol=1e-10)
def test_kwargs(self):
    """Additional parameters can be passed to the mpl functions."""
    g = filters.Heat(self._graph)
    g.plot(alpha=1)
    g.plot(linewidth=2)
    g.plot(linestyle='-')
    g.plot(label='myfilter')
def _make_filter(self, tau=10):
    """Build the graph and heat filter used for denoising."""
    # NNGraph expects an array of coordinates; a bare zip() fails on Python 3.
    points = np.column_stack((self.x, self.y))
    graph = graphs.NNGraph(points, k=self.num_clusters)
    graph.estimate_lmax()
    # Higher tau: spikier signal, fewer points.
    fn = filters.Heat(graph, tau=tau)
    return fn
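For context, a minimal standalone sketch of how such a filter might be used; the point cloud and the `fn.filter` call are illustrative assumptions, not part of the class above:

import numpy as np
from pygsp import graphs, filters

# Illustrative point cloud standing in for self.x / self.y.
rng = np.random.default_rng(0)
points = rng.normal(size=(100, 2))

graph = graphs.NNGraph(points, k=5)
graph.estimate_lmax()
fn = filters.Heat(graph, tau=10)

# Diffuse a Kronecker delta to see how far heat spreads from one point.
delta = np.zeros(graph.N)
delta[0] = 1
smoothed = fn.filter(delta)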
def test_analysis(G):
    # Kronecker delta signal at vertex 83.
    S = np.zeros(G.N)
    vertex_delta = 83
    S[vertex_delta] = 1
    g = filters.Heat(G)
    c_exact = g.analysis(G, S, method='exact')
    c_cheby = g.analysis(G, S, method='cheby')
    # c_lancz = g.analysis(G, S, method='lanczos')
    np.testing.assert_allclose(c_exact, c_cheby)
def test_frame(self):
    """The frame is a stack of functions of the Laplacian."""
    g = filters.Heat(self._G, scale=[8, 9])
    gL1 = g.compute_frame(method='exact')
    gL2 = g.compute_frame(method='chebyshev', order=30)
    def get_frame(freq_response):
        return self._G.U.dot(np.diag(freq_response).dot(self._G.U.T))
    gL = np.concatenate([get_frame(gl) for gl in g.evaluate(self._G.e)])
    np.testing.assert_allclose(gL1, gL)
    np.testing.assert_allclose(gL2, gL, atol=1e-10)
def test_approximations(self):
    r"""
    Test that the 'exact' and 'chebyshev' methods for filter analysis
    produce the same output, and that 'lanczos' is rejected.
    """
    # TODO: done in _test_methods.
    f = filters.Heat(self._G)
    c_exact = f.filter(self._signal, method='exact')
    c_cheby = f.filter(self._signal, method='chebyshev')
    np.testing.assert_allclose(c_exact, c_cheby)
    self.assertRaises(ValueError, f.filter, self._signal, method='lanczos')
def compute_fast_exponential(G, t):
    # tic()
    G.estimate_lmax()
    f = filters.Heat(G, tau=t)
    # Compute the Chebyshev coefficients for the filter bank.
    coeff = filters.compute_cheby_coeff(f, m=30)
    result = Parallel(n_jobs=8)(
        delayed(processInput)(G, coeff, i) for i in range(0, G.N))
    result = np.array(result)
    # toc()
    return result[:, 0]
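`processInput` is not defined in this snippet. A plausible sketch, inferred from `result[:, 0]` collecting one value per node and mirroring the sequential `chebychev_sequential` version below (the name, signature, and return shape are guesses):

import numpy as np
from pygsp.filters.approximations import cheby_op

def processInput(G, coeff, i):
    # Kronecker delta at node i.
    delta = np.zeros(G.N)
    delta[i] = 1
    # Apply the Chebyshev-approximated heat kernel to the delta.
    f_prime = cheby_op(G, coeff, delta)
    # One value per node: the energy of the diffused delta.
    return np.array([np.linalg.norm(f_prime)])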
def test_all_filters(self):
    """Plot all filters."""
    for classname in dir(filters):
        if not classname[0].isupper():
            # Not a Filter class but a submodule or private stuff.
            continue
        Filter = getattr(filters, classname)
        if classname in ['Filter', 'Modulation', 'Gabor']:
            g = Filter(self._graph, filters.Heat(self._graph))
        else:
            g = Filter(self._graph)
        g.plot()
        plotting.close_all()
def test_localize(self):
    g = filters.Heat(self._G, 100)
    # Localize signal at node by filtering Kronecker delta.
    NODE = 10
    s1 = g.localize(NODE, method='exact')
    # Should be equal to a row / column of the filtering operator.
    gL = self._G.U.dot(np.diag(g.evaluate(self._G.e)[0]).dot(self._G.U.T))
    s2 = np.sqrt(self._G.N) * gL[NODE, :]
    np.testing.assert_allclose(s1, s2)
    # That is actually a row / column of the analysis operator.
    F = g.compute_frame(method='exact')
    np.testing.assert_allclose(F, gL)
def chebychev_sequential(G, t):
    from pygsp.filters.approximations import cheby_op  # needed by the loop below
    # tic()
    f = filters.Heat(G, tau=t)
    coeff = filters.compute_cheby_coeff(f, m=30)
    res = []
    for i in range(0, G.N):
        delta = np.zeros(G.N, dtype=int)
        delta[i] = 1
        f_prime = cheby_op(G, coeff, delta)
        concent_i = np.linalg.norm(f_prime)
        res.append(concent_i)
    # toc_time = toc()
    return np.array(res)
def heatDiffusion(G, taus):
    """Heat diffusion visualization."""
    g = filters.Heat(G, taus)
    s = np.zeros(G.N)
    DELTA = 20
    s[DELTA] = 1
    s = g.filter(s, method='chebyshev')
    fig = plt.figure(figsize=(10, 3))
    for i in range(g.Nf):
        ax = fig.add_subplot(1, g.Nf, i + 1, projection='3d')
        G.plot_signal(s[:, i], ax=ax)
        title = r'Heat diffusion, $\tau={}$'.format(taus[i])
        _ = ax.set_title(title)
        ax.set_axis_off()
    fig.tight_layout()
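A minimal driver for `heatDiffusion`, assuming a graph with 3D coordinates such as `graphs.Bunny` (the graph and the scales are illustrative choices):

import matplotlib.pyplot as plt
from pygsp import graphs

G = graphs.Bunny()
G.estimate_lmax()
heatDiffusion(G, taus=[1, 10, 25, 50])
plt.show()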
def compute_companies(G, ROI, start_indices, tau, nb_best_companies):
    """Compute neighbors of the starting vertices by filtering a signal
    according to the heat diffusion equation."""
    f = filters.Heat(G, tau)
    weighted_delta = np.zeros(G.N)
    weighted_delta[start_indices] = ROI[start_indices] / np.sum(ROI[start_indices])
    s = f.filter(weighted_delta)
    plt.subplot(211)
    plt.plot(weighted_delta)
    plt.subplot(212)
    plt.plot(s)
    company_indices = np.flip(np.argsort(s), 0)[:nb_best_companies]
    threshold = s[company_indices[-1]]
    plt.plot([threshold] * G.N, "r")
    return list(set(company_indices) - set(start_indices))
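Illustrative usage, assuming `G` is a pygsp graph over companies and `ROI` a per-company return vector aligned with the nodes (all values below are made up):

import numpy as np

rng = np.random.default_rng(42)
ROI = rng.uniform(size=G.N)      # made-up per-company returns
start_indices = [3, 17, 42]      # made-up seed companies
suggested = compute_companies(G, ROI, start_indices, tau=5,
                              nb_best_companies=20)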
def wavelet_basis_chebyshev(dataset, adj, s, laplacian_normalize, sparse_ness, threshold, weight_normalize):
    import numpy as np
    import scipy.sparse as sp
    from sklearn.preprocessing import normalize  # l1 row normalization (assumed source)
    from weighting_func import laplacian, fourier, weight_wavelet, weight_wavelet_inverse
    from pygsp import graphs, filters

    G = graphs.Graph(adj)
    taus = [s]
    g = filters.Heat(G, taus)
    signal_matrix = np.identity(G.N)
    Weight = g.filter(signal_matrix, method='chebyshev', order=50)

    # taus = [-s]
    # g = filters.Heat(G, taus)
    # signal_matrix = np.identity(G.N)
    # inverse_Weight = g.filter(signal_matrix, method='chebyshev', order=30)
    # The inverse and forward transforms must not share the same s,
    # otherwise the transform is not invertible.
    L = laplacian(adj, normalized=laplacian_normalize)
    lamb, U = fourier(dataset, L)
    # Weight = weight_wavelet(s, lamb, U)
    inverse_Weight = weight_wavelet_inverse(1.0, lamb, U)
    del U, lamb

    if sparse_ness:
        Weight[Weight < threshold] = 0.0
        inverse_Weight[inverse_Weight < threshold] = 0.0
    print(len(np.nonzero(Weight)[0]))
    print(len(np.nonzero(inverse_Weight)[0]))

    if weight_normalize:
        Weight = normalize(Weight, norm='l1', axis=1)
        inverse_Weight = normalize(inverse_Weight, norm='l1', axis=1)

    Weight = sp.csr_matrix(Weight)
    inverse_Weight = sp.csr_matrix(inverse_Weight)
    print(Weight, inverse_Weight)
    t_k = [inverse_Weight, Weight]
    # t_k = [Weight]
    return sparse_to_tuple(t_k)  # sparse_to_tuple: project-local helper, not defined here
def denoise_cluster(self, points, num_cluster, tau=10):
    """Check, per point, whether its cluster is preserved after denoising.

    :param points: point coordinates, one row per point
    :param num_cluster: number of clusters (also used as k for the NNGraph)
    :param tau: heat kernel scale
    Keeps only the points whose cluster_id is preserved, updating
    self.points and self.cluster_list in place (nothing is returned).
    """
    length = len(points)
    graph = graphs.NNGraph(points, k=num_cluster)
    graph.estimate_lmax()
    fn = filters.Heat(graph, tau=tau)
    # num_cluster x len(points) matrix of denoised indicator signals.
    signal = np.empty((num_cluster, length))
    # len(points) x num_cluster matrix of one-hot cluster indicators.
    vectors = np.zeros((length, num_cluster))
    # Fill the indicator matrix: one row per point, a 1 in its cluster column.
    for i, vec in enumerate(vectors):
        vec[self.cluster_list[i]] = 1
    vectors = vectors.T
    # Denoise each cluster indicator to find the dominant cluster of each point.
    for cluster_num, vec in enumerate(vectors):
        signal[cluster_num] = fn.analyze(vec)
    # Keep a point if its dominant cluster after denoising matches the original.
    dominant_cluster = np.argmax(signal, axis=0)
    vor_points, vor_clusters = [], []
    for index, coor in enumerate(self.points):
        if dominant_cluster[index] == int(self.cluster_list[index]):
            vor_points.append(coor)
            vor_clusters.append(self.cluster_list[index])
    self.points = vor_points
    self.cluster_list = vor_clusters
import numpy as np
from pygsp import graphs, filters

G = graphs.Logo()
G.estimate_lmax()
g = filters.Heat(G, tau=100)

DELTAS = [20, 30, 1090]
s = np.zeros(G.N)
s[DELTAS] = 1
s = g.filter(s)
G.plot_signal(s, highlight=DELTAS, backend='matplotlib')
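The same pattern extends to a bank of several diffusion scales at once; a sketch building on the script above (the scales are arbitrary):

g = filters.Heat(G, tau=[5, 25, 100])
s = np.zeros(G.N)
s[DELTAS] = 1
s = g.filter(s)  # shape (N, 3): one diffused signal per scale
for i in range(s.shape[1]):
    G.plot_signal(s[:, i], highlight=DELTAS, backend='matplotlib')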
def test_heat(G):
    g = filters.Heat(G)
def test_ax(self):
    """Axes are returned, but automatically created if not passed."""
    fig, ax = plt.subplots()
    fig2, ax2 = filters.Heat(self._graph).plot(ax=ax)
    self.assertIs(fig2, fig)
    self.assertIs(ax2, ax)
def get_filter(params, graph):
    if params.filter_name == 'heat':
        return filters.Heat(G=graph, scale=params.frequency)
    elif params.filter_name == 'simoncelli':
        return filters.Simoncelli(G=graph, a=params.frequency)
    raise ValueError('unknown filter: {}'.format(params.filter_name))
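Illustrative call, assuming `params` is something like an `argparse.Namespace` carrying `filter_name` and `frequency` (the structure of `params` is inferred from the attribute access above):

from types import SimpleNamespace
from pygsp import graphs, filters

params = SimpleNamespace(filter_name='heat', frequency=30)
graph = graphs.Sensor(64, seed=1)
graph.estimate_lmax()
g = get_filter(params, graph)
g.plot()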