Example 1
    def graph_and_signal_learning(self, time):
        # Periodically re-learn the graph, then denoise the signal on it.
        if time % self.jump_step == 0:
            if (self.mode == 1) or (self.denoised_signal is None):
                print('mode 1, Update Graph On Noisy Signal')
                Z = euclidean_distances(self.avaiable_noisy_signal.T,
                                        squared=True)
            elif self.mode == 2:
                print('mode 2, Update Graph On Mixed Signal')
                Z = euclidean_distances(self.mix_signal.T, squared=True)
            elif self.mode == 3:
                print('mode 3, Update Graph On Denoised Signal')
                Z = euclidean_distances(self.denoised_signal.T, squared=True)
            else:
                # Originally a silent pass, which left Z undefined below.
                raise ValueError('unknown mode: %s' % self.mode)

            np.fill_diagonal(Z, 0)
            Z = norm_W(Z, self.user_num)
            primal_gl = Primal_dual_gl(self.user_num,
                                       Z,
                                       alpha=self.gl_alpha,
                                       beta=self.gl_beta,
                                       step_size=self.gl_step_size)
            primal_adj, error = primal_gl.run()
            self.adj = primal_adj.copy()

        # Tikhonov-style denoising on the current graph: X (I + theta L)^{-1}.
        lap = csgraph.laplacian(self.adj, normed=False)
        self.denoised_signal = np.dot(
            self.avaiable_noisy_signal,
            np.linalg.inv(np.identity(self.user_num) + self.gl_theta * lap))
        self.noisy_signal_copy[self.picked_items] = self.denoised_signal
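The examples on this page form the inverse of (I + theta * L) explicitly. A numerically gentler but mathematically equivalent variant is sketched below; it assumes only NumPy, and the name tikhonov_denoise is introduced here for illustration, not taken from the source.

import numpy as np

def tikhonov_denoise(X, lap, theta):
    # Computes X @ inv(I + theta * lap) without forming the inverse.
    # (I + theta * lap) is symmetric, so X A^{-1} equals solve(A, X.T).T.
    A = np.identity(lap.shape[0]) + theta * lap
    return np.linalg.solve(A, X.T).T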
Example 2

def learn_graph(signal, node_num, alpha, beta, step_size):
    # Pairwise squared distances between node signals drive the graph fit.
    Z = euclidean_distances(signal.T, squared=True)
    np.fill_diagonal(Z, 0)
    Z = norm_W(Z, node_num)
    primal_gl = Primal_dual_gl(node_num, Z, alpha=alpha, beta=beta,
                               step_size=step_size)
    primal_adj, error = primal_gl.run()  # error: convergence trace (discarded)
    lap = csgraph.laplacian(primal_adj, normed=False)
    return primal_adj, lap
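A minimal usage sketch for learn_graph; the random data is illustrative, not from the source.

import numpy as np

rng = np.random.default_rng(0)
toy_signal = rng.standard_normal((20, 10))  # 20 signals over 10 nodes
adj, lap = learn_graph(toy_signal, node_num=10, alpha=1, beta=0.2, step_size=0.5)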
Example 3
def Primal_dual_gl_loop(node_num,
                        signal,
                        iteration,
                        alpha=1,
                        beta=0.2,
                        theta=0.01,
                        step_size=0.5):
    # Alternate between graph learning and signal denoising.
    for i in range(iteration):
        print('Primal Dual GL Time ~~~~~~~~~', i)
        Z = euclidean_distances(signal.T, squared=True)
        np.fill_diagonal(Z, 0)
        Z = norm_W(Z, node_num)
        primal_gl = Primal_dual_gl(node_num,
                                   Z,
                                   alpha=alpha,
                                   beta=beta,
                                   step_size=step_size)
        primal_adj, error = primal_gl.run()
        print('graph error', error[-1])
        lap = csgraph.laplacian(primal_adj, normed=False)
        # Denoise by right-multiplying with (I + theta * L)^{-1}.
        signal = np.dot(signal,
                        np.linalg.inv(np.identity(node_num) + theta * lap))
    return primal_adj, lap, signal
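Assuming signal is a (num_signals x node_num) array, a call might look like this; the iteration count is illustrative.

primal_adj, lap, denoised = Primal_dual_gl_loop(node_num, signal, iteration=5,
                                                alpha=1, beta=0.2, theta=0.01)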
Example 4

error_sigma = 0.01
knn_adj, knn_lap, knn_pos = knn_graph(node_num)
X, X_noise, item_features = generate_signal(signal_num, node_num, knn_pos,
                                            error_sigma)

rbf_dis = rbf_kernel(X_noise.T)
np.fill_diagonal(rbf_dis, 0)

Z = rbf_dis
Z = filter_graph_to_knn(Z, node_num)

alpha = 3
beta = 0.5
w_0 = np.zeros(int((node_num - 1) * node_num / 2))  # one weight per node pair (unused below)
c = 0
primal_gl = Primal_dual_gl(node_num, Z, alpha, beta, c=c)
# This variant of run() also returns the learned weights in vector form.
vector_adj, primal_adj, error = primal_gl.run()

fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(5, 8))
# adj_matrix is the ground-truth W, defined elsewhere in the source script.
c1 = ax1.pcolor(adj_matrix, cmap='RdBu')
ax1.set_title('Ground Truth W')
c2 = ax2.pcolor(primal_adj, cmap='RdBu')
ax2.set_title('Learned W')
fig.colorbar(c1, ax=ax1)
fig.colorbar(c2, ax=ax2)
plt.show()

Example 5
smoothness = []
primal_adj = np.identity(node_num)
for i in range(5):
    print('i', i)
    Z = euclidean_distances(signals.T, squared=True)
    np.fill_diagonal(Z, 0)
    Z = norm_W(Z, node_num)

    ## graph learning
    alpha = 1   # larger alpha gives larger edge weights
    beta = 0.2  # larger beta gives a denser graph (not used by Gl_sigrep)
    theta = 0.01
    #primal_gl = Gl_sigrep(node_num, Z, alpha=alpha, beta=beta, step_size=0.5)
    primal_gl = Primal_dual_gl(node_num,
                               Z,
                               alpha=alpha,
                               beta=beta,
                               step_size=0.01)
    # The ground-truth adjacency is passed in so run() can track the error.
    primal_adj, error = primal_gl.run(adj_matrix)
    laplacian = csgraph.laplacian(primal_adj, normed=False)
    signals = np.dot(
        signals, np.linalg.inv((np.identity(node_num) + theta * laplacian)))
    smooth = calculate_smoothness(signals, laplacian)
    smoothness.append(smooth)

    #print('adj_matrix \n', adj_matrix)
    #print('primal_adj \n', primal_adj)

    print('X\n', X[0, :])
    print('signals\n', signals[0, :])
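calculate_smoothness is not defined on this page. A common measure it may correspond to is the Laplacian quadratic form tr(X L X^T); a minimal sketch under that assumption:

import numpy as np

def calculate_smoothness(signals, laplacian):
    # Assumed definition: tr(X L X^T); smaller values mean the signals
    # vary less across connected nodes, i.e. are smoother on the graph.
    return np.trace(signals @ laplacian @ signals.T)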
Example 6

ZZ = norm_W(Z, node_num)

alpha_list = np.arange(0, 10, 0.1)
beta_list = np.arange(0, 2, 0.1)
s_list = []
ge_list = []
edge_num_list = []
edge_weight_list = []
par_list = []
for alpha in alpha_list:
    for beta in beta_list:
        print('alpha, beta', alpha, beta)
        par_list.append([alpha, beta])
        primal_gl = Primal_dual_gl(node_num,
                                   ZZ,
                                   alpha=alpha,
                                   beta=beta,
                                   step_size=0.5)
        primal_adj, error = primal_gl.run()
        primal_lap = csgraph.laplacian(primal_adj, normed=False)
        graph_error = np.linalg.norm(primal_adj - true_adj)
        ge_list.append(graph_error)
        sm = find_smoothness(test_signal, primal_lap)
        s_list.append(sm)
        # Count edges whose learned weight exceeds a small threshold.
        a = primal_adj[np.triu_indices(node_num, 1)]
        b = a[a > 0.1]
        edge_num = len(b)
        edge_weight = np.sum(primal_adj)
        edge_weight_list.append(edge_weight)
        edge_num_list.append(edge_num)
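One way to use the collected lists (illustrative, not from the source): pick the (alpha, beta) pair whose learned graph is closest to the ground truth.

best_idx = int(np.argmin(ge_list))
best_alpha, best_beta = par_list[best_idx]
print('best alpha, beta:', best_alpha, best_beta, 'graph error:', ge_list[best_idx])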
Example 7

nodes = nx.draw_networkx_nodes(graph, node_features, node_size=100,
                               node_color=clear_sample_signal, cmap=plt.cm.jet)
edges = nx.draw_networkx_edges(graph, node_features, width=1.0, alpha=0.5,
                               edge_color=edge_color, edge_cmap=plt.cm.gray,
                               vmin=0.0, vmax=1.0)
plt.axis('off')
plt.title('True Graph', fontsize=12)
plt.show()


alpha = 1
beta = 0.2
step_size = 0.5
theta = 10
test_signal = clear_signal
Z = euclidean_distances(test_signal.T, squared=True)
np.fill_diagonal(Z, 0)
Z = norm_W(Z, node_num)
primal_gl = Primal_dual_gl(node_num, Z, alpha=alpha, beta=beta,
                           step_size=step_size)
primal_adj, error = primal_gl.run()
lap = csgraph.laplacian(primal_adj, normed=False)
signal = np.dot(test_signal, np.linalg.inv(np.identity(node_num) + theta * lap))

pos = node_features
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(pos[:, 0], pos[:, 1], c=test_signal[0], cmap=plt.cm.jet)
ax2.scatter(pos[:, 0], pos[:, 1], c=signal[0], cmap=plt.cm.jet)
plt.show()
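To quantify the effect of the theta-smoothing above, a short check (a sketch; it assumes the Laplacian quadratic form as the smoothness measure):

before = np.trace(test_signal @ lap @ test_signal.T)
after = np.trace(signal @ lap @ signal.T)
print('smoothness before/after denoising:', before, after)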

Example 8
trace1 = []
trace2 = []
trace3 = []
trace4 = []

signal = X[0, :]

for i in np.arange(10):
    print('i', i)

    Z = euclidean_distances(signal.reshape(-1, 1), squared=True)
    np.fill_diagonal(Z, 0)
    ## Learn Graph
    alpha = 0.1
    beta = 1
    primal_gl = Primal_dual_gl(node_num, Z, alpha, beta)
    primal_adj, error = primal_gl.run(adj_matrix)
    laplacian = csgraph.laplacian(primal_adj, normed=False)
    ## Learn signal
    gamma = 3
    G = graphs.Graph(primal_adj)
    G.compute_differential_operator()
    L = G.D.toarray()  # graph differential operator
    d = pyunlocbox.functions.dummy()  # evaluates to zero; a placeholder objective
    r = pyunlocbox.functions.norm_l2(A=L, tight=False)  # smoothness prior ||L x||^2
    f = pyunlocbox.functions.norm_l2(w=1, y=signal.copy(), lambda_=gamma)  # data fidelity

    step = 0.999 / np.linalg.norm(np.dot(L.T, L) + gamma, 2)
    solver = pyunlocbox.solvers.gradient_descent(step=step)
    x0 = signal.copy()
    prob2 = pyunlocbox.solvers.solve([r, f],