# 예제 #1 (Example #1)
# 0
"""Example 1: rank-2 NMF of the polblogs network via TFNMF (gradient descent).

Reads an edge list from ``polblogs.pkl``, builds a symmetric 0/1 adjacency
matrix, and factorizes it into W (1222 x 2) and H (2 x 1222), writing
TensorBoard summaries under ``logadagrad``.
"""
import os
import shutil

import pandas as pd
import tensorflow as tf
from scipy.sparse import lil_matrix

from tffactorization.tfnmf import TFNMF


# Remove stale logs from a previous run. shutil.rmtree is portable and
# avoids shelling out (the old `os.system("rm -rf ...")` was Unix-only).
shutil.rmtree("logadagrad", ignore_errors=True)

# elist: iterable of (node1, node2) edge pairs; com: second pickled object
# (presumably community labels — unused here).
elist, com = pd.read_pickle("polblogs.pkl")

# polblogs has 1222 nodes; fill a symmetric (undirected) adjacency matrix.
# lil_matrix is efficient for this incremental element-wise construction.
mat = lil_matrix((1222, 1222))
for n1, n2 in elist:
    mat[n1, n2] = 1
    mat[n2, n1] = 1
V = mat.todense()

# Rank-2 factorization using plain gradient descent.
tfnmf = TFNMF(V, 2, algo="grad", learning_rate=0.1)

# TF 0.x/1.x-era session API (tf.InteractiveSession).
sess = tf.InteractiveSession()
W, H = tfnmf.run(sess, logfile="logadagrad", max_iter=1000)

import tensorflow as tf
# 예제 #2 (Example #2)
# 0
"""Example 2: factorize an image patch with TFNMF, then build an explicit
TF loss graph (with summary logging) over the same data.

Uses a 400 x 200 single-channel slice of a scikit-learn sample image as the
target matrix.
"""
import shutil
import time

import numpy as np
from sklearn.datasets import load_sample_images

from tffactorization.tfnmf import TFNMF

# Remove stale logs from a previous run. shutil.rmtree is portable and
# avoids shelling out (the old `os.system("rm -rf ...")` was Unix-only;
# `os` was also never imported in this section).
shutil.rmtree("logtest", ignore_errors=True)

sess = tf.InteractiveSession()
# TF 0.x summary API (tf.train.SummaryWriter / scalar_summary /
# merge_all_summaries); on TF >= 1.0 these moved to tf.summary.* —
# NOTE(review): update the calls if running on a modern TF 1.x install.
writer = tf.train.SummaryWriter("logtest", sess.graph_def)

K = 2  # factorization rank

# Target matrix: first sample image, 400x200 crop, single channel.
# (The original also built an unused 8x8 random matrix here — dead code,
# immediately overwritten; removed.)
v = load_sample_images().images[0][0:400, 0:200, 0]
N = v.shape[0]
M = v.shape[1]

# Random initial factors for the hand-built graph further below.
w = np.random.rand(N, K).astype(np.float32)
h = np.random.rand(K, M).astype(np.float32)

# --- NMF via TFNMF, timed ---
tfnmf = TFNMF(v, K)
start = time.time()
W3, H3 = tfnmf.run(sess)
end = time.time()

# Mean squared reconstruction error of the TFNMF result.
# (Named `mse` instead of `loss` so it is not shadowed by the TF tensor below.)
mse = np.power(v - np.matmul(W3, H3), 2).sum() / (M * N)
print(end - start)
print("loss:", mse)

# --- Hand-built loss graph with summaries and two optimizers ---
V = tf.placeholder("float", shape=[N, M])
W = tf.Variable(w)
H = tf.Variable(h)
loss = tf.nn.l2_loss(V - tf.matmul(W, H)) / (N * M)
tf.scalar_summary("loss", loss)
merged = tf.merge_all_summaries()
# NOTE(review): ClippedAdagradOptimizer / ClippedGDOptimizer are not defined
# or imported anywhere in this file — presumably they come from the
# tffactorization package; confirm and add the import.
opt1 = ClippedAdagradOptimizer(1.0).minimize(loss)
opt2 = ClippedGDOptimizer(1.0).minimize(loss)