# Shared imports for the benchmark runners below; module paths follow the
# demo scripts later in this collection (the CP_ALS path is assumed by
# analogy with the other factorization modules).
from tensorD.factorization.env import Environment
from tensorD.dataproc.provider import Provider
from tensorD.factorization.cp import CP_ALS
from tensorD.factorization.ncp import NCP_BCU
from tensorD.factorization.tucker import HOOI
from tensorD.factorization.ntucker import NTUCKER_BCU
from tensorD.demo.DataGenerator import *


def ncp_run(N1, N2, N3, gR, dR, time):
    # ncp test
    X = synthetic_data_cp([N1, N2, N3], gR, 0)
    data_provider = Provider()
    data_provider.full_tensor = lambda: X
    env = Environment(data_provider, summary_path='/tmp/ncp_' + str(N1))
    ncp = NCP_BCU(env)
    args = NCP_BCU.NCP_Args(rank=dR, validation_internal=200)
    ncp.build_model(args)
    print('\n\nNCP with %dx%dx%d, gR=%d, dR=%d, time=%d' % (N1, N2, N3, gR, dR, time))
    loss_hist = ncp.train(6000)
    scale = str(N1) + '_' + str(gR) + '_' + str(dR)
    out_path = '/root/tensorD_f/data_out_tmp/python_out/ncp_' + scale + '_' + str(time) + '.txt'
    with open(out_path, 'w') as out:
        for loss in loss_hist:
            out.write('%.6f\n' % loss)
def cp_run(N1, N2, N3, gR, dR, time):
    # cp test
    X = synthetic_data_cp([N1, N2, N3], gR, 0)
    data_provider = Provider()
    data_provider.full_tensor = lambda: X
    env = Environment(data_provider, summary_path='/tmp/cp_' + str(N1))
    cp = CP_ALS(env)
    args = CP_ALS.CP_Args(rank=dR, validation_internal=50, tol=1.0e-4)
    cp.build_model(args)
    print('CP with %dx%dx%d, gR=%d, dR=%d, time=%d' % (N1, N2, N3, gR, dR, time))
    hist = cp.train(600)
    scale = str(N1) + '_' + str(gR) + '_' + str(dR)
    out_path = '/root/tensorD_f/data_out_tmp/python_out/cp_' + scale + '_' + str(time) + '.txt'
    with open(out_path, 'w') as out:
        for entry in hist:  # each entry holds (loss, relative residual); `entry` avoids shadowing the built-in `iter`
            loss = entry[0]
            rel_res = entry[1]
            out.write('%.10f, %.10f\n' % (loss, rel_res))
def tucker_run(N1, N2, N3, gR, dR, time):
    # tucker
    X = synthetic_data_tucker([N1, N2, N3], [gR, gR, gR])
    data_provider = Provider()
    data_provider.full_tensor = lambda: X
    env = Environment(data_provider, summary_path='/tmp/tucker_' + str(N1))
    hooi = HOOI(env)
    args = HOOI.HOOI_Args(ranks=[dR, dR, dR], validation_internal=200)
    hooi.build_model(args)
    print('\n\nTucker with %dx%dx%d, gR=%d, dR=%d, time=%d' % (N1, N2, N3, gR, dR, time))
    loss_hist = hooi.train(6000)
    scale = str(N1) + '_' + str(gR) + '_' + str(dR)
    out_path = '/root/tensorD_f/data_out_tmp/python_out/tucker_' + scale + '_' + str(time) + '.txt'
    with open(out_path, 'w') as out:
        for loss in loss_hist:
            out.write('%.6f\n' % loss)
def ntucker_run(N1, N2, N3, gR, dR, time):
    # ntucker
    X = synthetic_data_tucker([N1, N2, N3], [gR, gR, gR], 0)
    data_provider = Provider()
    data_provider.full_tensor = lambda: X
    env = Environment(data_provider, summary_path='/tmp/ntucker_' + str(N1))
    ntucker = NTUCKER_BCU(env)
    args = NTUCKER_BCU.NTUCKER_Args(ranks=[dR, dR, dR], validation_internal=500, tol=1.0e-4)
    ntucker.build_model(args)
    print('\n\nNTucker with %dx%dx%d, gR=%d, dR=%d, time=%d' % (N1, N2, N3, gR, dR, time))
    loss_hist = ntucker.train(10000)
    scale = str(N1) + '_' + str(gR) + '_' + str(dR)
    out_path = '/root/tensorD_f/data_out_tmp/python_out/ntucker_' + scale + '_' + str(time) + '.txt'
    with open(out_path, 'w') as out:
        for loss in loss_hist:
            out.write('%.6f\n' % loss)
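
# A minimal driver sketch for the four runners above. The size/rank grids and
# the repetition count here are illustrative assumptions, not the original
# experiment settings.
if __name__ == '__main__':
    for N in (50, 100):                # assumed cube sizes N1 = N2 = N3 = N
        for gR, dR in ((10, 10),):     # assumed generation/decomposition ranks
            for t in range(3):         # assumed number of repeated runs
                cp_run(N, N, N, gR, dR, t)
                ncp_run(N, N, N, gR, dR, t)
                tucker_run(N, N, N, gR, dR, t)
                ntucker_run(N, N, N, gR, dR, t)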
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/10/3 PM4:16
# @Author  : Shiloh Leung
# @Site    :
# @File    : ntucker_demo.py
# @Software: PyCharm Community Edition

import tensorflow as tf
from tensorD.factorization.env import Environment
from tensorD.dataproc.provider import Provider
from tensorD.factorization.ntucker import NTUCKER_BCU
from tensorD.demo.DataGenerator import *

if __name__ == '__main__':
    print('=========Train=========')
    X = synthetic_data_tucker([20, 20, 20], [10, 10, 10])
    data_provider = Provider()
    data_provider.full_tensor = lambda: X
    env = Environment(data_provider, summary_path='/tmp/ntucker_demo')
    ntucker = NTUCKER_BCU(env)
    args = NTUCKER_BCU.NTUCKER_Args(ranks=[10, 10, 10], validation_internal=10)
    ntucker.build_model(args)
    ntucker.train(2000)
    factor_matrices = ntucker.factors
    core_tensor = ntucker.core
    print('Train ends.\n\n\n')
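
# Sanity-check sketch for the demo above: rebuild the Tucker approximation
# from the learned core and factors and compare it to the ground truth X.
# This assumes factor_matrices is a list of three N_k x R_k NumPy arrays and
# core_tensor is an R1 x R2 x R3 NumPy array after training; the original
# demo does not state the returned types, so treat this as illustrative.
import numpy as np

def tucker_reconstruct(core, factors):
    # Contract the core with each factor matrix along its matching mode:
    # X_hat[i, j, k] = sum_{a,b,c} core[a, b, c] * U1[i, a] * U2[j, b] * U3[k, c]
    return np.einsum('abc,ia,jb,kc->ijk', core, *factors)

# Example usage (commented out; depends on the assumption above):
# X_hat = tucker_reconstruct(core_tensor, factor_matrices)
# print(np.linalg.norm(X - X_hat) / np.linalg.norm(X))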
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/9/15 PM10:53
# @Author  : Shiloh Leung
# @Site    :
# @File    : ncp_test.py
# @Software: PyCharm Community Edition

from tensorD.factorization.env import Environment
from tensorD.factorization.ncp import NCP_BCU
from tensorD.dataproc.provider import Provider
import tensorflow as tf
import numpy as np

if __name__ == '__main__':
    data_provider = Provider()
    X = np.arange(60).reshape(3, 4, 5)
    data_provider.full_tensor = lambda: X
    env = Environment(data_provider, summary_path='/tmp/tensord')
    ncp = NCP_BCU(env)
    args = NCP_BCU.NCP_Args(rank=2, validation_internal=5)
    ncp.build_model(args)
    ncp.train(500)
import tensorflow as tf  # needed for tf.Session below
from tensorD.dataproc.reader import TensorReader
from tensorD.factorization.env import Environment
from tensorD.dataproc.provider import Provider
from tensorD.factorization.tucker import HOOI
from tensorD.factorization.tucker import HOSVD
from tensorD.demo.DataGenerator import *

if __name__ == '__main__':
    full_shape = [943, 1682, 31]
    base = TensorReader('/root/tensorD_f/data_out_tmp/u1.base.csv')
    base.read(full_shape=full_shape)
    with tf.Session() as sess:
        rating_tensor = sess.run(base.full_data)
    data_provider = Provider()
    data_provider.full_tensor = lambda: rating_tensor
    env = Environment(data_provider, summary_path='/tmp/tucker_ml')
    hooi = HOOI(env)
    args = HOOI.HOOI_Args(ranks=[20, 20, 20], validation_internal=1)
    hooi.build_model(args)
    hist = hooi.train(100)
    out_path = '/root/tensorD_f/data_out_tmp/python_out/hooi_ml_20.txt'
    with open(out_path, 'w') as out:
        for entry in hist:  # each entry holds (loss, relative residual); `entry` avoids shadowing the built-in `iter`
            loss = entry[0]
            rel_res = entry[1]
            out.write('%.10f, %.10f\n' % (loss, rel_res))
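
# Companion sketch: load one of the two-column "loss, rel_res" history files
# written by cp_run and by the HOOI MovieLens run above. The file format
# ('%.10f, %.10f\n' per iteration) is taken from those scripts; the function
# name load_history is hypothetical.
import numpy as np

def load_history(path):
    """Parse a 'loss, rel_res' history file into two NumPy arrays."""
    losses, rel_residuals = [], []
    with open(path) as f:
        for line in f:
            loss, rel_res = (float(v) for v in line.strip().split(','))
            losses.append(loss)
            rel_residuals.append(rel_res)
    return np.array(losses), np.array(rel_residuals)

# Example:
# losses, rel_residuals = load_history(
#     '/root/tensorD_f/data_out_tmp/python_out/hooi_ml_20.txt')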