def train_without_pretrain():
    """Train a NeuMF model from scratch (no GMF/MLP pretraining).

    Uses the module-level dataset globals (n_user, n_item, train_data,
    test_data, topk_data) and a fixed 8-dim GMF / 32-dim MLP configuration.
    """
    model = NeuMF(n_user, n_item, gmf_dim=8, mlp_dim=32, layers=[32, 16, 8], l2=0)
    # Longer schedule than the pretrained variant: random init needs more epochs.
    train(model, train_data, test_data, topk_data, epochs=50, batch=512)
def train_without_pretrain(n_user, n_item, train_data, test_data, topk_data, gmf_dim, mlp_dim, layers, l2):
    """Build a NeuMF model and train it directly, skipping sub-model pretraining.

    :param n_user: number of users
    :param n_item: number of items
    :param train_data, test_data, topk_data: dataset splits passed through to train()
    :param gmf_dim: embedding size of the GMF branch
    :param mlp_dim: embedding size of the MLP branch
    :param layers: hidden layer sizes of the MLP branch
    :param l2: L2 regularization strength
    """
    # NeuMF_model also returns the GMF and MLP sub-models; they are unused here.
    fused_model, _, _ = NeuMF_model(n_user, n_item, gmf_dim=gmf_dim, mlp_dim=mlp_dim, layers=layers, l2=l2)
    train(fused_model, train_data, test_data, topk_data, epochs=10, batch=512)
def train_with_pretrain(n_user, n_item, train_data, test_data, topk_data, gmf_dim, mlp_dim, layers, l2):
    """Pretrain the GMF and MLP sub-models, splice their output weights into
    NeuMF with an equal (0.5/0.5) trade-off, then fine-tune with low-LR SGD.

    :param n_user: number of users
    :param n_item: number of items
    :param train_data, test_data, topk_data: dataset splits passed through to train()/test()
    :param gmf_dim: embedding size of the GMF branch
    :param mlp_dim: embedding size of the MLP branch
    :param layers: hidden layer sizes of the MLP branch
    :param l2: L2 regularization strength
    """
    neumf_model, gmf_model, mlp_model = NeuMF_model(n_user, n_item, gmf_dim=gmf_dim, mlp_dim=mlp_dim, layers=layers, l2=l2)

    print('预训练GMF部分')
    train(gmf_model, train_data, test_data, topk_data, epochs=10, batch=512)
    print('预训练MLP部分')
    train(mlp_model, train_data, test_data, topk_data, epochs=10, batch=512)

    # Fetch each pretrained output layer's [kernel, bias] once.
    gmf_kernel, gmf_bias = gmf_model.get_layer('gmf_out').get_weights()
    mlp_kernel, mlp_bias = mlp_model.get_layer('mlp_out').get_weights()
    # Kernels are stacked along axis 0 (NeuMF's output consumes the
    # concatenated GMF+MLP features); biases are summed. Scaling both by
    # 0.5 weights the two pretrained halves equally.
    merged_kernel = tf.concat((gmf_kernel, mlp_kernel), 0)
    merged_bias = gmf_bias + mlp_bias
    neumf_model.get_layer('out').set_weights([merged_kernel * 0.5, merged_bias * 0.5])

    # Evaluate the spliced model before any fine-tuning.
    test(neumf_model, train_data, test_data, topk_data, batch=512)
    # Fine-tune with a very small SGD step to avoid destroying the pretrained weights.
    train(neumf_model, train_data, test_data, topk_data, optimizer=tf.keras.optimizers.SGD(0.0001), epochs=10, batch=512)
def train_with_pretrain():
    """Pretrain GMF and MLP separately, merge their weights into a NeuMF
    model via combine_weights(), then fine-tune.

    Uses the module-level dataset globals (n_user, n_item, train_data,
    test_data, topk_data).
    """
    gmf_model = GMF(n_user, n_item, dim=8, l2=0)
    train(gmf_model, train_data, test_data, topk_data, epochs=10, batch=512)

    mlp_model = MLP(n_user, n_item, dim=32, layers=[32, 16, 8], l2=0)
    train(mlp_model, train_data, test_data, topk_data, epochs=10, batch=512)

    neumf_model = NeuMF(n_user, n_item, gmf_dim=8, mlp_dim=32, layers=[32, 16, 8], l2=0)
    # Dummy forward pass so the subclassed model builds its variables
    # before weights are copied in.
    neumf_model(tf.constant([[0, 0]]))
    combine_weights(neumf_model, gmf_model, mlp_model)

    # NOTE(review): optimizer_class=None with epochs=1 presumably performs an
    # evaluation-style pass of the merged weights — confirm against train()'s signature.
    train(neumf_model, train_data, test_data, topk_data, optimizer_class=None, epochs=1, batch=512)
    train(neumf_model, train_data, test_data, topk_data, epochs=30, batch=512)
if __name__ == '__main__':
    # Enable GPU memory growth as a side effect of the import.
    import Recommender_System.utility.gpu_memory_growth
    from Recommender_System.data import data_loader, data_process
    from Recommender_System.algorithm.MLP.model import MLP_model
    from Recommender_System.algorithm.train import train

    # Train an MLP recommender on the MovieLens-100k split.
    n_user, n_item, train_data, test_data, topk_data = data_process.pack(data_loader.ml100k)
    model = MLP_model(n_user, n_item, dim=32, layers=[64, 64, 64], l2=0, dropout=0.3)
    train(model, train_data, test_data, topk_data, epochs=30, batch=512)
if __name__ == '__main__':
    # Enable GPU memory growth as a side effect of the import.
    import Recommender_System.utility.gpu_memory_growth
    from Recommender_System.data import data_loader, data_process
    from Recommender_System.algorithm.DeepFM.model import DeepFM_model
    from Recommender_System.algorithm.train import train

    # Train a DeepFM recommender on the MovieLens-100k split.
    n_user, n_item, train_data, test_data, topk_data = data_process.pack(data_loader.ml100k)
    model = DeepFM_model(n_user, n_item, dim=8, layers=[16, 16, 16], l2=1e-5)
    train(model, train_data, test_data, topk_data, epochs=10)
import tensorflow as tf

from Recommender_System.data import data_loader, data_process
from Recommender_System.algorithm.FM.model import FM_model
from Recommender_System.algorithm.GMF.model import GMF_model
from Recommender_System.algorithm.LFM.model import LFM_model
from Recommender_System.algorithm.MLP.model import MLP_model
from Recommender_System.algorithm.NeuMF.model import NeuMF_model
from Recommender_System.algorithm.DeepFM.model import DeepFM_model
from Recommender_System.algorithm.train import train

# Compare several recommender models on MovieLens-100k under a shared
# embedding-size budget (dim = 16), training each for 10 epochs.
n_user, n_item, train_data, test_data, topk_data = data_process.pack(data_loader.ml100k)

dim = 16

model = FM_model(n_user, n_item, dim=dim, l2=0)
train(model, train_data, test_data, topk_data, epochs=10)

model = GMF_model(n_user, n_item, dim=dim, l2=0)
train(model, train_data, test_data, topk_data, epochs=10)

# LFM is trained as a rating-regression task, hence the MSE loss.
model = LFM_model(n_user, n_item, dim=dim, l2=0)
train(model, train_data, test_data, topk_data, loss_object=tf.losses.MeanSquaredError(), epochs=10)

model = MLP_model(n_user, n_item, dim=dim * 2, layers=[dim * 2, dim, dim // 2], l2=0)
train(model, train_data, test_data, topk_data, epochs=10)

# NeuMF_model also returns its GMF/MLP sub-models; only the fused model is compared here.
model, _, _ = NeuMF_model(n_user, n_item, gmf_dim=dim // 2, mlp_dim=dim * 2, layers=[dim * 2, dim, dim // 2], l2=0)
train(model, train_data, test_data, topk_data, epochs=10)

# dim passed by keyword for consistency with every other constructor call above.
model = DeepFM_model(n_user, n_item, dim=dim // 2, layers=[dim, dim, dim], l2=0)
train(model, train_data, test_data, topk_data, epochs=10)