def run():
    """Train an NGCF model and report ranking metrics after each epoch.

    Reads hyperparameters from the command line via ``parse_args`` /
    ``NGCF_Args``, builds the normalized Laplacian adjacency matrix for the
    user-item interaction graph, then alternates one training pass with one
    evaluation pass for ``args.num_epochs`` epochs.
    """
    args = NGCF_Args(parse_args())

    # Load the dataset and wrap the training interactions in a DataLoader.
    data = DATA(args.data_path, args.dataset_name)
    train_set, train_U2I, test_U2I, edge_indices, edge_weight, n_users, n_items = data.load()
    train_loader = get_loader(train_set, train_U2I, n_items, args.batch_size, args.cores)

    # Build the normalized Laplacian of the interaction graph
    # (self-loops added before normalization), moved to the GPU.
    graph = Graph(edge_indices, edge_weight)
    graph.add_self_loop()
    graph.norm()
    norm_adj = graph.mat.cuda()

    # Model and optimizer. NOTE(review): assumes a CUDA device is available.
    model = NGCF(n_users, n_items, norm_adj, args).cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # Training session wrapper.
    session = Session(model)

    for epoch in range(args.num_epochs):
        # One optimization pass over the training loader.
        loss, mf_loss, emb_loss = session.train(train_loader, optimizer)
        print("epoch: {:d}, loss = [{:.6f} == {:.6f} + {:.6f}]".format(
            epoch, loss, mf_loss, emb_loss))

        # Evaluate ranking quality on the held-out interactions.
        metrics = evaluate(model, n_users, n_items, train_U2I, test_U2I, args)
        print("precision: [{:.6f}] recall: [{:.6f}] ndcg: [{:.6f}]".format(
            metrics[0], metrics[1], metrics[2]))
def run():
    """Train a BPR-MF model and report ranking metrics after each epoch.

    Reads hyperparameters from the command line via ``parse_args`` /
    ``BPRMF_Args``, then alternates one training pass with one evaluation
    pass for ``args.num_epochs`` epochs.
    """
    args = BPRMF_Args(parse_args())

    # Load the dataset and wrap the training interactions in a DataLoader.
    data = DATA(args.data_path, args.dataset_name)
    train_set, train_U2I, test_U2I, n_users, n_items = data.load()
    train_loader = get_loader(train_set, train_U2I, n_items, args.batch_size, args.cores)

    # Model and optimizer. NOTE(review): assumes a CUDA device is available.
    model = BPRMF(n_users, n_items, args).cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # Training session wrapper.
    session = Session(model)

    for epoch in range(args.num_epochs):
        # One optimization pass over the training loader.
        loss, mf_loss, emb_loss = session.train(train_loader, optimizer)
        print("epoch: {:d}, loss = [{:.6f} == {:.6f} + {:.6f}]".format(
            epoch, loss, mf_loss, emb_loss))

        # Evaluate ranking quality on the held-out interactions.
        metrics = evaluate(model, n_users, n_items, train_U2I, test_U2I, args)
        print("precision: [{:.6f}] recall: [{:.6f}] ndcg: [{:.6f}]".format(
            metrics[0], metrics[1], metrics[2]))
from dataset import MRIDataset as DATA else: from dataset import MRIDataset_threechannel as DATA if args.network == 'Inception_v3': from Inception_v3 import inception_v3_pretrain as MODEL Transform = transforms.Compose( [transforms.Resize((SIZE, SIZE)), transforms.ToTensor()]) if __name__ == '__main__': # writer = SummaryWriter(path_to_logs_dir) dataset = DATA(path_to_data, path_to_label, mode=MODE, transform=Transform, aug=True) weight = 1. / torch.tensor([dataset.negative, dataset.positive], dtype=torch.float) target = torch.tensor(dataset._label['label'], dtype=torch.long) sample_weight = torch.tensor([weight[t] for t in target], dtype=torch.float) sampler = WeightedRandomSampler(sample_weight, len(sample_weight)) dataloader = DataLoader(dataset, Batch_size, sampler=sampler, num_workers=1, drop_last=True) dataset_test = DATA(path_to_testdata,