pos_label = torch.ones(size, dtype=torch.float, device=device) neg_label = torch.zeros(size, dtype=torch.float, device=device) optimizer.zero_grad() tgan = tgan.train() pos_prob, neg_prob = tgan.contrast(src_l_cut, dst_l_cut, dst_l_fake, ts_l_cut, NUM_NEIGHBORS) loss = criterion(pos_prob, pos_label) loss += criterion(neg_prob, neg_label) loss.backward() optimizer.step() # get training results with torch.no_grad(): tgan = tgan.eval() pred_score = np.concatenate([(pos_prob).cpu().detach().numpy(), (neg_prob).cpu().detach().numpy()]) pred_label = pred_score > 0.5 true_label = np.concatenate([np.ones(size), np.zeros(size)]) acc = accuracy_score(true_label, pred_label) ap = average_precision_score(true_label, pred_label) f1 = f1_score(true_label, pred_label) auc = roc_auc_score(true_label, pred_score) batch_bar.set_postfix(loss=loss.item(), acc=acc, f1=f1, auc=auc) # validation phase use all information tgan.ngh_finder = full_ngh_finder val_acc, val_ap, val_f1, val_auc = eval_one_epoch('val for old nodes', tgan, val_src_l, val_dst_l, val_ts_l,
# Tail of the adjacency-building loop (src/dst/eidx/ts come from an edge
# iteration that lies outside this chunk): record the reverse direction so
# the temporal graph is treated as undirected for neighbor lookups.
full_adj_list[dst].append((src, eidx, ts))
full_ngh_finder = NeighborFinder(full_adj_list, uniform=UNIFORM)

### Model initialize
# NOTE(review): assumes a CUDA device is present — no CPU fallback; confirm
# before running on CPU-only hosts.
device = torch.device('cuda:{}'.format(GPU))
tgan = TGAN(full_ngh_finder, n_feat, e_feat, num_layers=NUM_LAYER,
            use_time=USE_TIME, agg_method=AGG_METHOD, attn_mode=ATTN_MODE,
            seq_len=SEQ_LEN, n_head=NUM_HEADS, drop_out=DROP_OUT,
            node_dim=NODE_DIM, time_dim=TIME_DIM)
# Training objects intentionally disabled — this section only runs inference
# from a saved checkpoint.
# optimizer = torch.optim.Adam(tgan.parameters(), lr=LEARNING_RATE)
# criterion = torch.nn.BCELoss()
tgan = tgan.to(device)

#logger.info('loading saved TGAN model')
model_path = f'./saved_models/{args.prefix}-{args.agg_method}-{args.attn_mode}-{DATA}.pth'
# NOTE(review): torch.load without map_location requires the checkpoint's
# save-time device to be available — consider map_location=device; verify.
tgan.load_state_dict(torch.load(model_path))
tgan.eval()
#logger.info('TGAN models loaded')
#logger.info('TGAN starts generating representations')

# Generate temporal embeddings for every interaction's source and
# destination node at the interaction timestamp.
with torch.no_grad():
    tgan.eval()  # redundant with the eval() call above, but harmless
    src_embed = tgan.tem_conv(src_l, ts_l, NODE_LAYER)
    tgt_embed = tgan.tem_conv(dst_l, ts_l, NODE_LAYER)

# NOTE(review): emb[0] keeps only the FIRST component of each embedding
# row — looks like an intentional scalar summary, but confirm this is not
# meant to store the full vector.
g_df['user_embed'] = [emb[0] for emb in src_embed.tolist()]
g_df['tgt_embed'] = [emb[0] for emb in tgt_embed.tolist()]

# Invert the item-id mapping so target page ids can be translated back to
# the raw item identifiers.
inv_i_dict = {v: k for k, v in i_dict.items()}
# .get() yields None for pages missing from the mapping — presumably the
# downstream lookup tolerates that; verify.
tgtpg_item = [inv_i_dict.get(i) for i in target_pages]
# NOTE(review): the boolean mask is built from orig_df but used to index
# g_df — this assumes the two frames are row-aligned; confirm. .iloc[0]
# raises IndexError if an item has no matching row.
tgt_emb = [g_df.loc[orig_df['item'] == item, 'tgt_embed'].iloc[0] for item in tgtpg_item]
# Keep only each user's most recent interaction row.
g_df = g_df.groupby('u').tail(1)