def run_experiment(args):
    """Run an experiment for the given arguments."""
    dataset, data = load_data(args.dataset)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Define the model
    encoder = create_encoder(args.model, dataset.num_features,
                             args.latent_dim).to(device)
    decoder = create_decoder(args.decoder).to(device)

    if args.model == 'GAE':
        model = GAE(encoder=encoder, decoder=decoder).to(device)
    else:
        model = VGAE(encoder=encoder, decoder=decoder).to(device)

    # Split the edges of the torch_geometric.data.Data object into positive and
    # negative train/val/test edges.
    # Default ratios of positive edges: val_ratio=0.05, test_ratio=0.1
    print("Data.edge_index.size", data.edge_index.size(1))
    data = model.split_edges(data)
    node_features, train_pos_edge_index = data.x.to(
        device), data.train_pos_edge_index.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

    def train_epoch():
        """
        Train for a single epoch and optimize the loss.

        :return: log - dictionary containing the training loss
        """
        # TODO: Add logging of results
        model.train()
        optimizer.zero_grad()

        # Compute the latent embedding Z
        latent_embeddings = model.encode(node_features, train_pos_edge_index)

        # Calculate the reconstruction loss
        loss = model.recon_loss(latent_embeddings, train_pos_edge_index)
        if args.model in ['VGAE']:
            loss = loss + (1 / data.num_nodes) * model.kl_loss()

        # Compute gradients
        loss.backward()
        # Perform the optimization step
        optimizer.step()

        # print("Train-Epoch: {} Loss: {}".format(epoch, loss))

        # TODO: Add logging via TensorBoard
        log = {'loss': loss}

        return log

    def test(pos_edge_index, neg_edge_index):
        model.eval()
        with torch.no_grad():
            # Compute the latent embeddings
            z = model.encode(node_features, train_pos_edge_index)
        # model.test returns AUC, AP
        return model.test(z, pos_edge_index, neg_edge_index)

    def test_naive_graph(z, sample_size=1000):
        if args.sample_dense_evaluation:
            graph_type = "sampled"
            z_sample, index_mapping = sample_graph(z, sample_size)
            t = time.time()
            adjacency = model.decoder.forward_all(
                z_sample, sigmoid=(args.decoder == 'dot'))
        else:
            graph_type = "full"
            t = time.time()
            adjacency = model.decoder.forward_all(
                z, sigmoid=(args.decoder == 'dot'))

        print(f"Computing {graph_type} graph took {time.time() - t} seconds.")
        print(
            f"Adjacency matrix takes {adjacency.element_size() * adjacency.nelement() / 10 ** 6} MB of memory."
        )

        if args.min_sim_absolute_value is None:
            args.min_sim_absolute_value, _ = sample_percentile(
                args.min_sim, adjacency, dist_measure=args.decoder,
                sample_size=sample_size)

        if args.sample_dense_evaluation:
            precision, recall = sampled_dense_precision_recall(
                data, adjacency, index_mapping, args.min_sim_absolute_value)
        else:
            precision, recall = dense_precision_recall(
                data, adjacency, args.min_sim_absolute_value)

        print("Predicted {} adjacency matrix has precision {} and recall {}!".
              format(graph_type, precision, recall))

        return precision, recall

    def sample_graph(z, sample_size):
        N, D = z.shape
        sample_size = min(sample_size, N)
        sample_ix = np.random.choice(np.arange(N),
                                     size=sample_size,
                                     replace=False)

        # Return the sampled embeddings and a mapping from their indices to the originals
        return z[sample_ix], {i: sample_ix[i] for i in np.arange(sample_size)}

    def test_compare_lsh_naive_graphs(z, assure_correctness=True):
        """
        Compare the naively computed dense adjacency matrix with the sparse
        adjacency matrix produced by the LSHDecoder.

        :param z: latent embeddings
        :param assure_correctness: passed to LSHDecoder; re-checks candidate pairs
        :return: precision/recall, runtime and memory figures for both variants
        """
        # Naive adjacency matrix (non-LSH version)
        t = time.time()
        # Don't use sigmoid in order to directly compare thresholds with LSH
        naive_adjacency = model.decoder.forward_all(
            z, sigmoid=(args.decoder == 'dot'))
        naive_time = time.time() - t
        naive_size = naive_adjacency.element_size() * naive_adjacency.nelement() / 10**6

        if args.min_sim_absolute_value is None:
            args.min_sim_absolute_value, _ = sample_percentile(
                args.min_sim, z, dist_measure=args.decoder)

        print(
            "______________________________Naive Graph Computation KPI____________________________________________"
        )
        print(f"Computing naive graph took {naive_time} seconds.")
        print(f"Naive adjacency matrix takes {naive_size} MB of memory.")

        # LSH adjacency matrix
        t = time.time()
        lsh_adjacency = LSHDecoder(bands=args.lsh_bands,
                                   rows=args.lsh_rows,
                                   verbose=True,
                                   assure_correctness=assure_correctness,
                                   sim_thresh=args.min_sim_absolute_value)(z)
        lsh_time = time.time() - t
        lsh_size = lsh_adjacency.element_size() * lsh_adjacency._nnz() / 10**6

        print(
            "__________________________________LSH Graph Computation KPI__________________________________________"
        )
        print(f"Computing LSH graph took {lsh_time} seconds.")
        print(f"Sparse adjacency matrix takes {lsh_size} MB of memory.")

        print(
            "________________________________________Precision-Recall_____________________________________________"
        )
        # 1) Evaluation: both adjacency matrices against the ground-truth graph
        naive_precision, naive_recall = dense_precision_recall(
            data, naive_adjacency, args.min_sim_absolute_value)

        lsh_precision, lsh_recall = sparse_precision_recall(
            data, lsh_adjacency)

        print(f"Naive-Precision {naive_precision}; Naive-Recall {naive_recall}")
        print(f"LSH-Precision {lsh_precision}; LSH-Recall {lsh_recall}")

        print(
            "_____________________________Comparison Sparse vs Dense______________________________________________"
        )
        # 2) Evaluation: compare both adjacency matrices against each other
        compare_precision, compare_recall = sparse_v_dense_precision_recall(
            naive_adjacency, lsh_adjacency, args.min_sim_absolute_value)
        print(
            f"LSH sparse matrix has {compare_precision} precision and {compare_recall} recall w.r.t. the naively generated dense matrix!"
        )

        return naive_precision, naive_recall, naive_time, naive_size, \
            lsh_precision, lsh_recall, lsh_time, lsh_size, \
            compare_precision, compare_recall

    # Training routine
    early_stopping = EarlyStopping(args.use_early_stopping,
                                   patience=args.early_stopping_patience,
                                   verbose=True)

    logs = []

    if args.load_model and os.path.isfile("checkpoint.pt"):
        print("Loading model from savefile...")
        model.load_state_dict(torch.load("checkpoint.pt"))

    if not (args.load_model and args.early_stopping_patience == 0):
        for epoch in range(1, args.epochs):
            log = train_epoch()
            logs.append(log)

            # Validation metrics
            val_auc, val_ap = test(data.val_pos_edge_index,
                                   data.val_neg_edge_index)
            print('Validation-Epoch: {:03d}, AUC: {:.4f}, AP: {:.4f}'.format(
                epoch, val_auc, val_ap))

            # Stop training if validation scores have not improved
            early_stopping(val_ap, model)
            if early_stopping.early_stop:
                print("Applying early-stopping")
                break
    else:
        epoch = 0

    # Load the best encoder
    print("Load best model for evaluation.")
    model.load_state_dict(torch.load('checkpoint.pt'))
    print(
        "__________________________________________________________________________"
    )

    # Training is finished, calculate test metrics
    test_auc, test_ap = test(data.test_pos_edge_index,
                             data.test_neg_edge_index)
    print('Test Results: {:03d}, AUC: {:.4f}, AP: {:.4f}'.format(
        epoch, test_auc, test_ap))

    # Check whether early stopping was applied - if not, the model might not be done with training
    if args.epochs == epoch + 1:
        print("Model might need more epochs - increase the number of epochs!")

    # Evaluate the full graph
    latent_embeddings = model.encode(node_features, train_pos_edge_index)

    # Save the embeddings to the embeddings folder if the flag is set
    if args.save_embeddings:
        embeddings_folder = osp.join(osp.dirname(osp.abspath(__file__)),
                                     'embeddings')
        if not osp.isdir(embeddings_folder):
            os.makedirs(embeddings_folder)

        torch.save(
            latent_embeddings,
            osp.join(embeddings_folder,
                     args.dataset + "_" + args.decoder + ".pt"))

    if not args.lsh:
        # Compute precision/recall w.r.t. the ground-truth graph
        graph_precision, graph_recall = test_naive_graph(latent_embeddings)
        del model
        del encoder
        del decoder
        torch.cuda.empty_cache()
    else:
        # Precision/recall w.r.t. the generated graph
        naive_precision, naive_recall, naive_time, naive_size, lsh_precision, \
            lsh_recall, lsh_time, lsh_size, \
            compare_precision, compare_recall = test_compare_lsh_naive_graphs(
                latent_embeddings)

        del model
        del encoder
        del decoder
        torch.cuda.empty_cache()

        return {
            'args': args,
            'test_auc': test_auc,
            'test_ap': test_ap,
            'naive_precision': naive_precision,
            'naive_recall': naive_recall,
            'naive_time': naive_time,
            'naive_size': naive_size,
            'lsh_precision': lsh_precision,
            'lsh_recall': lsh_recall,
            'lsh_time': lsh_time,
            'lsh_size': lsh_size,
            'compare_precision': compare_precision,
            'compare_recall': compare_recall
        }
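

# Illustrative usage sketch (assumption, not part of the original module):
# run_experiment() reads its configuration from an argparse-style namespace
# whose attribute names mirror the ones accessed above. The concrete values
# below are hypothetical placeholders; the real CLI definition lives elsewhere.
def _example_run_experiment():
    from argparse import Namespace
    args = Namespace(
        dataset='Cora', model='GAE', decoder='dot', latent_dim=16,
        epochs=200, use_early_stopping=True, early_stopping_patience=10,
        load_model=False, save_embeddings=False, lsh=True,
        lsh_bands=8, lsh_rows=16, min_sim=0.99, min_sim_absolute_value=None,
        sample_dense_evaluation=False)
    return run_experiment(args)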


class UnsGAE(object):

    def __init__(self, data, embed_dim, **kwargs):
        super(UnsGAE, self).__init__()

        self.data = data
        self.input_dim = self.data.dim
        self.embed_dim = embed_dim
        # for now, we only work with 2-layer encoders
        self.hidden_dim = kwargs.get('hidden_dim', 2 * embed_dim)

        self.encoder = kwargs.get('encoder', batched_SAGEEncoder)
        self.encoder = self.encoder(self.input_dim,
                                    self.hidden_dim,
                                    self.embed_dim)
        self.model = GAE(self.encoder)

        # preparing the device
        device = kwargs.get('device', 'cuda')
        if device in ('gpu', 'cuda') and not torch.cuda.is_available():
            print('CUDA is not available in PyTorch. The model ' +
                  'will be initiated on CPU.')
            device = 'cpu'
        self.device = torch.device(device)

    def init_model(self, sizes, weights_path=None):
        self.model = self.model.to(self.device)

        # `sizes` is used for initializing the model here, but it is also
        # used in every feed-forward pass as the neighbor sampling sizes
        assert len(sizes) == self.model.encoder.num_layers, \
            'Number of sizes should be equal to the number of layers in the encoder.'
        self.sizes = sizes
        if not hasattr(self.data, 'loader'):
            self.data.get_neighbor_sampler(self.sizes)

        if weights_path is not None:
            self.model.load_state_dict(
                torch.load(weights_path, map_location=self.device))

    def init_training(self, neg_num, optim='Adam', lr=1e-5,
                      smooth_par=0.75, **kwargs):
        if optim == 'Adam':
            self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)
        elif optim == 'SGD':
            self.optimizer = torch.optim.SGD(self.model.parameters(), lr=lr)

        self.train_one_epoch = self._train_edge_batching
        self.neg_num = neg_num

        if not hasattr(self.data, 'pos_pairs'):
            assert 'pos_samples_path' in kwargs, 'The provided data does ' + \
                'not come with positive pairs, and we need a path to the ' + \
                'already selected positive samples. You can provide it ' + \
                'through the input pos_samples_path.'
            include_nodes = kwargs.get('include_nodes', None)
            self.data.load_positive_pairs(kwargs['pos_samples_path'],
                                          include_nodes)

        if not hasattr(self.data, 'neg_sampler'):
            # smooth_par = kwargs.get('smooth_par', 0.75)
            self.data.get_negative_sampler(smooth_par)

        if not hasattr(self.data, 'x_all'):
            self.data._fetch_node_features()

    def init_validation(self):
        if not hasattr(self.data, 'x_all'):
            self.data._fetch_node_features()

    def embed_some(self, sample_inds, b=100):
        """Embed a batch of samples; used during training when the
        embeddings of a batch of samples are needed.
        """
        quot, rem = np.divmod(len(sample_inds), b)

        Z = []
        for i in range(quot + 1):
            if i < quot:
                b_ids = sample_inds[i * b:(i + 1) * b]
            elif rem > 0:
                b_ids = sample_inds[i * b:]
            else:
                # len(sample_inds) is an exact multiple of b; nothing left
                continue

            # neighbor sampling for each sample
            _, n_id, adjs = self.data.train_loader.sample(b_ids)
            adjs = [adj.to(self.device) for adj in adjs]

            # get feature vectors through the neighbors sampled above
            batch_X = torch.from_numpy(self.data.get_node_features(n_id))
            batch_X = batch_X.to(torch.float).to(self.device)

            # the encoder's output is used as the embedding
            try:
                batch_Z = self.model.encoder(batch_X, adjs)
            except Exception:
                pdb.set_trace()

            Z += [batch_Z]

        Z = torch.cat(Z, dim=0)

        return Z

    def embed_all(self):
        L = self.model.encoder.num_layers

        pbar = tqdm(total=self.data.n_x * L, position=0, leave=True)
        pbar.set_description('Evaluating')

        self.model.encoder.eval()
        # Inference is used in the evaluation stage (not in training), when
        # the embeddings of *all* nodes are computed. It is written in a way
        # that is faster than the forward-passing function, which is mostly
        # used for single batches during training.
        with torch.no_grad():
            for i in range(L):
                xs = []
                for batch_size, n_id, adj in self.data.test_loader:
                    edge_index, _, size = adj.to(self.device)
                    if i == 0:
                        x = torch.from_numpy(
                            self.data.get_node_features(n_id))
                        x = x.to(torch.float).to(self.device)
                    else:
                        x = x_all[n_id, :].to(self.device)
                    x_target = x[:size[1]]
                    x = self.model.encoder.convs[i]((x, x_target), edge_index)
                    if i != L - 1:
                        x = F.relu(x)
                    xs.append(x[:batch_size, :].cpu())

                    pbar.update(batch_size)

                x_all = torch.cat(xs, dim=0)

        pbar.close()

        return x_all

    def _train_edge_batching(self, ep, batch_size=5000):
        assert hasattr(self.data, 'pos_pairs'), 'Positive and negative ' + \
            'samples must be generated before starting the training'

        self.model.train()
        neg_num = self.neg_num

        torch.multiprocessing.set_sharing_strategy('file_system')
        pbar = tqdm(total=self.data.pos_pairs.shape[1],
                    position=0, leave=True)
        pbar.set_description(f'Epoch {ep:02d}')

        total_loss = 0
        # shuffling the transposed view shuffles the pair columns in place
        np.random.shuffle(self.data.pos_pairs.T)
        quot, rem = np.divmod(self.data.pos_pairs.shape[1], batch_size)

        for i in range(quot + 1):
            # positive mini-batch
            # (#: batch_size)
            if i < quot:
                batch_pos_pairs = self.data.pos_pairs[
                    :, i * batch_size:(i + 1) * batch_size]
            else:
                batch_pos_pairs = self.data.pos_pairs[:, i * batch_size:]
            if batch_pos_pairs.shape[1] == 0:
                # the number of pairs is an exact multiple of batch_size
                continue
            batch_pos_samples, pos_edge_index = np.unique(
                batch_pos_pairs, return_inverse=True)
            pos_edge_index = pos_edge_index.reshape(batch_pos_pairs.shape)

            # negative mini-batch
            # (#: batch_size * neg_num)
            batch_neg_samples = self.data.neg_sampler.sample(
                torch.Size([neg_num * batch_pos_pairs.shape[1]]))
            neg_edge_index = np.array([
                np.repeat(pos_edge_index[0, :], neg_num),
                np.arange(pos_edge_index.max() + 1,
                          pos_edge_index.max() + len(batch_neg_samples) + 1)
            ])

            # embeddings of the nodes involved in positive and negative edges
            self.optimizer.zero_grad()
            unodes = batch_pos_samples.tolist() + batch_neg_samples.tolist()
            Z = self.embed_some(unodes)

            # reconstruction loss
            pos_edge_index = torch.from_numpy(pos_edge_index).to(self.device)
            neg_edge_index = torch.from_numpy(neg_edge_index).to(self.device)
            loss = self.model.recon_loss(Z, pos_edge_index, neg_edge_index)

            loss.backward()
            self.optimizer.step()
            total_loss += float(loss)

            pbar.update(batch_size)

        pbar.close()
        loss = total_loss / (quot + 1)

        return loss

    def validate(self):
        self.model.eval()
        Z = self.embed_all()

        # dot-product scores between entity and property embeddings
        ents_Z = Z[:-1, :][
            self.data.selected_inds[:-1] >= self.data.nA, :].detach().numpy()
        prop_Z = Z[self.data.tags == 'prop', :].detach().numpy()
        scores = np.dot(ents_Z, prop_Z.T).squeeze()

        # precision@50 over unstudied entities w.r.t. the ground truth
        sorted_ents = self.data.selected_ents[np.argsort(-scores)]
        unstudied_sorted_ents = np.array(
            [x for x in sorted_ents if x not in self.data.studied_ents])
        preds = unstudied_sorted_ents[:50]
        prec = np.isin(preds, self.data.GT).sum() / len(preds)

        return prec
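

# Illustrative usage sketch (assumption, not part of the original module):
# it only shows how the methods above fit together. `data` must be the
# project's dataset wrapper exposing the attributes UnsGAE relies on
# (dim, get_neighbor_sampler, train_loader, test_loader, load_positive_pairs,
# get_negative_sampler, ...); the path, fan-out sizes, and epoch count are
# hypothetical placeholders.
def _example_train_unsgae(data, pos_samples_path, epochs=5):
    gae = UnsGAE(data, embed_dim=128, device='cuda')
    gae.init_model(sizes=[10, 5])  # one neighbor fan-out per encoder layer
    gae.init_training(neg_num=5, optim='Adam', lr=1e-5,
                      pos_samples_path=pos_samples_path)
    gae.init_validation()
    for ep in range(epochs):
        loss = gae.train_one_epoch(ep)
        prec = gae.validate()
        print(f'Epoch {ep:02d}: loss={loss:.4f}, precision@50={prec:.3f}')
    return gae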