def prepare_sample(self, sample):
    """Move one training sample's tensors onto the configured device.

    Each historical adjacency is converted to a sparse tensor, node
    features are prepared via the tasker, masks are transposed, and the
    label dict is index-transposed (for pair tasks) and cast to long.
    Returns the sample wrapped in a ``u.Namespace``.
    """
    sample = u.Namespace(sample)
    device = self.args.device

    for step, raw_adj in enumerate(sample.hist_adj_list):
        sparse_adj = u.sparse_prepare_tensor(raw_adj,
                                             torch_size=[self.num_nodes])
        sample.hist_adj_list[step] = sparse_adj.to(device)

        feats = self.tasker.prepare_node_feats(sample.hist_ndFeats_list[step])
        sample.hist_ndFeats_list[step] = feats.to(device)

        # Transposed to have the same dimensions as the scorer output.
        mask = sample.node_mask_list[step]
        sample.node_mask_list[step] = mask.to(device).t()

    label_sp = self.ignore_batch_dim(sample.label_sp)
    if self.args.task in ["link_pred", "edge_cls"]:
        # Pair tasks concatenate node embeddings, so the embeddings are
        # row vectors after this transpose.
        label_sp['idx'] = label_sp['idx'].to(device).t()
    else:
        label_sp['idx'] = label_sp['idx'].to(device)

    label_sp['vals'] = label_sp['vals'].type(torch.long).to(device)
    sample.label_sp = label_sp
    return sample
def __init__(self, args, dataset):
    """Set up a static (single-snapshot) task over *dataset*.

    Builds the static sparse adjacency, derives node features (either
    2-hot degree features or the dataset's own features), and normalizes
    the adjacency. ``num_classes`` is fixed at 3 for this task.
    """
    self.data = dataset
    self.args = args
    self.num_classes = 3

    self.adj_matrix = tu.get_static_sp_adj(edges=self.data.edges,
                                           weighted=False)

    if args.use_2_hot_node_feats:
        max_deg_out, max_deg_in = tu.get_max_degs_static(
            self.data.num_nodes, self.adj_matrix)
        # One one-hot slot per possible out-degree plus one per in-degree.
        self.feats_per_node = max_deg_out + max_deg_in
        deg_feats = tu.get_2_hot_deg_feats(self.adj_matrix,
                                           max_deg_out,
                                           max_deg_in,
                                           dataset.num_nodes)
        self.nodes_feats = u.sparse_prepare_tensor(
            deg_feats,
            torch_size=[self.data.num_nodes, self.feats_per_node],
            ignore_batch_dim=False)
    else:
        self.feats_per_node = dataset.feats_per_node
        self.nodes_feats = self.data.node_feats

    self.adj_matrix = tu.normalize_adj(adj=self.adj_matrix,
                                       num_nodes=self.data.num_nodes)
    self.is_static = True
def __init__(self, args, splitter, gcn, classifier, comp_loss, dataset,
             num_classes):
    """Wire up the trainer: model parts, data, logger, and optimizers.

    For static taskers, precomputes the single adjacency/feature
    snapshot used at every step.
    """
    self.args = args
    self.splitter = splitter
    self.tasker = splitter.tasker
    self.gcn = gcn
    self.classifier = classifier
    self.comp_loss = comp_loss

    self.num_nodes = dataset.num_nodes
    self.data = dataset
    self.num_classes = num_classes

    self.logger = logger.Logger(args, self.num_classes)
    self.init_optimizers(args)

    if self.tasker.is_static:
        # Static graphs reuse one adjacency and one feature matrix
        # for the whole history.
        static_adj = u.sparse_prepare_tensor(self.tasker.adj_matrix,
                                             torch_size=[self.num_nodes],
                                             ignore_batch_dim=False)
        self.hist_adj_list = [static_adj]
        self.hist_ndFeats_list = [self.tasker.nodes_feats.float()]
def prepare_node_feats(node_feats):
    # Convert raw node features into a sparse tensor of size
    # [num_nodes, feats_per_node].
    # NOTE(review): `dataset` and `self` are free variables here — this
    # reads as a closure defined inside an enclosing method that has
    # `self` and `dataset` in scope (there is no `self` parameter).
    # Confirm both names resolve at call time; if this were meant to be
    # a regular method, the missing `self` parameter would be a bug.
    return u.sparse_prepare_tensor(node_feats,
                                   torch_size=[dataset.num_nodes,
                                               self.feats_per_node])