def fit(self, features, adj, labels, idx_train, idx_val=None, train_iters=200,
        initialize=True, verbose=False, normalize=True):
    """Train the GCN.

    When ``idx_val`` is provided, the best weights are chosen by
    validation loss; otherwise training simply runs for ``train_iters``
    epochs on the training split alone.
    """
    self.device = self.gc1.weight.device
    if initialize:
        self.initialize()

    # Move (and, if necessary, convert) every input onto the model device.
    if type(adj) is torch.Tensor:
        features = features.to(self.device)
        adj = adj.to(self.device)
        labels = labels.to(self.device)
    else:
        features, adj, labels = utils.to_tensor(
            features, adj, labels, device=self.device)

    # Normalize the adjacency unless the caller opted out.
    if not normalize:
        adj_norm = adj
    elif utils.is_sparse_tensor(adj):
        adj_norm = utils.normalize_adj_tensor(adj, sparse=True)
    else:
        adj_norm = utils.normalize_adj_tensor(adj)

    self.adj_norm = adj_norm
    self.features = features
    self.labels = labels

    if idx_val is None:
        self._train_without_val(labels, idx_train, train_iters, verbose)
    else:
        self._train_with_val(labels, idx_train, idx_val, train_iters, verbose)
def predict(self, features=None, adj=None):
    """Return the GCN output (log probabilities) for a graph.

    By default the inputs should be unnormalized data.

    Parameters
    ----------
    features :
        Node features. If both ``features`` and ``adj`` are omitted, the
        ``features``/``adj`` stored during training are reused.
    adj :
        Adjacency matrix. Same fallback behavior as ``features``.

    Returns
    -------
    torch.FloatTensor
        Output (log probabilities) of the GCN.
    """
    self.eval()

    if features is None and adj is None:
        # Reuse the cached (already normalized) training inputs.
        return self.forward(self.features, self.adj_norm)

    if type(adj) is not torch.Tensor:
        features, adj = utils.to_tensor(features, adj, device=self.device)

    self.features = features
    if utils.is_sparse_tensor(adj):
        self.adj_norm = utils.normalize_adj_tensor(adj, sparse=True)
    else:
        self.adj_norm = utils.normalize_adj_tensor(adj)
    return self.forward(self.features, self.adj_norm)
def fit(self, features, adj, labels, idx_train, idx_val=None, train_iters=200,
        initialize=True, verbose=False, normalize=True, patience=500, **kwargs):
    """Train the model.

    With a validation split, early stopping is used when
    ``patience < train_iters``; otherwise the best model by validation
    loss is kept. Without ``idx_val`` training runs for ``train_iters``
    epochs on the training indices only.
    """
    if initialize:
        self.initialize()

    # Bring all inputs onto the model device, converting if necessary.
    if type(adj) is torch.Tensor:
        features = features.to(self.device)
        adj = adj.to(self.device)
        labels = labels.to(self.device)
    else:
        features, adj, labels = utils.to_tensor(
            features, adj, labels, device=self.device)

    adj_norm = adj
    if normalize:
        if utils.is_sparse_tensor(adj):
            adj_norm = utils.normalize_adj_tensor(adj, sparse=True)
        else:
            adj_norm = utils.normalize_adj_tensor(adj)

    self.adj_norm = adj_norm
    self.features = features
    self.labels = labels

    if idx_val is None:
        self._train_without_val(labels, idx_train, train_iters, verbose)
    elif patience < train_iters:
        self._train_with_early_stopping(
            labels, idx_train, idx_val, train_iters, patience, verbose)
    else:
        self._train_with_val(labels, idx_train, idx_val, train_iters, verbose)
def fit(self, features, adj, labels, idx_train, idx_val=None, train_iters=200,
        initialize=True, verbose=True, normalize=True, patience=1000, adj2=None):
    """Train the GCN model; when ``idx_val`` is not None, pick the best
    model according to the validation loss (with early stopping when
    ``patience < train_iters``).

    Parameters
    ----------
    features : node features (torch tensor or scipy matrix)
    adj : adjacency matrix (torch tensor or scipy matrix)
    labels : node labels
    idx_train : training node indices
    idx_val : validation node indices (optional)
    train_iters : int, number of training epochs
    initialize : bool — currently a no-op; re-initialization was
        deliberately disabled here (kept for interface compatibility)
    verbose : bool, whether to log training progress
    normalize : bool, whether to symmetrically normalize ``adj`` first
    patience : int, early-stopping patience (only used with ``idx_val``)
    adj2 : unused, kept for interface compatibility
    """
    self.device = self.gc1.weight_x.weight.device

    # Convert inputs to tensors on the model's device if needed.
    if type(adj) is not torch.Tensor:
        features, adj, labels = utils.to_tensor(
            features, adj, labels, device=self.device)
    else:
        features = features.to(self.device)
        adj = adj.to(self.device)
        labels = labels.to(self.device)

    if normalize:
        if utils.is_sparse_tensor(adj):
            adj_norm = utils.normalize_adj_tensor(adj, sparse=True)
        else:
            adj_norm = utils.normalize_adj_tensor(adj)
    else:
        adj_norm = adj

    # Zero out the diagonal, then re-normalize.
    # FIX: build the identity on the same device as adj_norm instead of a
    # hard-coded .cuda(), so CPU runs and non-default GPU devices work.
    eye = torch.eye(adj_norm.size(0), device=adj_norm.device)
    adj_norm = adj_norm * (1 - eye)
    adj_norm = self.normalize_adj(adj_norm)
    self.adj_norm_dense = adj_norm
    self.adj_norm = to_sparse(adj_norm)

    self.features = features
    self.labels = labels

    if idx_val is None:
        self._train_without_val(labels, idx_train, train_iters, verbose)
    elif patience < train_iters:
        self._train_with_early_stopping(
            labels, idx_train, idx_val, train_iters, patience, verbose)
    else:
        self._train_with_val(labels, idx_train, idx_val, train_iters, verbose)
def predict(self, features=None, adj=None):
    """Forward pass on *unnormalized* inputs; falls back to the data
    cached during training when both arguments are omitted."""
    self.eval()

    got_new_input = not (features is None and adj is None)
    if got_new_input:
        if type(adj) is not torch.Tensor:
            features, adj = utils.to_tensor(features, adj, device=self.device)
        self.features = features
        # Normalize and cache the new adjacency for this (and later) calls.
        sparse = utils.is_sparse_tensor(adj)
        if sparse:
            self.adj_norm = utils.normalize_adj_tensor(adj, sparse=True)
        else:
            self.adj_norm = utils.normalize_adj_tensor(adj)

    return self.forward(self.features, self.adj_norm)
def fit(self, features, adj, labels, idx_train, idx_val=None, train_iters=200,
        initialize=True, verbose=False, normalize=True, patience=500, **kwargs):
    """Train the gcn model; when ``idx_val`` is not None, pick the best
    model according to the validation loss.

    Parameters
    ----------
    features :
        node features
    adj :
        the adjacency matrix. The format could be torch.tensor or scipy matrix
    labels :
        node labels
    idx_train :
        node training indices
    idx_val :
        node validation indices. If not given (None), GCN training
        process will not adopt early stopping
    train_iters : int
        number of training epochs
    initialize : bool
        whether to initialize parameters before training
    verbose : bool
        whether to show verbose logs
    normalize : bool
        whether to normalize the input adjacency matrix
    patience : int
        patience for early stopping, only valid when ``idx_val`` is given
    """
    self.device = self.gc1.weight.device
    if initialize:
        self.initialize()

    # Put all inputs on the model device; convert non-tensor inputs first.
    if type(adj) is torch.Tensor:
        features = features.to(self.device)
        adj = adj.to(self.device)
        labels = labels.to(self.device)
    else:
        features, adj, labels = utils.to_tensor(
            features, adj, labels, device=self.device)

    if not normalize:
        adj_norm = adj
    else:
        is_sparse = utils.is_sparse_tensor(adj)
        if is_sparse:
            adj_norm = utils.normalize_adj_tensor(adj, sparse=True)
        else:
            adj_norm = utils.normalize_adj_tensor(adj)

    self.adj_norm = adj_norm
    self.features = features
    self.labels = labels

    if idx_val is None:
        self._train_without_val(labels, idx_train, train_iters, verbose)
    elif patience < train_iters:
        self._train_with_early_stopping(
            labels, idx_train, idx_val, train_iters, patience, verbose)
    else:
        self._train_with_val(labels, idx_train, idx_val, train_iters, verbose)
def fit(self, features, adj, labels, idx_train, idx_val=None, idx_test=None,
        train_iters=101, att_0=None, attention=False, model_name=None,
        initialize=True, verbose=False, normalize=False, patience=500):
    """Train the gcn model; when ``idx_val`` is not None, pick the best
    model according to the validation loss (with early stopping when
    ``patience < train_iters``).

    Parameters
    ----------
    features : node features
    adj : adjacency matrix (torch tensor or scipy matrix)
    labels : node labels
    idx_train : training node indices
    idx_val : validation node indices (optional)
    idx_test : test node indices, stored on the instance for later use
    train_iters : int, number of training epochs
    att_0 : precomputed attention coefficients; recomputed from
        ``att_coef(features, adj)`` when ``attention`` is True
    attention : bool, whether to replace ``adj`` with attention coefficients
    model_name : unused, kept for interface compatibility
    initialize : bool, whether to initialize parameters before training
    verbose : bool, whether to show verbose logs
    normalize : kept for interface compatibility but IGNORED — the
        D^{-1/2}(A+I)D^{-1/2} normalization is performed inside the GCN
        layers themselves, so ``adj`` is passed through unnormalized
    patience : int, early-stopping patience (only used with ``idx_val``)
    """
    self.sim = None
    self.attention = attention
    if self.attention:
        # Replace the raw adjacency with the attention coefficients.
        att_0 = att_coef(features, adj)
        adj = att_0
        self.sim = att_0

    self.idx_test = idx_test
    if initialize:
        self.initialize()

    # Convert inputs to tensors on the model's device if needed.
    if type(adj) is not torch.Tensor:
        features, adj, labels = utils.to_tensor(
            features, adj, labels, device=self.device)
    else:
        features = features.to(self.device)
        adj = adj.to(self.device)
        labels = labels.to(self.device)

    # Normalization is deliberately skipped here regardless of the
    # `normalize` argument: the original code forced `normalize = False`
    # because the norm is conducted inside the GCN (self.gcn1) model.
    # The previously unreachable normalization branch has been removed.
    adj_norm = adj

    self.adj_norm = adj_norm
    self.features = features
    self.labels = labels

    if idx_val is None:
        self._train_without_val(labels, idx_train, train_iters, verbose)
    elif patience < train_iters:
        self._train_with_early_stopping(
            labels, idx_train, idx_val, train_iters, patience, verbose)
    else:
        self._train_with_val(labels, idx_train, idx_val, train_iters, verbose)