class LinearDecoder(Decoder):
    """
    MLP Decoder for Hyperbolic/Euclidean node classification models.
    """

    def __init__(self, c, args):
        super(LinearDecoder, self).__init__(c)
        self.manifold = getattr(manifolds, args.manifold)()
        self.input_dim = args.dim
        self.output_dim = args.n_classes
        self.bias = args.bias
        self.cls = Linear(self.input_dim, self.output_dim, args.dropout, lambda x: x, self.bias)
        self.decode_adj = False

    def decode(self, x, adj):
        # Map manifold points to the tangent space at the origin
        # (logmap0 + proj_tan0) before applying the Euclidean classifier.
        h = self.manifold.proj_tan0(self.manifold.logmap0(x, c=self.c), c=self.c)
        return super(LinearDecoder, self).decode(h, adj)

    def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}, c={}'.format(
            self.input_dim, self.output_dim, self.bias, self.c
        )

    def reset_parameters(self):
        self.cls.reset_parameters()
        print('GNN classification decoder reset finished')
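# Usage sketch (illustrative only, not from the source): the `args` fields
# below are assumptions about the config namespace this repo passes around,
# and 'PoincareBall' is assumed to be one of the manifolds the `manifolds`
# package exposes. decode() pulls points back to the tangent space at the
# origin, then the base Decoder applies the Euclidean linear classifier.
if __name__ == '__main__':
    from argparse import Namespace
    import torch

    args = Namespace(manifold='PoincareBall', dim=16, n_classes=7,
                     dropout=0.0, bias=1)
    dec = LinearDecoder(c=1.0, args=args)
    x = 1e-2 * torch.randn(10, 16)      # small norms: points inside the ball
    logits = dec.decode(x, adj=None)    # -> [10, 7] class scores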
def __init__(self, c, args):
    super(Shallow, self).__init__(c)
    self.manifold = getattr(manifolds, args.manifold)()
    self.use_feats = args.use_feats
    weights = torch.Tensor(args.n_nodes, args.dim)
    if not args.pretrained_embeddings:
        # No pre-trained embeddings: random init on the manifold, trainable.
        weights = self.manifold.init_weights(weights, self.c)
        trainable = True
    else:
        # Load fixed pre-trained embeddings from a .npy file.
        weights = torch.Tensor(np.load(args.pretrained_embeddings))
        assert weights.shape[0] == args.n_nodes, \
            "The embeddings you passed seem to be for another dataset."
        trainable = False
    self.lt = manifolds.ManifoldParameter(weights, trainable, self.manifold, self.c)
    self.all_nodes = torch.LongTensor(list(range(args.n_nodes)))
    layers = []
    if args.pretrained_embeddings is not None and args.num_layers > 0:
        # MLP layers after pre-trained embeddings
        dims, acts = get_dim_act(args)
        if self.use_feats:
            dims[0] = args.feat_dim + weights.shape[1]
        else:
            dims[0] = weights.shape[1]
        for i in range(len(dims) - 1):
            in_dim, out_dim = dims[i], dims[i + 1]
            act = acts[i]
            layers.append(Linear(in_dim, out_dim, args.dropout, act, args.bias))
    self.layers = nn.Sequential(*layers)
    self.encode_graph = False
def __init__(self, c, args):
    super(LinearDecoder, self).__init__(c)
    if args.manifold == 'MixedCurvature':
        self.manifold = getattr(manifolds, args.manifold)(args.split_idx)
    else:
        self.manifold = getattr(manifolds, args.manifold)()
    self.input_dim = args.dim
    self.output_dim = args.n_classes
    self.bias = args.bias
    self.cls = Linear(self.input_dim, self.output_dim, args.dropout, lambda x: x, self.bias)
    self.decode_adj = False
def __init__(self, c, args):
    super(MLP, self).__init__(c)
    assert args.num_layers > 0
    dims, acts = get_dim_act(args)
    layers = []
    for i in range(len(dims) - 1):
        in_dim, out_dim = dims[i], dims[i + 1]
        act = acts[i]
        layers.append(Linear(in_dim, out_dim, args.dropout, act, args.bias))
    self.layers = nn.Sequential(*layers)
    self.encode_graph = False
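# Hedged sketch of the contract the MLP above relies on: get_dim_act is
# expected to return one layer width per boundary and one activation per
# Linear layer, so len(dims) == len(acts) + 1. The real helper lives
# elsewhere in the repo; this stand-in only assumes HGCN-style args fields
# (args.act, args.num_layers, args.feat_dim, args.dim) for illustration.
import torch.nn.functional as F

def get_dim_act_sketch(args):
    # Identity when no activation name is given, else look it up on F.
    act = (lambda x: x) if not args.act else getattr(F, args.act)
    acts = [act] * (args.num_layers - 1)
    dims = [args.feat_dim] + [args.dim] * (args.num_layers - 1)
    return dims, acts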
def __init__(self, c, args, task):
    super(HGCAEDecoder, self).__init__(c)
    self.manifold = getattr(manifolds, args.manifold)()

    if task == 'nc':
        self.input_dim = args.dim
        self.output_dim = args.n_classes
        self.bias = args.bias
        self.classifier = Linear(self.input_dim, self.output_dim, args.dropout, lambda x: x, self.bias)
        self.decode_adj = False
    elif task == 'rec':
        assert args.num_layers > 0
        dims, acts, _ = hyp_layers.get_dim_act_curv(args)
        # Mirror the encoder: reverse dims/activations and drop the
        # activation on the final (output) layer.
        dims = dims[::-1]
        acts = acts[::-1][:-1] + [lambda x: x]  # last layer without act
        self.curvatures = self.c[::-1]

        encdec_share_curvature = False
        if not encdec_share_curvature and args.num_layers == args.num_dec_layers:
            # Do not share curvatures, enc-dec mirror-shape: keep the
            # innermost curvature and refill the rest from args.c.
            num_c = len(self.curvatures)
            self.curvatures = self.curvatures[:1]
            if args.c_trainable == 1:
                self.curvatures += [nn.Parameter(torch.Tensor([args.c]).to(args.device))] * (num_c - 1)
            else:
                self.curvatures += [torch.tensor([args.c])] * (num_c - 1)
                if not args.cuda == -1:
                    self.curvatures = [curv.to(args.device) for curv in self.curvatures]

        # The last c_out is None: the final layer decodes to Euclidean space.
        self.curvatures = self.curvatures[:-1] + [None]

        hgc_layers = []
        num_dec_layers = args.num_dec_layers
        for i in range(num_dec_layers):
            c_in, c_out = self.curvatures[i], self.curvatures[i + 1]
            in_dim, out_dim = dims[i], dims[i + 1]
            act = acts[i]
            hgc_layers.append(
                hyp_layers.HyperbolicGraphConvolution(
                    self.manifold, in_dim, out_dim, c_in, c_out,
                    args.dropout, act, args.bias, args.use_att,
                    att_type=args.att_type, att_logit=args.att_logit,
                    beta=args.beta, decode=True
                )
            )
        self.decoder = nn.Sequential(*hgc_layers)
        self.decode_adj = True
    else:
        raise RuntimeError('Unknown task')
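# Worked trace of the curvature bookkeeping above (illustrative values):
# with encoder curvatures self.c == [c0, c1, c2], args.c_trainable == 0,
# and num_layers == num_dec_layers == 2:
#
#   self.c[::-1]                    -> [c2, c1, c0]
#   keep first, refill with args.c  -> [c2, c,  c ]
#   replace last with None          -> [c2, c,  None]
#
# so decoder layer i maps from curvature self.curvatures[i] to
# self.curvatures[i + 1], and the final layer (c_out=None) emits
# Euclidean outputs for reconstruction.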
def __init__(self, c, args, task):
    super(HNNDecoder, self).__init__(c)
    self.manifold = getattr(manifolds, args.manifold)()

    if not args.cuda == -1:
        c = torch.Tensor([c]).to(args.device)

    if task == 'nc':
        self.input_dim = args.dim
        self.output_dim = args.n_classes
        self.bias = args.bias
        self.classifier = Linear(self.input_dim, self.output_dim, args.dropout, lambda x: x, self.bias)
        self.decode_adj = False
    elif task == 'rec':
        assert args.num_layers > 0
        dims, acts, _ = hyp_layers.get_dim_act_curv(args)
        dims = dims[::-1]
        acts = acts[::-1][:-1] + [lambda x: x]  # last layer without act

        encdec_share_curvature = False

        hnn_layers = []
        num_dec_layers = args.num_dec_layers
        for i in range(num_dec_layers):
            in_dim, out_dim = dims[i], dims[i + 1]
            act = acts[i]
            c_in = c
            # Only the final layer maps back to Euclidean space (c_out=None).
            c_out = None if (i == num_dec_layers - 1) else c
            hnn_layers.append(
                hyp_layers.HNNLayer(
                    self.manifold, in_dim, out_dim, c_in, c_out,
                    args.dropout, act, args.bias
                )
            )
        self.decoder = nn.Sequential(*hnn_layers)
        self.decode_adj = False
    else:
        raise RuntimeError('Unknown task')
def __init__(self, c, args):
    super(Shallow, self).__init__(c)
    self.manifold = getattr(manifolds, args.manifold)()
    self.use_feats = args.use_feats
    self.pretrained_embeddings = args.pretrained_embeddings
    self.n_nodes = args.n_nodes
    self.weights = torch.Tensor(args.n_nodes, args.dim)
    layers = []
    if args.pretrained_embeddings is not None and args.num_layers > 0:
        # MLP layers after pre-trained embeddings
        dims, acts = get_dim_act(args)
        if self.use_feats:
            dims[0] = args.feat_dim + self.weights.shape[1]
        else:
            dims[0] = self.weights.shape[1]
        for i in range(len(dims) - 1):
            in_dim, out_dim = dims[i], dims[i + 1]
            act = acts[i]
            layers.append(Linear(in_dim, out_dim, args.dropout, act, args.bias))
    self.layers = nn.Sequential(*layers)
    self.reset_parameters()
    self.encode_graph = False
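# reset_parameters() is called above but its body is not part of this
# excerpt. A hedged sketch of what it plausibly does, mirroring the
# initialization path of the Shallow variant shown earlier (random manifold
# init for trainable embeddings, loading a fixed .npy file otherwise):
def reset_parameters(self):
    if self.pretrained_embeddings is not None:
        # Fixed pre-trained embeddings loaded from disk.
        weights = torch.Tensor(np.load(self.pretrained_embeddings))
        assert weights.shape[0] == self.n_nodes, \
            "The embeddings you passed seem to be for another dataset."
        trainable = False
    else:
        # Random initialization on the manifold, trainable.
        weights = self.manifold.init_weights(self.weights, self.c)
        trainable = True
    self.lt = manifolds.ManifoldParameter(weights, trainable, self.manifold, self.c)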