def __init__(self, c, args):
    super(Shallow, self).__init__(c)
    self.manifold = getattr(manifolds, args.manifold)()
    self.use_feats = args.use_feats
    weights = torch.Tensor(args.n_nodes, args.dim)
    if not args.pretrained_embeddings:
        weights = self.manifold.init_weights(weights, self.c)
        trainable = True
    else:
        weights = torch.Tensor(np.load(args.pretrained_embeddings))
        assert weights.shape[0] == args.n_nodes, \
            "The embeddings you passed seem to be for another dataset."
        trainable = False
    self.lt = manifolds.ManifoldParameter(weights, trainable, self.manifold, self.c)
    self.all_nodes = torch.LongTensor(list(range(args.n_nodes)))
    layers = []
    if args.pretrained_embeddings is not None and args.num_layers > 0:
        # MLP layers after pre-trained embeddings
        dims, acts = get_dim_act(args)
        if self.use_feats:
            dims[0] = args.feat_dim + weights.shape[1]
        else:
            dims[0] = weights.shape[1]
        for i in range(len(dims) - 1):
            in_dim, out_dim = dims[i], dims[i + 1]
            act = acts[i]
            layers.append(Linear(in_dim, out_dim, args.dropout, act, args.bias))
    self.layers = nn.Sequential(*layers)
    self.encode_graph = False
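# Sketch (not part of the original class): producing a .npy file that Shallow
# accepts via args.pretrained_embeddings. The saved array must be shaped
# (n_nodes, dim) to pass the assert above; the helper name is hypothetical.
def _save_embeddings_for_shallow(lt, path):
    # `lt` is assumed to be a trained ManifoldParameter as built in __init__.
    np.save(path, lt.data.detach().cpu().numpy())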
def __init__(self, c, args):
    super(MLP, self).__init__(c)
    assert args.num_layers > 0
    dims, acts = get_dim_act(args)
    layers = []
    for i in range(len(dims) - 1):
        in_dim, out_dim = dims[i], dims[i + 1]
        act = acts[i]
        layers.append(Linear(in_dim, out_dim, args.dropout, act, args.bias))
    self.layers = nn.Sequential(*layers)
    self.encode_graph = False
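# Usage sketch (hypothetical): the loop above reduces to stacking one Linear
# per consecutive (dims[i], dims[i + 1]) pair. Written standalone, using only
# the Linear layer and nn.Sequential already referenced in this module:
def _stack_linears(dims, acts, dropout, bias):
    return nn.Sequential(*[
        Linear(dims[i], dims[i + 1], dropout, acts[i], bias)
        for i in range(len(dims) - 1)
    ])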
def __init__(self, c, args):
    super(DeepGCN, self).__init__(c)
    assert args.num_layers > 0
    dims, acts = get_dim_act(args)
    gc_layers = []
    gc_layers.append(
        GraphConvolution(dims[0], dims[1], args.dropout, acts[0], args.bias))
    gc_layers.append(
        OriRevLayer(dims, dims[0], dims[1], args.dropout, acts[0], args.bias))
    self.layers = nn.Sequential(*gc_layers)
    self.encode_graph = True
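# OriRevLayer is specific to this repo. For reference, deep reversible GNNs
# commonly use RevNet-style additive coupling, which makes a block invertible
# so activations can be recomputed during backprop instead of stored. A minimal
# sketch of that general idea (an illustration, not OriRevLayer's actual code):
class _AdditiveCouplingBlock(nn.Module):
    def __init__(self, f, g):
        super(_AdditiveCouplingBlock, self).__init__()
        self.f, self.g = f, g  # two arbitrary sub-networks of matching width

    def forward(self, x1, x2):
        y1 = x1 + self.f(x2)
        y2 = x2 + self.g(y1)
        return y1, y2

    def inverse(self, y1, y2):
        # Exact inversion: the inputs are recoverable from the outputs alone.
        x2 = y2 - self.g(y1)
        x1 = y1 - self.f(x2)
        return x1, x2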
def __init__(self, c, args):
    super(GCN, self).__init__(c)
    assert args.num_layers > 0
    dims, acts = get_dim_act(args)
    gc_layers = []
    for i in range(len(dims) - 1):
        in_dim, out_dim = dims[i], dims[i + 1]
        act = acts[i]
        gc_layers.append(
            GraphConvolution(in_dim, out_dim, args.dropout, act, args.bias))
    self.layers = nn.Sequential(*gc_layers)
    self.encode_graph = True
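# Note on nn.Sequential here: chaining graph layers this way presumes each
# GraphConvolution consumes and returns an (x, adj) pair so the adjacency is
# threaded through the stack. A usage sketch under that assumption:
def _gcn_encode(layers, x, adj):
    output, _ = layers((x, adj))
    return output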
def __init__(self, c, args):
    super(GAT, self).__init__(c)
    assert args.num_layers > 0
    dims, acts = get_dim_act(args)
    gat_layers = []
    for i in range(len(dims) - 1):
        in_dim = dims[i]
        act = acts[i]
        assert dims[i + 1] % args.n_heads == 0
        out_dim = dims[i + 1] // args.n_heads
        concat = True
        gat_layers.append(
            GraphAttentionLayer(in_dim, out_dim, args.dropout, act,
                                args.alpha, args.n_heads, concat))
    self.layers = nn.Sequential(*gat_layers)
    self.encode_graph = True
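# Head arithmetic above: each layer's nominal width dims[i + 1] is split across
# args.n_heads attention heads (hence the divisibility assert); with concat=True
# the per-head outputs are concatenated back to dims[i + 1] features. A small
# self-check with hypothetical numbers:
def _gat_head_widths(width=64, n_heads=8):
    assert width % n_heads == 0
    per_head = width // n_heads         # out_dim handed to GraphAttentionLayer
    assert per_head * n_heads == width  # full width restored by concatenation
    return per_head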