def __init__(self, n_in, n_h, activation):
    """DGI encoder stack: GCN encoder, mean readout, and two discriminators.

    n_in: input feature dimension; n_h: hidden/embedding dimension;
    activation: activation spec forwarded to the GCN encoder.
    """
    super().__init__()
    # Node encoder and graph-summary machinery.
    self.gcn = GCN(n_in, n_h, activation)
    self.read = AvgReadout()
    self.sigm = nn.Sigmoid()  # squashes the readout summary
    # Two discriminators over n_h-dimensional embeddings.
    self.disc = Discriminator(n_h)
    self.disc2 = Discriminator2(n_h)
def __init__(self, n_in, n_h, activation, update_rule="GCNConv", batch_size=1, K=None, drop_sigma=False):
    """DGI with a pluggable GNN backbone selected by `update_rule`.

    A rule containing "GraphSkip" selects the GraphSkip encoder; anything
    else goes through GNNPlusAct. Both encoder classes reset their own
    parameters and apply the activation internally.
    """
    super().__init__()
    if "GraphSkip" in update_rule:
        self.gnn = GraphSkip.GraphSkip(n_in, n_h, activation, convolution=update_rule, K=K)
    else:
        self.gnn = GNNPlusAct(n_in, n_h, activation, update_rule, K=K, drop_sigma=drop_sigma)
    self.read = AvgReadout()
    self.sigm = nn.Sigmoid()
    self.disc = Discriminator(n_h, batch_size)
def __init__(self, num_layers, num_mlp_layers, input_dim, hidden_dim, neighbor_pooling_type, device):
    """DGI variant whose encoder is a GIN-style GraphCNN."""
    super().__init__()
    self.gin = GraphCNN(num_layers, num_mlp_layers, input_dim, hidden_dim,
                        neighbor_pooling_type, device)
    self.read = AvgReadout()   # graph summary = mean of node embeddings
    self.sigm = nn.Sigmoid()   # applied to the summary vector
    self.disc = Discriminator(hidden_dim)
def __init__(self, nfeat, nhid, shid, P, act):
    """DGI variant built on a hypergraph encoder (HGCN)."""
    super().__init__()
    self.hgcn = HGCN(nfeat, nhid, shid, P, act)
    self.read = AvgReadout()
    self.sigm = nn.Sigmoid()
    self.disc = Discriminator(nhid)
def __init__(self, n_nb, n_in, n_h, activation, num_clusters, beta, graph):
    """GIC with a GIN backbone: encoder, readout, node- and cluster-level discriminators."""
    super().__init__()
    # NOTE(review): `activation` is accepted but not forwarded to GINNet — confirm intended.
    self.gcn = GINNet(net_params=[n_in, 512, n_h], graph=graph)
    self.read = AvgReadout()
    self.sigm = nn.Sigmoid()
    self.disc = Discriminator(n_h)
    self.disc_c = Discriminator_cluster(n_h, n_h, n_nb, num_clusters)
    self.beta = beta  # trade-off between node-level and cluster-level terms
    self.cluster = Clusterator(n_h, num_clusters)
def __init__(self, n_nb, n_in, n_h, activation, num_clusters, beta, adj):
    """GIC with a baseline-GCN backbone: encoder, readout, node/cluster discriminators."""
    super().__init__()
    # NOTE(review): `activation` is accepted but not forwarded to the encoder — confirm intended.
    self.gcn = net_gcn_baseline(embedding_dim=[n_in, 512, n_h], adj=adj)
    self.read = AvgReadout()
    self.sigm = nn.Sigmoid()
    self.disc = Discriminator(n_h)
    self.disc_c = Discriminator_cluster(n_h, n_h, n_nb, num_clusters)
    self.beta = beta  # trade-off between node-level and cluster-level terms
    self.cluster = Clusterator(n_h, num_clusters)
def __init__(self, features, adj_lists, ft_size, n_h, activation, num_sample=[10, 10], skip_connection=False, gcn=True):
    """Inductive DGI built from two stacked GraphSAGE-style (aggregator, encoder) layers.

    features: node-feature lookup; adj_lists: adjacency lists; ft_size: raw
    feature dimension; n_h: embedding dimension per layer; num_sample:
    neighbor-sample counts for layers 1 and 2.
    NOTE(review): mutable default `num_sample=[10, 10]` is only read here,
    never mutated, so it is safe — kept to preserve the interface.
    """
    super().__init__()
    self.features = features
    self.skip_connection = skip_connection

    on_gpu = torch.cuda.is_available()  # evaluated once, shared by all layers
    # Layer 1: aggregate raw features, encode to n_h dimensions.
    self.agg1 = MeanAggregator(features, cuda=on_gpu, gcn=gcn, name='l1')
    self.enc1 = Encoder(features, ft_size, n_h, adj_lists, self.agg1,
                        num_sample=num_sample[0], gcn=gcn, cuda=on_gpu,
                        activation=activation, skip_connection=skip_connection,
                        name='l2')
    # Layer 2: consumes layer-1 embeddings lazily through a closure over enc1.
    self.agg2 = MeanAggregator(lambda nodes: self.enc1(nodes), cuda=on_gpu,
                               gcn=gcn, name='l3')
    self.enc2 = Encoder(lambda nodes: self.enc1(nodes), self.enc1.embed_dim, n_h,
                        adj_lists, self.agg2, num_sample=num_sample[1],
                        base_model=self.enc1, gcn=gcn, cuda=on_gpu,
                        activation=activation, skip_connection=skip_connection,
                        name='l4')

    self.read = AvgReadout()
    self.sigm = nn.Sigmoid()
    # Skip connections concatenate layer outputs, doubling the width the
    # discriminator must accept.
    self.disc = Discriminator(2 * n_h if skip_connection else n_h)
def __init__(self, n_nb, n_in, n_h, activation, num_clusters, beta):
    """Graph InfoClust (GIC): GCN encoder, readout, node/cluster discriminators, clusterator."""
    super().__init__()
    self.gcn = GCN(n_in, n_h, activation)
    self.read = AvgReadout()
    self.sigm = nn.Sigmoid()
    self.disc = Discriminator(n_h)
    self.disc_c = Discriminator_cluster(n_h, n_h, n_nb, num_clusters)
    self.beta = beta  # trade-off between node-level and cluster-level terms
    self.cluster = Clusterator(n_h, num_clusters)
def __init__(self, args):
    """Load and preprocess the DBLP multi-graph dataset, stashing tensors on self and config on args.

    Side effects: mutates `args` in place (device, dataset sizes, readout
    functions) and materializes adjacency/feature/label tensors on the
    chosen device.
    """
    args.batch_size = 1
    args.sparse = True
    args.metapaths_list = args.metapaths.split(",")
    args.gpu_num_ = args.gpu_num
    # Device selection: honor an explicit 'cpu' request, otherwise use the
    # requested GPU when CUDA is available.
    if args.gpu_num_ == 'cpu':
        args.device = 'cpu'
    else:
        args.device = torch.device(
            "cuda:" + str(args.gpu_num_) if torch.cuda.is_available() else "cpu")

    adj, features, labels, idx_train, idx_val, idx_test = process.load_data_dblp(args)
    features = [process.preprocess_features(feat) for feat in features]

    # Dataset dimensions, recorded on args for downstream model construction.
    args.nb_nodes = features[0].shape[0]
    args.ft_size = features[0].shape[1]
    args.nb_classes = labels.shape[1]
    args.nb_graphs = len(adj)
    args.adj = adj

    # Normalize each per-metapath adjacency and convert to torch sparse tensors.
    adj = [process.normalize_adj(mat) for mat in adj]
    self.adj = [process.sparse_mx_to_torch_sparse_tensor(mat) for mat in adj]

    # Add a leading batch axis (batch_size == 1) to features and labels.
    self.features = [torch.FloatTensor(feat[np.newaxis]) for feat in features]
    self.labels = torch.FloatTensor(labels[np.newaxis]).to(args.device)
    self.idx_train = torch.LongTensor(idx_train).to(args.device)
    self.idx_val = torch.LongTensor(idx_val).to(args.device)
    self.idx_test = torch.LongTensor(idx_test).to(args.device)

    # Dense class indices for each split (labels are one-hot along dim 1).
    self.train_lbls = torch.argmax(self.labels[0, self.idx_train], dim=1)
    self.val_lbls = torch.argmax(self.labels[0, self.idx_val], dim=1)
    self.test_lbls = torch.argmax(self.labels[0, self.idx_test], dim=1)

    # How to aggregate
    args.readout_func = AvgReadout()
    # Summary aggregation
    args.readout_act_func = nn.Sigmoid()

    self.args = args
def __init__(self, n_in, n_h, activation, critic="bilinear", dataset=None, attack_model=True):
    """DGI whose discriminator accepts a configurable critic and attack settings."""
    super().__init__()
    self.gcn = GCN(n_in, n_h, activation)
    self.read = AvgReadout()
    self.sigm = nn.Sigmoid()
    # Critic/dataset/attack configuration is handled entirely inside the discriminator.
    self.disc = Discriminator(n_h, critic=critic, dataset=dataset, attack_model=attack_model)
def __init__(self, n_in, n_h, activation):
    """Minimal DGI-style encoder: GCN encoder plus average readout.

    n_in: input feature dimension; n_h: hidden/embedding dimension;
    activation: activation spec forwarded to the GCN encoder.
    """
    # Fix: the parent initializer was never called. Every sibling
    # constructor in this file calls super().__init__() first; without it
    # an nn.Module subclass cannot register self.gcn / self.read as
    # submodules (modern PyTorch raises on the assignment).
    super().__init__()
    self.gcn = GCN(n_in, n_h, activation)
    self.read = AvgReadout()