Example #1
    def __init__(self,
                 in_dim,
                 hidden_dim,
                 layer_num,
                 sample_size,
                 device,
                 GNN_name="GIN"):
        super(GNNStructEncoder, self).__init__()
        self.n_distribution = 7  # number of Gaussian mixture components
        self.out_dim = hidden_dim
        if GNN_name == "GIN":
            self.linear1 = MLP(layer_num, hidden_dim, hidden_dim, hidden_dim)
            self.graphconv1 = GINConv(apply_func=self.linear1,
                                      aggregator_type='sum')
            self.linear2 = MLP(layer_num, hidden_dim, hidden_dim, hidden_dim)
            self.graphconv2 = GINConv(apply_func=self.linear2,
                                      aggregator_type='sum')
        elif GNN_name == "GCN":
            self.graphconv1 = GraphConv(hidden_dim, hidden_dim)
            self.graphconv2 = GraphConv(hidden_dim, hidden_dim)
        else:
            self.graphconv = GATConv(hidden_dim, hidden_dim, num_heads=10)
        # self.neighbor_num_list = neighbor_num_list
        self.linear_classifier = MLP(1, hidden_dim, hidden_dim,
                                     self.n_distribution)
        self.neighbor_generator = MLP_generator(hidden_dim, hidden_dim,
                                                sample_size).to(device)
        # Gaussian means and log-std of the mixture components.
        # Note: .to(device) must be applied to the tensor *before* wrapping
        # it in nn.Parameter; calling .to() on a Parameter returns a plain
        # tensor that is never registered with the module.
        self.gaussian_mean = nn.Parameter(
            torch.FloatTensor(sample_size, self.n_distribution,
                              hidden_dim).uniform_(-0.5 / hidden_dim,
                                                   0.5 / hidden_dim).to(device))
        self.gaussian_log_sigma = nn.Parameter(
            torch.FloatTensor(sample_size, self.n_distribution,
                              hidden_dim).uniform_(-0.5 / hidden_dim,
                                                   0.5 / hidden_dim).to(device))
        self.m = torch.distributions.Normal(
            torch.zeros(sample_size, self.n_distribution, hidden_dim),
            torch.ones(sample_size, self.n_distribution, hidden_dim))

        # Gaussian mean and log-std used before the MLP generator.
        self.mlp_gaussian_mean = nn.Parameter(
            torch.FloatTensor(hidden_dim).uniform_(
                -0.5 / hidden_dim, 0.5 / hidden_dim).to(device))
        self.mlp_gaussian_log_sigma = nn.Parameter(
            torch.FloatTensor(hidden_dim).uniform_(
                -0.5 / hidden_dim, 0.5 / hidden_dim).to(device))
        self.mlp_m = torch.distributions.Normal(torch.zeros(hidden_dim),
                                                torch.ones(hidden_dim))

        # Decoders
        self.degree_decoder = FNN(hidden_dim, hidden_dim, 1, 4)
        # self.degree_loss_func = FocalLoss(int(max_degree_num) + 1)
        self.degree_loss_func = nn.MSELoss()
        self.pool = mp.Pool(1)  # requires `import multiprocessing as mp`
        self.in_dim = in_dim
        self.sample_size = sample_size
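
The mean and log-sigma parameters, together with the standard normal self.m, suggest reparameterized sampling of neighbor embeddings. Below is a minimal sketch of how such a sample could be drawn; the method name sample_neighbor_embeddings is hypothetical and not part of the original class:

    def sample_neighbor_embeddings(self):
        # Hypothetical sketch: reparameterization trick, z = mu + sigma * eps,
        # with eps ~ N(0, I) drawn from the standard normal self.m.
        eps = self.m.sample().to(self.gaussian_mean.device)
        std = torch.exp(self.gaussian_log_sigma)
        return self.gaussian_mean + std * eps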
Example #2
    def __init__(self, in_dim, hid_dim, n_layer):
        super(GINEncoder, self).__init__()

        self.n_layer = n_layer

        self.convs = ModuleList()
        self.bns = ModuleList()

        for i in range(n_layer):
            n_in = in_dim if i == 0 else hid_dim
            block = Sequential(Linear(n_in, hid_dim), ReLU(),
                               Linear(hid_dim, hid_dim))

            conv = GINConv(block, 'sum')
            bn = BatchNorm1d(hid_dim)

            self.convs.append(conv)
            self.bns.append(bn)

        # sum pooling
        self.pool = SumPooling()
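
For context, a plausible forward pass for this encoder over a DGL graph g with node features h might look as follows; the ReLU placement and the use of torch.nn.functional as F are assumptions, not taken from the source:

    def forward(self, g, h):
        for conv, bn in zip(self.convs, self.bns):
            h = conv(g, h)   # GIN message passing
            h = bn(h)        # batch normalization
            h = F.relu(h)    # assumed nonlinearity between layers
        return self.pool(g, h)  # graph-level readout via sum pooling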
Example #3
    def __init__(self, in_feats, h_feats, num_classes, pooling):
        super(GCN_GINConv, self).__init__()
        assert isinstance(h_feats, list), "h_feats must be a list"
        assert len(h_feats) != 0, \
            "h_feats is empty; unable to add hidden layers"
        # ModuleList registers each GINConv as a submodule so its
        # parameters are tracked by the model (a plain list would not).
        self.list_of_layers = nn.ModuleList()
        dim = [in_feats] + h_feats

        # Convolution (Hidden) Layers
        for i in range(1, len(dim)):
            lin = nn.Linear(dim[i - 1], dim[i])
            self.list_of_layers.append(GINConv(lin, 'sum'))

        # Final Layer
        self.final = nn.Linear(dim[-1], num_classes)

        # Pooling layer
        if pooling == "AvgPooling":
            self.pooling_layer = dgl.nn.AvgPooling()
        elif pooling == "MaxPooling":
            self.pooling_layer = dgl.nn.MaxPooling()
        elif pooling == "SumPooling":
            self.pooling_layer = dgl.nn.SumPooling()
        else:
            raise NotImplementedError
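
One way the matching forward pass could look, assuming a DGL graph g and node features feat (the ReLU between layers is an assumption):

    def forward(self, g, feat):
        h = feat
        for layer in self.list_of_layers:
            h = F.relu(layer(g, h))       # GINConv + assumed activation
        hg = self.pooling_layer(g, h)     # graph-level readout
        return self.final(hg)             # class logits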
Example #4
    def __init__(self,
                 G,
                 hid_dims,
                 num_layers,
                 aggregator_type='sum',
                 multihot=True):
        super().__init__(G, hid_dims, num_layers, multihot)

        self.aggregator_type = aggregator_type
        self.input_layer = GINConv(nn.Linear(self.in_dims, hid_dims),
                                   aggregator_type)
        # A plain Python list would hide these layers from the module;
        # ModuleList registers their parameters properly.
        self.hidden_layers = nn.ModuleList([
            GINConv(nn.Linear(hid_dims, hid_dims), aggregator_type)
            for _ in range(num_layers)
        ])
        self.output_layer = GINConv(nn.Linear(hid_dims, self.out_dims),
                                    aggregator_type)
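
The constructor builds an input projection, num_layers hidden GIN convolutions, and an output head. A sketch of the forward computation this implies, with the signature and the absence of nonlinearities both assumed:

    def forward(self, g, x):
        h = self.input_layer(g, x)
        for layer in self.hidden_layers:
            h = layer(g, h)
        return self.output_layer(g, h)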
Example #5
    def __init__(self, num_features, num_classes, dim=10):
        super(NetGIN, self).__init__()

        nn1 = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))
        self.conv1 = GINConv(nn1, "sum")

        nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv2 = GINConv(nn2, "sum")

        nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv3 = GINConv(nn3, "sum")

        nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv4 = GINConv(nn4, "sum")

        nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv5 = GINConv(nn5, "sum")

        self.l1 = Linear(dim, 1, bias=False)
        self.l2 = Linear(dim, 1, bias=False)
        self.l3 = Linear(dim, 1, bias=False)
        self.l4 = Linear(dim, 1, bias=False)
        self.l5 = Linear(dim, 1, bias=False)
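
The five scalar heads l1 through l5 suggest that each GIN layer contributes its own pooled score. One plausible (entirely assumed) forward pass, using DGL's SumPooling for the readout:

    def forward(self, g, x):
        pool = SumPooling()  # assumed readout; from dgl.nn
        x1 = F.relu(self.conv1(g, x))
        x2 = F.relu(self.conv2(g, x1))
        x3 = F.relu(self.conv3(g, x2))
        x4 = F.relu(self.conv4(g, x3))
        x5 = F.relu(self.conv5(g, x4))
        # Each layer's pooled representation is scored by its own head
        # and the scores are summed (a jumping-knowledge-style readout).
        return (self.l1(pool(g, x1)) + self.l2(pool(g, x2)) +
                self.l3(pool(g, x3)) + self.l4(pool(g, x4)) +
                self.l5(pool(g, x5)))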
Example #6
File: gin.py  Project: wangxiaoyunNV/dgl
    def __init__(self,
                 data_info: dict,
                 embed_size: int = -1,
                 hidden_size=64,
                 num_layers=3,
                 aggregator_type='sum'):
        """Graph Isomophism Networks

        Edge feature is ignored in this model.

        Parameters
        ----------
        data_info : dict
            The information about the input dataset.
        embed_size : int
            The dimension of the created embedding table. -1 means using
            the original node embedding.
        hidden_size : int
            Hidden size.
        num_layers : int
            Number of layers.
        aggregator_type : str
            Aggregator type to use (``sum``, ``max`` or ``mean``), default: 'sum'.
        """
        super().__init__()
        self.data_info = data_info
        self.embed_size = embed_size
        self.conv_list = nn.ModuleList()
        self.num_layers = num_layers
        if embed_size > 0:
            self.embed = nn.Embedding(data_info["num_nodes"], embed_size)
            in_size = embed_size
        else:
            in_size = data_info["in_size"]
        for i in range(num_layers):
            input_dim = in_size if i == 0 else hidden_size
            mlp = nn.Sequential(nn.Linear(input_dim, hidden_size),
                                nn.BatchNorm1d(hidden_size), nn.ReLU(),
                                nn.Linear(hidden_size, hidden_size), nn.ReLU())

            self.conv_list.append(GINConv(mlp, aggregator_type, 1e-5, True))
        # Output size is assumed to come from the dataset info dict, matching
        # how "num_nodes" and "in_size" are read above.
        self.out_mlp = nn.Linear(hidden_size, data_info["out_size"])
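
A sketch of the forward pass this constructor implies; the handling of the embedding table and the node-feature argument are assumptions based on the fields set above:

    def forward(self, g, node_feat=None):
        # Use the learned embedding table when one was created,
        # otherwise fall back to the provided node features.
        h = self.embed.weight if self.embed_size > 0 else node_feat
        for conv in self.conv_list:
            h = conv(g, h)  # GINConv applies the MLP after aggregation
        return self.out_mlp(h)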