Example #1
    def __init__(self, graph, input_dim, hidden_dim, output_dim, num_layers=5):
        """model parameters setting
        Paramters
        ---------
        input_dim: int
            The dimensionality of input features
        hidden_dim: int
            The dimensionality of hidden units at ALL layers
        output_dim: int
            The number of classes for prediction
        """
        super(GIN, self).__init__()
        self.g = graph
        self.num_layers = num_layers
        self.ginlayers = torch.nn.ModuleList()

        for layer in range(self.num_layers):
            if layer == 0:
                mlp = nn.Linear(input_dim, hidden_dim)
            elif layer < self.num_layers - 1:
                mlp = nn.Linear(hidden_dim, hidden_dim)
            else:
                mlp = nn.Linear(hidden_dim, output_dim)

            self.ginlayers.append(
                GINConv(ApplyNodeFunc(mlp), "sum", init_eps=0,
                        learn_eps=False))
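
The snippet ends at the constructor. A minimal matching forward pass might look like the sketch below (assumptions: DGL's GINConv call convention layer(graph, feats), and torch.nn.functional imported as F):

    def forward(self, h):
        # Sketch only: chain the GIN layers over the stored graph, with ReLU
        # between layers; the last layer maps to output_dim class scores.
        for i, layer in enumerate(self.ginlayers):
            h = layer(self.g, h)
            if i < self.num_layers - 1:
                h = F.relu(h)
        return h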
Example #2
    def __init__(self, num_layers, d, out_d, reinit, **kwargs):

        super().__init__(emb_size=d)

        self.d = d
        self.num_layers = num_layers

        self.layers = nn.ModuleList([
            GINConv(apply_func=nn.Linear(d, d), aggregator_type="sum")
            for _ in range(num_layers)
        ])

        self.ln = nn.Linear(d, out_d)
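
No forward pass is shown for this example either; under the same DGL GINConv assumptions (and with the inter-layer activation being a guess), a sketch could be:

    def forward(self, g, h):
        # Sketch only: each conv keeps width d; self.ln maps d -> out_d.
        for conv in self.layers:
            h = F.relu(conv(g, h))  # activation choice is assumed
        return self.ln(h)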
Example #3
File: GIN.py Project: ru829/OCGNN
    def __init__(self, num_layers, num_mlp_layers, input_dim, hidden_dim,
                 output_dim, final_dropout, learn_eps, graph_pooling_type,
                 neighbor_pooling_type):
        """model parameters setting

        Paramters
        ---------
        num_layers: int
            The number of linear layers in the neural network
        num_mlp_layers: int
            The number of linear layers in mlps
        input_dim: int
            The dimensionality of input features
        hidden_dim: int
            The dimensionality of hidden units at ALL layers
        output_dim: int
            The number of classes for prediction
        final_dropout: float
            dropout ratio on the final linear layer
        learn_eps: boolean
            If True, learn epsilon to distinguish center nodes from neighbors
            If False, aggregate neighbors and center nodes altogether.
        neighbor_pooling_type: str
            how to aggregate neighbors (sum, mean, or max)
        graph_pooling_type: str
            how to aggregate entire nodes in a graph (sum, mean or max)

        """
        super(GIN, self).__init__()
        self.num_layers = num_layers
        self.learn_eps = learn_eps

        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        # Modified here; the original loop was: for layer in range(self.num_layers - 1):
        for layer in range(self.num_layers):
            if layer == 0:
                mlp = MLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim)
            else:
                mlp = MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim)

            self.ginlayers.append(
                GINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0,
                        self.learn_eps))
            self.batch_norms.append(nn.BatchNorm1d(hidden_dim))

        # Linear function for graph poolings of output of each layer,
        # which maps the output of different layers into a prediction score
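
ApplyNodeFunc and MLP are helpers defined elsewhere in the project. In DGL's reference GIN example, ApplyNodeFunc wraps the MLP with batch normalization and ReLU; a sketch of that helper (assuming torch.nn as nn, torch.nn.functional as F, and an MLP exposing an output_dim attribute, as in DGL's example):

class ApplyNodeFunc(nn.Module):
    """Update node features with the MLP, then BatchNorm and ReLU."""
    def __init__(self, mlp):
        super(ApplyNodeFunc, self).__init__()
        self.mlp = mlp
        self.bn = nn.BatchNorm1d(mlp.output_dim)

    def forward(self, h):
        return F.relu(self.bn(self.mlp(h)))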
Example #4
    def __init__(self, nfeat, nhid, nclass, dropout=0.5, lr=0.01, weight_decay=5e-4, device=None):

        super(GIN, self).__init__()

        self.dropout = dropout
        self.device = device
        self.nfeat = nfeat
        self.hidden_sizes = nhid
        self.nclass = nclass
        self.gc1 = GINConv(apply_func=None, aggregator_type='sum', init_eps=0.03)
        self.gc2 = nn.Linear(nfeat, nhid, bias=True)
        self.gc3 = nn.Linear(nhid, nclass, bias=True)
        self.lr = lr
        self.weight_decay = weight_decay
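
With apply_func=None, DGL's GINConv only computes (1 + eps) * h plus the aggregated neighbor features, so the linear layers here must be applied separately. A sketch of how these pieces might compose (assumed, not from the source project; F is torch.nn.functional):

    def forward(self, g, x):
        x = self.gc1(g, x)   # aggregation only; output width stays nfeat
        x = F.relu(self.gc2(x))
        x = F.dropout(x, self.dropout, training=self.training)
        return self.gc3(x)   # nhid -> nclass logits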
Example #5
    def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs):
        super().__init__(**kwargs)
        self.layers = nn.ModuleList()
        for _ in range(self.n_layers):
            apply_func_layers = sum(
                [[nn.Linear(self.hidden_dim, self.hidden_dim),
                  self.get_act(),
                  self.get_norm(self.hidden_dim),
                  nn.Dropout(self.p_dropout)] for _ in
                 range(n_apply_func_layers)],
                [])
            apply_func = nn.Sequential(*apply_func_layers)
            self.layers.append(GINConv(apply_func=apply_func,
                                       aggregator_type=aggregator_type,
                                       init_eps=init_eps,
                                       learn_eps=learn_eps))
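
Design note: the sum(list_of_lists, []) call flattens the per-sub-layer [Linear, activation, norm, Dropout] groups into one flat list before nn.Sequential unpacks it. The attributes n_layers, hidden_dim, get_act, get_norm, and p_dropout are presumably provided by the (unshown) base class initialized via super().__init__(**kwargs).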
Example #6
    def __init__(self, input_dimension: int, dimensions: _typing.Sequence[int],
                 num_mlp_layers: int, act: _typing.Optional[str], _eps: str,
                 neighbor_pooling_type: str):
        super(_GIN, self).__init__()
        self.__num_layers: int = len(dimensions)

        self._act: _typing.Optional[str] = act

        self.__gin_layers: torch.nn.ModuleList = torch.nn.ModuleList()
        self.__batch_normalizations: torch.nn.ModuleList = torch.nn.ModuleList(
        )
        for layer in range(self.__num_layers):
            mlp = MLP(num_mlp_layers,
                      input_dimension if layer == 0 else dimensions[layer - 1],
                      dimensions[layer], dimensions[layer])
            self.__gin_layers.append(
                GINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0,
                        _eps.lower() == "true"))
            self.__batch_normalizations.append(
                torch.nn.BatchNorm1d(dimensions[layer]))
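
A hypothetical instantiation of this constructor (names and sizes are illustrative only): three GIN layers of widths 64, 64, and 32, each with a two-layer MLP and a learnable epsilon:

model = _GIN(input_dimension=16, dimensions=[64, 64, 32],
             num_mlp_layers=2, act="relu", _eps="True",
             neighbor_pooling_type="sum")

Note that _eps arrives as a string and is parsed with _eps.lower() == "true", so "True", "true", and "TRUE" all enable the learnable epsilon.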
Example #7
    def __init__(
        self,
        num_layers,
        num_mlp_layers,
        input_dim,
        hidden_dim,
        output_dim,
        final_dropout,
        learn_eps,
        graph_pooling_type,
        neighbor_pooling_type,
        use_selayer,
    ):
        """model parameters setting

        Paramters
        ---------
        num_layers: int
            The number of linear layers in the neural network
        num_mlp_layers: int
            The number of linear layers in mlps
        input_dim: int
            The dimensionality of input features
        hidden_dim: int
            The dimensionality of hidden units at ALL layers
        output_dim: int
            The number of classes for prediction
        final_dropout: float
            dropout ratio on the final linear layer
        learn_eps: boolean
            If True, learn epsilon to distinguish center nodes from neighbors
            If False, aggregate neighbors and center nodes altogether.
        neighbor_pooling_type: str
            how to aggregate neighbors (sum, mean, or max)
        graph_pooling_type: str
            how to aggregate entire nodes in a graph (sum, mean or max)
        use_selayer: boolean
            If True, use an SELayer in place of BatchNorm between GIN layers

        """
        super(UnsupervisedGIN, self).__init__()
        self.num_layers = num_layers
        self.learn_eps = learn_eps

        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        for layer in range(self.num_layers - 1):
            if layer == 0:
                mlp = MLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim, use_selayer)
            else:
                mlp = MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim, use_selayer)

            self.ginlayers.append(
                GINConv(
                    ApplyNodeFunc(mlp, use_selayer),
                    neighbor_pooling_type,
                    0,
                    self.learn_eps,
                )
            )
            self.batch_norms.append(
                SELayer(hidden_dim, int(np.sqrt(hidden_dim))) if use_selayer else nn.BatchNorm1d(hidden_dim)
            )

        # Linear function for graph poolings of output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()

        for layer in range(num_layers):
            if layer == 0:
                self.linears_prediction.append(nn.Linear(input_dim, output_dim))
            else:
                self.linears_prediction.append(nn.Linear(hidden_dim, output_dim))

        self.drop = nn.Dropout(final_dropout)

        if graph_pooling_type == "sum":
            self.pool = SumPooling()
        elif graph_pooling_type == "mean":
            self.pool = AvgPooling()
        elif graph_pooling_type == "max":
            self.pool = MaxPooling()
        else:
            raise NotImplementedError
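
The readout this constructor prepares follows DGL's GIN example: node features from every depth are pooled over the graph, scored by that depth's own linear layer, and the per-layer scores are summed. A sketch of such a forward pass (assumed; F is torch.nn.functional):

    def forward(self, g, h):
        hidden_rep = [h]  # depth-0 representation is the raw input features
        for i, layer in enumerate(self.ginlayers):
            h = layer(g, h)
            h = self.batch_norms[i](h)
            h = F.relu(h)
            hidden_rep.append(h)

        score_over_layer = 0
        for i, h in enumerate(hidden_rep):
            pooled = self.pool(g, h)
            score_over_layer = score_over_layer + self.drop(
                self.linears_prediction[i](pooled))
        return score_over_layer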
Example #8
    def __init__(self, args):
        """model parameters setting

        Paramters
        ---------
        num_layers: int
            The number of linear layers in the neural network
        num_mlp_layers: int
            The number of linear layers in mlps
        input_dim: int
            The dimensionality of input features
        hidden_dim: int
            The dimensionality of hidden units at ALL layers
        output_dim: int
            The number of classes for prediction
        final_dropout: float
            dropout ratio on the final linear layer
        eps: boolean
            If True, learn epsilon to distinguish center nodes from neighbors
            If False, aggregate neighbors and center nodes altogether.
        neighbor_pooling_type: str
            how to aggregate neighbors (sum, mean, or max)
        graph_pooling_type: str
            how to aggregate entire nodes in a graph (sum, mean or max)

        """
        super(GIN, self).__init__()
        self.args = args

        missing_keys = list(
            {
                "features_num",
                "num_class",
                "num_graph_features",
                "num_layers",
                "hidden",
                "dropout",
                "act",
                "mlp_layers",
                "eps",
                "neighbor_pooling_type",
                "graph_pooling_type",
            }
            - set(self.args.keys())
        )
        if len(missing_keys) > 0:
            raise Exception("Missing keys: %s." % ",".join(missing_keys))

        self.num_graph_features = self.args["num_graph_features"]
        self.num_layers = self.args["num_layers"]
        assert self.num_layers > 2, "Number of layers in GIN should be at least 3"
        if self.num_layers != len(self.args["hidden"]) + 1:
            LOGGER.warning("layer count does not match the length of hidden units")

        self.eps = self.args["eps"] == "True"
        self.num_mlp_layers = self.args["mlp_layers"]
        input_dim = self.args["features_num"]
        hidden = self.args["hidden"]
        neighbor_pooling_type = self.args["neighbor_pooling_type"]
        graph_pooling_type = self.args["graph_pooling_type"]
        if self.args["act"] == "leaky_relu":
            act = LeakyReLU()
        elif self.args["act"] == "relu":
            act = ReLU()
        elif self.args["act"] == "elu":
            act = ELU()
        elif self.args["act"] == "tanh":
            act = Tanh()
        else:
            act = ReLU()
        final_dropout = self.args["dropout"]
        output_dim = self.args["num_class"]

        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        for layer in range(self.num_layers - 1):
            if layer == 0:
                mlp = MLP(self.num_mlp_layers, input_dim, hidden[layer], hidden[layer])
            else:
                mlp = MLP(self.num_mlp_layers, hidden[layer-1], hidden[layer], hidden[layer])

            self.ginlayers.append(
                GINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0, self.eps))
            self.batch_norms.append(nn.BatchNorm1d(hidden[layer]))

        # Linear function for graph poolings of output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()

        for layer in range(self.num_layers):
            if layer == 0:
                self.linears_prediction.append(
                    nn.Linear(input_dim, output_dim))
            else:
                self.linears_prediction.append(
                    nn.Linear(hidden[layer-1], output_dim))

        self.drop = nn.Dropout(final_dropout)

        if graph_pooling_type == 'sum':
            self.pool = SumPooling()
        elif graph_pooling_type == 'mean':
            self.pool = AvgPooling()
        elif graph_pooling_type == 'max':
            self.pool = MaxPooling()
        else:
            raise NotImplementedError
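
A hypothetical args dictionary satisfying the key check above (all values are illustrative; note hidden must have num_layers - 1 entries to pass the length check):

args = {
    "features_num": 16,
    "num_class": 7,
    "num_graph_features": 0,
    "num_layers": 4,
    "hidden": [64, 64, 64],
    "dropout": 0.5,
    "act": "relu",
    "mlp_layers": 2,
    "eps": "True",
    "neighbor_pooling_type": "sum",
    "graph_pooling_type": "mean",
}
model = GIN(args)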