Example #1
    def __init__(self, num_features, num_classes, hidden_size, num_heads, dropout):
        super(DrGAT, self).__init__()
        self.num_features = num_features
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.dropout = dropout
        # Two GAT layers, each preceded by a squeeze-and-excitation (SE) block
        # whose bottleneck is the square root of its input width.
        self.conv1 = GATLayer(num_features, hidden_size, nhead=num_heads, dropout=dropout)
        self.conv2 = GATLayer(hidden_size * num_heads, num_classes, nhead=1, dropout=dropout)
        self.se1 = SELayer(num_features, se_channels=int(np.sqrt(num_features)))
        self.se2 = SELayer(hidden_size * num_heads, se_channels=int(np.sqrt(hidden_size * num_heads)))
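For context, here is a forward pass consistent with this constructor. It is a sketch, not the repository's verbatim method: the (x, edge_index) call signature of GATLayer and the final log-softmax are assumptions.

    def forward(self, x, edge_index):
        # Sketch; assumes `import torch.nn.functional as F` is in scope.
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.se1(x)                       # recalibrate raw input features
        x = F.elu(self.conv1(x, edge_index))  # multi-head GAT -> hidden_size * num_heads
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.se2(x)                       # recalibrate concatenated head outputs
        x = self.conv2(x, edge_index)         # single-head GAT -> num_classes
        return F.log_softmax(x, dim=-1)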
Example #2
    def __init__(self, mlp, use_selayer):
        super(ApplyNodeFunc, self).__init__()
        self.mlp = mlp
        # Normalization over the MLP output: SELayer if requested, else BatchNorm1d.
        self.bn = (
            SELayer(self.mlp.output_dim, int(np.sqrt(self.mlp.output_dim)))
            if use_selayer
            else nn.BatchNorm1d(self.mlp.output_dim)
        )
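The matching forward is typically MLP, then the chosen normalization, then a nonlinearity; the ReLU below is an assumption, though it is the standard choice in GIN implementations.

    def forward(self, h):
        # Sketch; assumes `import torch.nn.functional as F` is in scope.
        h = self.mlp(h)  # node-wise MLP
        h = self.bn(h)   # SELayer or BatchNorm1d, as chosen in __init__
        return F.relu(h)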
Example #3
def drgat_model(num_features, hidden_size, num_classes, dropout, num_heads):
    layers = nn.ModuleList()
    layers.append(nn.Dropout(p=dropout))
    layers.append(SELayer(num_features,
                          se_channels=int(np.sqrt(num_features))))
    layers.append(
        GATLayer(num_features, hidden_size, nhead=num_heads,
                 attn_drop=dropout))
    layers.append(nn.ELU())
    layers.append(nn.Dropout(p=dropout))
    layers.append(
        SELayer(hidden_size * num_heads,
                se_channels=int(np.sqrt(hidden_size * num_heads))))
    layers.append(
        GATLayer(hidden_size * num_heads,
                 num_classes,
                 nhead=1,
                 attn_drop=dropout))
    layers.append(nn.ELU())

    return layers
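Because the returned nn.ModuleList mixes graph-aware layers (GATLayer) with pointwise ones (Dropout, SELayer, ELU), it cannot be wrapped in nn.Sequential directly. A minimal driver sketch, under the assumption that GATLayer is called as layer(x, edge_index):

def run_drgat(layers, x, edge_index):
    for layer in layers:
        if isinstance(layer, GATLayer):  # attention layers need the graph structure
            x = layer(x, edge_index)
        else:                            # Dropout / SELayer / ELU act on features only
            x = layer(x)
    return x

# Toy dimensions, purely for illustration:
layers = drgat_model(num_features=1433, hidden_size=8,
                     num_classes=7, dropout=0.6, num_heads=8)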
Example #4
    def __init__(self, num_features, num_classes, hidden_size, num_layers, dropout):
        super(DrGCN, self).__init__()

        self.num_features = num_features
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.dropout = dropout
        shapes = [num_features] + [hidden_size] * (num_layers - 1) + [num_classes]
        self.convs = nn.ModuleList([GraphConvolution(shapes[layer], shapes[layer + 1]) for layer in range(num_layers)])
        self.ses = nn.ModuleList(
            [SELayer(shapes[layer], se_channels=int(np.sqrt(shapes[layer]))) for layer in range(num_layers)]
        )
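Each layer index here gets its own SELayer sized to that layer's input width, which suggests a forward pass that recalibrates features right before each convolution. A sketch under that assumption (the (x, adj) signature of GraphConvolution is also assumed):

    def forward(self, x, adj):
        # Sketch; assumes `import torch.nn.functional as F` is in scope.
        for i, conv in enumerate(self.convs[:-1]):
            x = self.ses[i](x)         # recalibrate inputs of layer i
            x = F.relu(conv(x, adj))
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.ses[-1](x)
        return self.convs[-1](x, adj)  # final layer maps to num_classes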
Example #5
    def __init__(self, num_features, num_classes, hidden_size, num_layers, dropout, norm=None, activation="relu"):
        super(DrGCN, self).__init__()

        self.num_features = num_features
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.dropout = dropout
        shapes = [num_features] + [hidden_size] * (num_layers - 1) + [num_classes]
        self.convs = nn.ModuleList(
            [
                GCNLayer(shapes[layer], shapes[layer + 1], activation=activation, norm=norm)
                for layer in range(num_layers - 1)
            ]
        )
        self.convs.append(GCNLayer(shapes[-2], shapes[-1]))
        self.ses = nn.ModuleList(
            [SELayer(shapes[layer], se_channels=int(np.sqrt(shapes[layer]))) for layer in range(num_layers)]
        )
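Every example above sizes the SE bottleneck as the integer square root of the input width. For reference, a squeeze-and-excitation layer over node features is essentially a tiny gating MLP; the following is one plausible definition, not necessarily the repository's exact SELayer:

import torch
import torch.nn as nn

class SELayer(nn.Module):
    # Illustrative sketch of squeeze-and-excitation over feature channels.
    def __init__(self, in_channels, se_channels):
        super(SELayer, self).__init__()
        self.gate = nn.Sequential(
            nn.Linear(in_channels, se_channels),  # squeeze to the bottleneck
            nn.ELU(),
            nn.Linear(se_channels, in_channels),  # expand back to full width
            nn.Sigmoid(),                         # per-channel gates in (0, 1)
        )

    def forward(self, x):
        s = torch.mean(x, dim=0)  # global summary over all nodes
        return x * self.gate(s)   # rescale every node's features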
Example #6
    def __init__(self, num_layers, input_dim, hidden_dim, output_dim, use_selayer):
        """MLP layers construction

        Parameters
        ----------
        num_layers: int
            The number of linear layers
        input_dim: int
            The dimensionality of input features
        hidden_dim: int
            The dimensionality of hidden units at ALL layers
        output_dim: int
            The number of classes for prediction
        use_selayer: bool
            If True, use an SELayer instead of BatchNorm1d between layers

        """
        super(MLP, self).__init__()
        self.linear_or_not = True  # default is linear model
        self.num_layers = num_layers
        self.output_dim = output_dim

        if num_layers < 1:
            raise ValueError("number of layers should be positive!")
        elif num_layers == 1:
            # Linear model
            self.linear = nn.Linear(input_dim, output_dim)
        else:
            # Multi-layer model
            self.linear_or_not = False
            self.linears = torch.nn.ModuleList()
            self.batch_norms = torch.nn.ModuleList()

            self.linears.append(nn.Linear(input_dim, hidden_dim))
            for layer in range(num_layers - 2):
                self.linears.append(nn.Linear(hidden_dim, hidden_dim))
            self.linears.append(nn.Linear(hidden_dim, output_dim))

            for layer in range(num_layers - 1):
                self.batch_norms.append(
                    SELayer(hidden_dim, int(np.sqrt(hidden_dim))) if use_selayer else nn.BatchNorm1d(hidden_dim)
                )
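The linear_or_not flag implies a two-branch forward; a sketch of how that usually looks (the ReLU between hidden layers is an assumption):

    def forward(self, x):
        # Sketch; assumes `import torch.nn.functional as F` is in scope.
        if self.linear_or_not:
            return self.linear(x)  # single-layer case
        h = x
        for i in range(self.num_layers - 1):
            h = F.relu(self.batch_norms[i](self.linears[i](h)))
        return self.linears[-1](h)  # no norm/activation on the output layer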
Example #7
    def __init__(
        self,
        num_layers,
        num_mlp_layers,
        input_dim,
        hidden_dim,
        output_dim,
        final_dropout,
        learn_eps,
        graph_pooling_type,
        neighbor_pooling_type,
        use_selayer,
    ):
        """model parameters setting

        Parameters
        ----------
        num_layers: int
            The number of linear layers in the neural network
        num_mlp_layers: int
            The number of linear layers in mlps
        input_dim: int
            The dimensionality of input features
        hidden_dim: int
            The dimensionality of hidden units at ALL layers
        output_dim: int
            The number of classes for prediction
        final_dropout: float
            dropout ratio on the final linear layer
        learn_eps: boolean
            If True, learn epsilon to distinguish center nodes from neighbors
            If False, aggregate neighbors and center nodes altogether.
        graph_pooling_type: str
            how to aggregate all nodes in a graph (sum, mean, or max)
        neighbor_pooling_type: str
            how to aggregate neighbors (sum, mean, or max)
        use_selayer: bool
            If True, use an SELayer instead of BatchNorm1d after each GIN layer

        """
        super(UnsupervisedGIN, self).__init__()
        self.num_layers = num_layers
        self.learn_eps = learn_eps

        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        for layer in range(self.num_layers - 1):
            if layer == 0:
                mlp = MLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim, use_selayer)
            else:
                mlp = MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim, use_selayer)

            self.ginlayers.append(
                GINConv(
                    ApplyNodeFunc(mlp, use_selayer),
                    neighbor_pooling_type,
                    0,  # init_eps
                    self.learn_eps,
                )
            )
            self.batch_norms.append(
                SELayer(hidden_dim, int(np.sqrt(hidden_dim))) if use_selayer else nn.BatchNorm1d(hidden_dim)
            )

        # Linear function for graph poolings of output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()

        for layer in range(num_layers):
            if layer == 0:
                self.linears_prediction.append(nn.Linear(input_dim, output_dim))
            else:
                self.linears_prediction.append(nn.Linear(hidden_dim, output_dim))

        self.drop = nn.Dropout(final_dropout)

        if graph_pooling_type == "sum":
            self.pool = SumPooling()
        elif graph_pooling_type == "mean":
            self.pool = AvgPooling()
        elif graph_pooling_type == "max":
            self.pool = MaxPooling()
        else:
            raise NotImplementedError
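The per-layer linears_prediction heads point to a jumping-knowledge-style readout: node embeddings from every depth are pooled and scored, and the scores are summed. A sketch, assuming GINConv is invoked DGL-style as layer(g, h):

    def forward(self, g, h):
        # Sketch; assumes `import torch.nn.functional as F` is in scope.
        hidden_rep = [h]  # node representations at every depth, input included
        for i in range(self.num_layers - 1):
            h = self.ginlayers[i](g, h)
            h = self.batch_norms[i](h)  # SELayer or BatchNorm1d
            h = F.relu(h)
            hidden_rep.append(h)
        score_over_layer = 0
        for i, h in enumerate(hidden_rep):  # pool and score each depth
            pooled = self.pool(g, h)
            score_over_layer += self.drop(self.linears_prediction[i](pooled))
        return score_over_layer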