Example #1
 def __init__(self, emb_dim: int, hidden_dim: int, num_classes: int):
     super().__init__()
     self.emb_dim = emb_dim
     self.hidden_dim = hidden_dim
     self.num_classes = num_classes
     self.conv1 = GINConv(MLP(emb_dim, hidden_dim, hidden_dim), 'sum')
     self.conv2 = GINConv(MLP(hidden_dim, hidden_dim, num_classes), 'sum')
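The MLP module passed to GINConv is not shown in this snippet; a minimal sketch of a module matching the assumed MLP(in_dim, hidden_dim, out_dim) signature (the real project may define it differently) could be:

import torch.nn as nn

class MLP(nn.Module):
    # Hypothetical two-layer MLP used as the GINConv apply function.
    def __init__(self, in_dim, hidden_dim, out_dim):
        super().__init__()
        self.input_dim = in_dim
        self.output_dim = out_dim
        self.layers = nn.Sequential(
            nn.Linear(in_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, out_dim),
        )

    def forward(self, x):
        return self.layers(x)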
Example #2
    def __init__(self, input_size=1, num_classes=2):
        super(GIN, self).__init__()

        self.conv1 = GINConv(nn.Linear(input_size, num_classes),
                             aggregator_type='sum')
        self.conv2 = GINConv(nn.Linear(num_classes, num_classes),
                             aggregator_type='sum')
        self.pool = SumPooling()
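The forward pass is not part of the excerpt; a minimal sketch of how these layers are typically chained for graph classification (an assumption, not the original file) might be:

# Hedged sketch, assuming `import torch.nn.functional as F`; not part of the original file.
def forward(self, g, features):
    h = F.relu(self.conv1(g, features))   # first GIN layer plus nonlinearity
    h = self.conv2(g, h)                  # second GIN layer produces per-node class scores
    return self.pool(g, h)                # sum-pool node scores into graph-level logits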
Example #3
    def __init__(self, apply_func, aggr_type, dropout, batch_norm, residual=False, init_eps=0, learn_eps=False):
        super().__init__()
        self.apply_func = apply_func
        
        if aggr_type == 'sum':
            self._reducer = fn.sum
        elif aggr_type == 'max':
            self._reducer = fn.max
        elif aggr_type == 'mean':
            self._reducer = fn.mean
        else:
            raise KeyError('Aggregator type {} not recognized.'.format(aggr_type))
            
        self.batch_norm = batch_norm
        self.residual = residual
        self.dropout = dropout
        
        in_dim = apply_func.mlp.input_dim
        out_dim = apply_func.mlp.output_dim
        
        if in_dim != out_dim:
            self.residual = False
            
        # to specify whether eps is trainable or not.
#         if learn_eps:
#             self.eps = torch.nn.Parameter(torch.FloatTensor([init_eps]))
#         else:
#             self.register_buffer('eps', torch.FloatTensor([init_eps]))
            
#         self.bn_node_h = nn.BatchNorm1d(out_dim)
        # Wrap DGL's GINConv around the apply function, passing through the eps settings from this constructor.
        self.conv = GINConv(apply_func, aggr_type, init_eps=init_eps, learn_eps=learn_eps)
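The excerpt stops at the constructor; a hedged sketch of what a matching forward pass could look like, given the fields initialized above, is below (this is an assumption, not the project's code).

# Hedged sketch; assumes `import torch.nn.functional as F` and that the
# commented-out `bn_node_h` batch-norm layer is re-enabled.
def forward(self, g, h):
    h_in = h                                   # keep the input for the residual connection
    h = self.conv(g, h)                        # (1 + eps) * h + aggregated neighbours, then apply_func
    if self.batch_norm:
        h = self.bn_node_h(h)
    h = F.relu(h)
    if self.residual:
        h = h_in + h                           # only enabled when in_dim == out_dim (see constructor)
    h = F.dropout(h, self.dropout, training=self.training)
    return h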
Example #4
File: SWL.py  Project: 2019mohamed/SWL
import dgl
import numpy as np
import torch
from dgl.nn.pytorch import GINConv
from dgl.nn.pytorch.glob import SumPooling


def rGIN(g):
    # Convert the NetworkX graph and attach one random scalar feature per node.
    g = dgl.from_networkx(g)
    f = np.random.standard_normal(size=(g.number_of_nodes(), 1))
    x = torch.tensor(f, dtype=torch.float)
    g.ndata['x'] = x
    # A single GIN layer with a 1 -> 1 linear apply function and sum aggregation.
    lin = torch.nn.Linear(1, 1)
    conv = GINConv(lin, 'sum')
    res = conv(g, x)
    # Sum-pool the node outputs into one graph-level embedding.
    sumpool = SumPooling()
    return sumpool(g, res)[0].detach().numpy()
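A quick way to exercise the function (hypothetical usage, not part of SWL.py); note that the random node features make the embedding change between calls:

import networkx as nx

# Hypothetical usage, not from the original project:
graph = nx.cycle_graph(5)       # small 5-node ring
emb = rGIN(graph)               # 1-element numpy array (graph-level embedding)
print(emb.shape)                # (1,)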
Example #5
 def __init__(self, g, in_feats, n_classes, n_hidden, n_layers, init_eps,
              learn_eps):
     super(GIN, self).__init__()
     self.g = g
     self.layers = nn.ModuleList()
     self.layers.append(
         GINConv(
             nn.Sequential(
                 nn.Dropout(0.6),
                 nn.Linear(in_feats, n_hidden),
                 nn.ReLU(),
             ),
             "mean",
             init_eps,
             learn_eps,
         ))
     for i in range(n_layers - 1):
         self.layers.append(
             GINConv(
                 nn.Sequential(
                     nn.Dropout(0.6),
                     nn.Linear(n_hidden, n_hidden),
                     nn.ReLU(),
                 ),
                 "mean",
                 init_eps,
                 learn_eps,
             ))
     self.layers.append(
         GINConv(
             nn.Sequential(
                 nn.Dropout(0.6),
                 nn.Linear(n_hidden, n_classes),
             ),
             "mean",
             init_eps,
             learn_eps,
         ))
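Only the constructor is shown; since each layer already wraps its own dropout, linear, and activation inside an nn.Sequential, a plausible forward pass (an assumption, not the source) is simply a loop over the layers on the stored graph:

# Hedged sketch, not from the source file:
def forward(self, features):
    h = features
    for layer in self.layers:
        h = layer(self.g, h)    # each GINConv applies its nn.Sequential as the apply function
    return h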
Example #6
 def __init__(self, hidden_size, n_layers, n_states=2, dropout=0.0, feat_dropout=0.5, norm_type=None, agg_type='sum', with_attr=False):
     super().__init__()
     self.agg_type = agg_type
     self.with_attr = with_attr
     self.state_embedding = nn.Embedding(n_states, hidden_size)
     if with_attr:
         self.feat_mapping = make_linear_block(hidden_size, hidden_size,
                                               residual=False, dropout=feat_dropout, bias=False)
     self.gconv_layers = nn.ModuleList([GINConv(None, agg_type) for _ in range(n_layers)])
     self.fc_layers = nn.ModuleList([make_linear_block(hidden_size, hidden_size, act_cls=Swish,
                                                       norm_type=norm_type, dropout=dropout)
                                     for _ in range(n_layers)])
     self.scoring_layer = make_linear_block(hidden_size * n_layers, 2,
                                            norm_type=norm_type, dropout=dropout, act_cls=Swish)
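make_linear_block and Swish are project helpers that the snippet does not define; a rough sketch consistent with the keyword arguments used above (everything here is an assumption about their behaviour) could be:

import torch
import torch.nn as nn

class Swish(nn.Module):
    # Swish / SiLU activation: x * sigmoid(x)
    def forward(self, x):
        return x * torch.sigmoid(x)

def make_linear_block(in_dim, out_dim, act_cls=None, norm_type=None,
                      residual=True, dropout=0.0, bias=True):
    # Hypothetical helper: Linear -> optional normalization -> optional activation -> dropout.
    # `residual` is accepted for signature compatibility but not wired up in this sketch.
    layers = [nn.Linear(in_dim, out_dim, bias=bias)]
    if norm_type == 'batch':
        layers.append(nn.BatchNorm1d(out_dim))
    elif norm_type == 'layer':
        layers.append(nn.LayerNorm(out_dim))
    if act_cls is not None:
        layers.append(act_cls())
    if dropout > 0:
        layers.append(nn.Dropout(dropout))
    return nn.Sequential(*layers)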
Example #7
    def __init__(self,
                 g,
                 in_feats,
                 n_hidden,
                 n_classes,
                 n_hidden_layers,
                 activation,
                 dropout):
        super(GIN, self).__init__()
        self.g = g
        self.layers = nn.ModuleList()
        
        self.input_MLP = nn.Linear(in_feats, n_hidden)
        self.hidden_MLP = nn.Linear(n_hidden, n_hidden)
        self.output_MLP = nn.Linear(n_hidden, n_classes)

        # input layer
        self.layers.append(GINConv(apply_func=self.input_MLP, aggregator_type='sum'))
        # hidden layers (note: the same hidden_MLP module is shared by all of them)
        for i in range(n_hidden_layers - 1):
            self.layers.append(GINConv(apply_func=self.hidden_MLP, aggregator_type='sum'))
        # output layer
        self.layers.append(GINConv(apply_func=self.output_MLP, aggregator_type='sum'))
Example #8
 def __init__(self, device='cpu', dim=64, model_type="gcn"):  # initialization
     super().__init__()
     self.dim = dim
     self.device = device
     self.embed = nn.Embedding(500, dim)
     if model_type == "gcn":
         self.conv = GraphConv(dim, dim)
     elif model_type == "gat":
         self.conv = GATConv(dim, dim, 1)
     elif model_type == "gin":
         print("gin")
         self.conv = GINConv(apply_func=nn.Linear(dim, dim),
                             aggregator_type="mean")
     elif model_type == "sage":
         print("sage")
         self.conv = SAGEConv(dim, dim, aggregator_type="gcn")
     self.func = nn.Linear(dim, 2)
     self.act = nn.ReLU()
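A hedged smoke test for this encoder (the class name Encoder below is assumed, since the snippet does not show it):

import dgl
import torch

# Hypothetical smoke test; `Encoder` is an assumed name for the class above.
g = dgl.rand_graph(10, 30)                 # random graph with 10 nodes and 30 edges
model = Encoder(model_type="gin")
h = model.embed(torch.arange(10))          # (10, 64) embeddings looked up by node id
h = model.act(model.conv(g, h))            # one GIN message-passing step
logits = model.func(h)                     # (10, 2) per-node scores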