Example 1
    def build_model(self):
        self.layers = nn.ModuleList()
        # input to hidden
        if self.num_layers == 1:
            i2h = RelGraphConv(self.features_dim, self.out_dim,
                               self.num_rels)  #, activation=nn.ReLU())
        else:
            i2h = RelGraphConv(self.features_dim,
                               self.h_dim,
                               self.num_rels,
                               activation=nn.ReLU(),
                               dropout=0.1)
        self.layers.append(i2h)

        # hidden to hidden
        if self.num_layers > 2:
            for _ in range(self.num_layers - 2):
                h2h = RelGraphConv(self.h_dim,
                                   self.h_dim,
                                   self.num_rels,
                                   activation=nn.ReLU())
                self.layers.append(h2h)
        # hidden to output
        if self.num_layers >= 2:
            h2o = RelGraphConv(self.h_dim,
                               self.out_dim,
                               self.num_rels,
                               activation=nn.ReLU(),
                               dropout=0.1)
            self.layers.append(h2o)

        self.linear = nn.Linear(self.out_dim, 2)  # for softmax
Example 2
 def build_model(self):
     self.layers = nn.ModuleList()
     # input to hidden
     i2h = RelGraphConv(self.features_dim, self.h_dim, self.num_rels, activation=nn.ReLU())
     self.layers.append(i2h)
     # hidden to hidden
     for _ in range(self.num_hidden_layers):
         h2h = RelGraphConv(self.h_dim, self.h_dim, self.num_rels, activation=nn.ReLU())
         self.layers.append(h2h)
     # hidden to output
     h2o = RelGraphConv(self.h_dim, self.out_dim, self.num_rels, activation=nn.ReLU())
     self.layers.append(h2o)
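
For context, a minimal sketch of how such a stack of layers is usually applied, assuming DGL's RelGraphConv call convention where each layer takes the graph, the node features, and a tensor of per-edge relation ids; this method is illustrative and not part of the original snippet:

 def forward(self, g, features, etypes):
     # g: DGLGraph, features: (num_nodes, features_dim), etypes: (num_edges,) relation ids
     h = features
     for layer in self.layers:
         h = layer(g, h, etypes)
     return h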
Example 3
 def build_hidden_layer(self, in_dim, out_dim):
     return RelGraphConv(in_dim,
                         out_dim,
                         self.num_rels,
                         num_bases=self.num_bases,
                         activation=F.relu,
                         self_loop=self.self_loop)
Example 4
 def __init__(self, in_feat, out_feat, num_rels, regularizer="basis", num_bases=None, act_func="relu", dropout=0.0):
     super(RGINLayer, self).__init__()
     self.rgc_layer = RelGraphConv(
         in_feat=in_feat, out_feat=out_feat, num_rels=num_rels,
         regularizer=regularizer, num_bases=num_bases,
         activation=None, self_loop=True, dropout=0.0)
     self.mlp = nn.Sequential(
         nn.Linear(out_feat, out_feat),
         # nn.BatchNorm1d(out_feat),
         map_activation_str_to_layer(act_func), 
         nn.Linear(out_feat, out_feat),
         map_activation_str_to_layer(act_func))
     self.drop = nn.Dropout(dropout)
     
     # init
     if hasattr(self.rgc_layer, "weight") and self.rgc_layer.weight is not None:
         nn.init.normal_(self.rgc_layer.weight, 0.0, 1/(out_feat)**0.5)
     if hasattr(self.rgc_layer, "w_comp") and self.rgc_layer.w_comp is not None:
         nn.init.normal_(self.rgc_layer.w_comp, 0.0, 1/(out_feat)**0.5)
     if hasattr(self.rgc_layer, "loop_weight") and self.rgc_layer.loop_weight is not None:
         nn.init.normal_(self.rgc_layer.loop_weight, 0.0, 1/(out_feat)**0.5)
     if hasattr(self.rgc_layer, "h_bias") and self.rgc_layer.h_bias is not None:
         nn.init.zeros_(self.rgc_layer.h_bias)
     for m in self.mlp.modules():
         if isinstance(m, nn.Linear):
             nn.init.normal_(m.weight, 0.0, 1/(out_feat)**0.5)
             if hasattr(m, "bias") and m.bias is not None:
                 nn.init.zeros_(m.bias)
         elif isinstance(m, nn.BatchNorm1d):
             nn.init.ones_(m.weight)
             nn.init.zeros_(m.bias)
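
The constructor above only wires up the modules; a plausible forward pass, assuming the same DGL RelGraphConv call convention and that the MLP and dropout are applied after the relational convolution (a sketch, not the original author's code):

 def forward(self, g, x, etypes):
     h = self.rgc_layer(g, x, etypes)
     h = self.mlp(h)
     return self.drop(h)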
Example 5
    def create_net(self, name, input_dim, **kw):
        num_layers = kw.get("num_layers", 1)
        hidden_dim = kw.get("hidden_dim", 64)
        num_rels = kw.get("num_rels", 1)
        num_bases = kw.get("num_bases", 8)
        regularizer = kw.get("regularizer", "basis")
        act_func = kw.get("act_func", "relu")
        dropout = kw.get("dropout", 0.0)

        rgcns = nn.ModuleList()
        for i in range(num_layers):
            rgcns.add_module(
                "%s_rgc%d" % (name, i),
                RelGraphConv(in_feat=hidden_dim if i > 0 else input_dim,
                             out_feat=hidden_dim,
                             num_rels=num_rels,
                             regularizer=regularizer,
                             num_bases=num_bases,
                             activation=map_activation_str_to_layer(act_func),
                             self_loop=True,
                             dropout=dropout))

        for m in rgcns.modules():
            if isinstance(m, RelGraphConv):
                if hasattr(m, "weight") and m.weight is not None:
                    nn.init.normal_(m.weight, 0.0, 1 / (hidden_dim)**0.5)
                if hasattr(m, "w_comp") and m.w_comp is not None:
                    nn.init.normal_(m.w_comp, 0.0, 1 / (hidden_dim)**0.5)
                if hasattr(m, "loop_weight") and m.loop_weight is not None:
                    nn.init.normal_(m.loop_weight, 0.0, 1 / (hidden_dim)**0.5)
                if hasattr(m, "h_bias") and m.h_bias is not None:
                    nn.init.zeros_(m.h_bias)

        return rgcns, hidden_dim
Example 6
    def build_output_layer(self, in_dim, out_dim):
        if self.lin_output: return nn.Linear(in_dim, out_dim)

        return RelGraphConv(in_dim,
                            out_dim,
                            self.num_rels,
                            num_bases=self.num_bases,
                            activation=torch.sigmoid)
Example 7
    def __init__(self, input_dim, output_dim, **kwargs):
        super().__init__(input_dim, output_dim)
        hidden_dim = kwargs.get('hidden_dim', 32)
        num_layers = kwargs.get('num_layers', 2)

        self.num_layers = num_layers
        self.linear_in = nn.Linear(input_dim, hidden_dim)
        self.conv = RelGraphConv(2*hidden_dim, hidden_dim, len(edge_types), activation=torch.tanh)
        self.g_embed = nn.Linear(hidden_dim, output_dim)
Example 8
    def __init__(self, input_dim, output_dim, **kwargs):
        super().__init__(input_dim, output_dim)

        hidden_dims = kwargs.get('hidden_dims', [32])
        self.num_layers = len(hidden_dims)

        hidden_plus_input_dims = [hd + input_dim for hd in hidden_dims]
        self.convs = nn.ModuleList([RelGraphConv(in_dim, out_dim, len(edge_types), activation=F.relu)
            for (in_dim, out_dim) in zip([input_dim] + hidden_plus_input_dims[:-1], hidden_dims)])

        self.g_embed = nn.Linear(hidden_dims[-1], output_dim)
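
The layer dimensions above (every layer after the first consumes hidden_dim + input_dim features) suggest that the original node features are concatenated back onto the hidden state before each subsequent convolution. A hedged sketch of such a forward pass, assuming torch is imported and that g, x, and etypes are the graph, node features, and edge relation ids:

    def forward(self, g, x, etypes):
        h = x
        for i, conv in enumerate(self.convs):
            # after the first layer, concatenate the original inputs back in
            inp = h if i == 0 else torch.cat([h, x], dim=1)
            h = conv(g, inp, etypes)
        return self.g_embed(h)  # project to output_dim (the original may insert a graph readout first)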
Example 9
    def build_model(self):
        self.layers = nn.ModuleList()
        # input to hidden
        if self.num_layers == 1:
            i2h = RelGraphConv(self.features_dim,
                               self.out_dim,
                               self.num_rels,
                               num_bases=self.num_bases,
                               dropout=self.d)
        else:
            i2h = RelGraphConv(self.features_dim,
                               self.h_dim,
                               self.num_rels,
                               num_bases=self.num_bases,
                               activation=nn.ReLU(),
                               dropout=self.d)
        self.layers.append(i2h)

        # hidden to hidden
        if self.num_layers > 2:
            for _ in range(self.num_layers - 2):
                h2h = RelGraphConv(self.h_dim,
                                   self.h_dim,
                                   self.num_rels,
                                   num_bases=self.num_bases,
                                   activation=nn.ReLU(),
                                   dropout=self.d)
                self.layers.append(h2h)

        # hidden to output
        if self.num_layers >= 2:
            h2o = RelGraphConv(self.h_dim,
                               self.out_dim,
                               self.num_rels,
                               num_bases=self.num_bases,
                               dropout=self.d)
            self.layers.append(h2o)
Example 10
    def __init__(self, in_feats, out_feats, num_rels=64*21, activation=F.relu, loop=False,
                 residual=True, batchnorm=True, rgcn_drop_out=0.5):
        super(RGCNLayer, self).__init__()

        self.activation = activation
        self.graph_conv_layer = RelGraphConv(in_feats, out_feats, num_rels=num_rels, regularizer='basis',
                                               num_bases=None, bias=True, activation=activation,
                                               self_loop=loop, dropout=rgcn_drop_out)
        self.residual = residual
        if residual:
            self.res_connection = nn.Linear(in_feats, out_feats)

        self.bn = batchnorm
        if batchnorm:
            self.bn_layer = nn.BatchNorm1d(out_feats)
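
A hedged sketch of the forward pass such an RGCNLayer is typically paired with: the relational convolution, an optional residual projection of the input features, and batch normalization (attribute names follow the constructor above; g, node_feats, and etypes are assumed inputs):

    def forward(self, g, node_feats, etypes):
        new_feats = self.graph_conv_layer(g, node_feats, etypes)
        if self.residual:
            res_feats = self.activation(self.res_connection(node_feats))
            new_feats = new_feats + res_feats
        if self.bn:
            new_feats = self.bn_layer(new_feats)
        return new_feats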
Example 11
 def __init__(self, **kwargs):
     super().__init__(**kwargs)
     self.layers = nn.ModuleList()
     self.n_relations = 2 * len(
         self.db_info['edge_type_to_int']
     ) - 1  # there are negative edge types for the reverse edges
     for _ in range(self.n_layers):
         self.layers.append(
             nn.ModuleDict({
                 'rgc':
                 RelGraphConv(
                     in_feat=self.hidden_dim,
                     out_feat=self.hidden_dim,
                     num_rels=self.n_relations,
                     regularizer='bdd',
                     num_bases=8,
                     bias=True,
                     dropout=self.p_dropout,
                     activation=self.get_act(),
                     self_loop=False,  # It's already in the data
                 ),
                 'norm':
                 self.get_norm(self.hidden_dim)
             }))
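
One plausible way to apply these per-layer ModuleDicts, relational convolution followed by the normalization module, given the attribute names above (a sketch; g, h, and etypes are assumed inputs, and the actual model may add residual connections or other steps):

 def forward(self, g, h, etypes):
     for layer in self.layers:
         h = layer['rgc'](g, h, etypes)
         h = layer['norm'](h)
     return h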
Example 12
 def build_output_layer(self, in_dim, out_dim):
     return RelGraphConv(in_dim,
                         out_dim,
                         self.num_rels,
                         num_bases=self.num_bases,
                         activation=None)