def test_gated_graph_conv():
    x = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    row, col = edge_index
    value = torch.rand(row.size(0))
    adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4))
    adj1 = adj2.set_value(None)

    conv = GatedGraphConv(32, num_layers=3)
    assert conv.__repr__() == 'GatedGraphConv(32, num_layers=3)'
    out1 = conv(x, edge_index)
    assert out1.size() == (4, 32)
    assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6)
    out2 = conv(x, edge_index, value)
    assert out2.size() == (4, 32)
    assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6)

    t = '(Tensor, Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x, edge_index).tolist() == out1.tolist()
    assert jit(x, edge_index, value).tolist() == out2.tolist()

    t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert torch.allclose(jit(x, adj1.t()), out1, atol=1e-6)
    assert torch.allclose(jit(x, adj2.t()), out2, atol=1e-6)
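
For reference: in PyTorch Geometric, GatedGraphConv is constructed as
GatedGraphConv(out_channels, num_layers), where num_layers is the number of
gated (GRU) propagation steps; inputs with fewer than out_channels features are
zero-padded internally. A minimal standalone sketch of the forward pass the
test above exercises (illustrative values, assuming torch and torch_geometric
are installed):

import torch
from torch_geometric.nn import GatedGraphConv

x = torch.randn(4, 16)                          # 4 nodes, 16 input features
edge_index = torch.tensor([[0, 0, 0, 1, 2, 3],
                           [1, 2, 3, 0, 0, 0]])
edge_weight = torch.rand(edge_index.size(1))    # optional per-edge weights

conv = GatedGraphConv(out_channels=32, num_layers=3)
out = conv(x, edge_index)                       # -> shape [4, 32]
out_weighted = conv(x, edge_index, edge_weight)
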
Example #2
    def __init__(self, in_feats, hid_feats, out_feats):
        super(BUrumorGGCN, self).__init__()
        self.conv1 = GatedGraphConv(in_feats, hid_feats, aggr='add', bias=True)

        self.conv2 = GatedGraphConv(hid_feats + in_feats,
                                    out_feats,
                                    aggr='add',
                                    bias=True)
Example #3
def test_gated_graph_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    conv = GatedGraphConv(out_channels, num_layers=3)
    assert conv.__repr__() == 'GatedGraphConv(32, num_layers=3)'
    assert conv(x, edge_index).size() == (num_nodes, out_channels)
Example #4
 def __init__(self,
              features_num=16,
              num_class=2,
              dropout=0.2,
              num_layers=2,
              hidden=16):
     super(GGC, self).__init__()
     self.conv1 = GatedGraphConv(features_num, hidden)
     self.conv2 = GatedGraphConv(hidden, num_class)
     self.dropout = dropout
Example #5
    def __init__(self, embeddings, word_vec_size, gnn_type, gnn_layers,
                 rnn_size):
        super(GraphEncoder, self).__init__()

        self.is_graph_encoder = True

        self.gnn_type = gnn_type
        self.gnn_layers = gnn_layers
        self.embeddings = embeddings
        self.dropout = nn.Dropout(0.3)
        self.num_inputs = word_vec_size
        self.rnn_size = rnn_size

        if self.gnn_type == 'ggnn':
            self.gnn_td = GatedGraphConv(self.num_inputs, self.gnn_layers)
            self.gnn_bu = GatedGraphConv(self.num_inputs, self.gnn_layers)
        elif self.gnn_type == 'gat':
            self.gnn_td = GATConv(self.num_inputs,
                                  self.num_inputs,
                                  heads=self.gnn_layers,
                                  concat=False,
                                  dropout=0.3)
            self.gnn_bu = GATConv(self.num_inputs,
                                  self.num_inputs,
                                  heads=self.gnn_layers,
                                  concat=False,
                                  dropout=0.3)
        else:
            self.gins_td = []
            self.gins_bu = []
            num_layers = self.gnn_layers
            nn_td = Sequential(Linear(self.num_inputs,
                                      self.num_inputs), ReLU(),
                               Linear(self.num_inputs, self.num_inputs))
            nn_tb = Sequential(Linear(self.num_inputs,
                                      self.num_inputs), ReLU(),
                               Linear(self.num_inputs, self.num_inputs))
            for x in range(num_layers):
                gin = GINConv(nn_td)
                self.gins_td.append(gin.cuda())
                gin = GINConv(nn_tb)
                self.gins_bu.append(gin.cuda())

        self.bilstm = nn.LSTM(self.rnn_size,
                              self.rnn_size // 2,
                              num_layers=2,
                              bidirectional=True,
                              dropout=0.3)

        if self.gnn_type == 'gin':
            self.layers_seq = nn.Sequential(*self.gins_td, *self.gins_bu,
                                            self.bilstm)
Example #6
 def __init__(self, device: torch.device, *,
     embedding_dim: int = 64,
     no_shortcut: bool = False,
     **kwargs
 ):
     super().__init__(device)
     self.embedding_dim = embedding_dim
     self.no_shortcut = no_shortcut
     # self.aggregator_type = aggregator_type
     self.embed = GatedGraphConv(embedding_dim, 2, aggr='mean')
     self.conv2 = GatedGraphConv(embedding_dim, 2, aggr='mean')
     self.fc = nn.Linear(embedding_dim, 2)
     self.activate = nn.ELU()
Example #7
    def __init__(self, net_params):
        super().__init__()

        in_dim_node = net_params['in_dim']  # node_dim (feat is an integer)
        in_dim_edge = 1  # edge_dim (feat is a float)
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)

        self.embedding_h = nn.Linear(in_dim_node,
                                     hidden_dim)  # node feat is an integer
        # self.embedding_e = nn.Linear(in_dim_edge, hidden_dim)  # edge feat is a float
        self.layers = nn.ModuleList(
            [GatedGraphConv(hidden_dim, n_layers, aggr='add')])
        # self.layers = nn.ModuleList([GatedGCNLayer(hidden_dim, hidden_dim, dropout,
        #                                            self.batch_norm, self.residual) for _ in range(n_layers)])
        if self.batch_norm:
            self.normlayers = nn.ModuleList([nn.BatchNorm1d(hidden_dim)])
        # self.MLP_layer = MLPReadout(hidden_dim, n_classes)
        self.MLP_layer = nn.Linear(hidden_dim, n_classes, bias=True)
Example #8
    def __init__(self, hidden_size, opt, n_node, step=1):
        super(GNN, self).__init__()
        self.step = step
        self.hidden_size = hidden_size
        self.input_size = hidden_size * 2
        self.gate_size = 3 * hidden_size
        self.w_ih = Parameter(torch.Tensor(self.gate_size, self.input_size))
        self.w_hh = Parameter(torch.Tensor(self.gate_size, self.hidden_size))
        self.b_ih = Parameter(torch.Tensor(self.gate_size))
        self.b_hh = Parameter(torch.Tensor(self.gate_size))
        self.b_iah = Parameter(torch.Tensor(self.hidden_size))
        self.b_oah = Parameter(torch.Tensor(self.hidden_size))

        self.linear_edge_in = nn.Linear(self.hidden_size,
                                        self.hidden_size,
                                        bias=True)
        self.linear_edge_out = nn.Linear(self.hidden_size,
                                         self.hidden_size,
                                         bias=True)
        self.linear_edge_f = nn.Linear(self.hidden_size,
                                       self.hidden_size,
                                       bias=True)

        heads = 1
        self.conv1 = GATConv(self.hidden_size,
                             self.hidden_size,
                             heads=heads,
                             dropout=0.6)
        # On the Pubmed dataset, use heads=8 in conv2.
        self.conv2 = GATConv(heads * self.hidden_size,
                             self.hidden_size,
                             heads=1,
                             concat=False,
                             dropout=0.6)
        self.ggnn = GatedGraphConv(self.hidden_size, step + 1)
Example #9
    def __init__(self, input_size, hidden_size, output_size, seq_len,
                 edge_embedding_dim, sampling, recurrent):
        super(Decoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.seq_len = seq_len
        self.edge_embedding_dim = edge_embedding_dim
        self.sampling = sampling
        self.recurrent = recurrent
        self.node_transform = Sequential(
            Linear(self.input_size, self.output_size), ReLU())
        if self.recurrent:
            self.rnn_graph_conv = GatedGraphConv(out_channels=self.input_size,
                                                 num_layers=self.num_layers)
        else:
            self.MLP = Sequential(
                Linear(self.edge_embedding_dim, self.hidden_size), ReLU(),
                Linear(self.hidden_size,
                       2 * self.input_size * self.input_size))
            self.graph_conv_1 = MLPGraphConv(in_channels=self.input_size,
                                             out_channels=self.input_size,
                                             nn=self.MLP,
                                             root_weight=True,
                                             bias=True,
                                             aggr='add')
            self.graph_conv_2 = MLPGraphConv(in_channels=self.input_size,
                                             out_channels=self.input_size,
                                             nn=self.MLP,
                                             root_weight=True,
                                             bias=True,
                                             aggr='add')

            self.graph_conv_list = ModuleList(
                [self.graph_conv_1, self.graph_conv_2])
Example #10
 def get_layer(self, in_dim, out_dim):
     if self is GNN_TYPE.GCN:
         return GCNConv(in_channels=in_dim, out_channels=out_dim)
     elif self is GNN_TYPE.GGNN:
         return GatedGraphConv(out_channels=out_dim, num_layers=1)
     elif self is GNN_TYPE.GIN:
         return GINConv(
             nn.Sequential(nn.Linear(in_dim, out_dim),
                           nn.BatchNorm1d(out_dim), nn.ReLU(),
                           nn.Linear(out_dim, out_dim),
                           nn.BatchNorm1d(out_dim), nn.ReLU()))
     elif self is GNN_TYPE.GAT:
         # 4-heads, although the paper by Velickovic et al. had used 6-8 heads.
         # The output will be the concatenation of the heads, yielding a vector of size out_dim
         num_heads = 4
         return GATConv(in_dim, out_dim // num_heads, heads=num_heads)
     elif self is GNN_TYPE.GAT2:
         # 4-heads, although the paper by Velickovic et al. had used 6-8 heads.
         # The output will be the concatenation of the heads, yielding a vector of size out_dim
         num_heads = 4
         return GAT2Conv(in_dim, out_dim // num_heads, heads=num_heads)
     elif self is GNN_TYPE.LUONG_GAT:
         # 4-heads, although the paper by Velickovic et al. had used 6-8 heads.
         # The output will be the concatenation of the heads, yielding a vector of size out_dim
         num_heads = 4
         return LuongGATConv(in_dim, out_dim // num_heads, heads=num_heads)
     elif self is GNN_TYPE.DP_GAT:
         # 4-heads, although the paper by Velickovic et al. had used 6-8 heads.
         # The output will be the concatenation of the heads, yielding a vector of size out_dim
         num_heads = 4
         return DotProductGATConv(in_dim,
                                  out_dim // num_heads,
                                  heads=num_heads)
Example #11
    def __init__(self, num_vocab, max_seq_len, node_encoder, emb_dim,
                 layer_timesteps=[5], num_class=0):
        super(GGNN_Simple, self).__init__()
        self.num_class = num_class  # if we do classification
        self.layer_timesteps = layer_timesteps
        # self.residual_connections = residual_connections

        #     'use_edge_bias': False,
        self.emb_dim = emb_dim
        self.num_vocab = num_vocab
        self.max_seq_len = max_seq_len
        self.node_encoder = node_encoder

        self.convs = []
        for layer_idx, t in enumerate(layer_timesteps):
            self.convs += [GatedGraphConv(emb_dim, t)]
        self.convs = nn.ModuleList(self.convs)

        self.classifier_l = nn.Sequential(
            nn.Linear(2*emb_dim, emb_dim),
            nn.Sigmoid()
        )
        self.classifier_r = nn.Sequential(
            nn.Linear(2*emb_dim, emb_dim),
            nn.Tanh()
        )

        if self.num_class > 0:
            self.graph_pred_linear = torch.nn.Linear(emb_dim, self.num_class)
        else:
            self.graph_pred_linear_list = torch.nn.ModuleList()

            for i in range(max_seq_len):
                self.graph_pred_linear_list.append(torch.nn.Linear(emb_dim, self.num_vocab))
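
The Sigmoid and Tanh heads over 2*emb_dim inputs defined above match the gated
graph-level readout from the GGNN paper (Li et al., 2016). A hedged sketch of
how such heads are commonly combined; the helper below is illustrative only and
is not taken from the source repository:

import torch
from torch_geometric.nn import global_add_pool

def ggnn_readout(classifier_l, classifier_r, h, x, batch):
    # h: final node states, x: initial node embeddings, batch: graph assignment
    hx = torch.cat([h, x], dim=-1)                # [num_nodes, 2 * emb_dim]
    gate = classifier_l(hx)                       # sigmoid gate per node
    cand = classifier_r(hx)                       # tanh candidate per node
    return global_add_pool(gate * cand, batch)    # [num_graphs, emb_dim]
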
Example #12
    def __init__(self, embed_size, vocab_size, keyword_vocab_size, hidden_size, output_size, n_layers, gnn, aggregation,
                 n_heads=0, dropout=0, bidirectional=False, \
                 utterance_encoder="", keywordid2wordid=None, keyword_mask_matrix=None, nodeid2wordid=None,
                 keywordid2nodeid=None, concept_encoder="mean", \
                 combine_node_emb="mean"):
        super(KW_GNN, self).__init__()
        self.embed_size = embed_size
        self.vocab_size = vocab_size
        self.keyword_vocab_size = keyword_vocab_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.gnn = gnn
        self.aggregation = aggregation
        self.n_heads = n_heads
        self.dropout = dropout
        self.bidirectional = bidirectional
        self.utterance_encoder_name = utterance_encoder
        self.keywordid2wordid = keywordid2wordid
        self.keyword_mask_matrix = keyword_mask_matrix
        self.nodeid2wordid = nodeid2wordid
        self.keywordid2nodeid = keywordid2nodeid
        self.concept_encoder = concept_encoder
        self.combine_node_emb = combine_node_emb
        self.num_nodes = nodeid2wordid.shape[0]

        self.embedding = nn.Embedding(vocab_size, embed_size)

        # GNN learning
        if gnn == "GatedGraphConv":
            self.conv1 = GatedGraphConv(hidden_size, num_layers=n_layers)
            output_size = hidden_size

        if n_layers == 1:
            output_size = hidden_size

        # aggregation
        if aggregation in ["mean", "max"]:
            output_size = output_size

        # utterance encoder
        if self.utterance_encoder_name == "HierGRU":
            self.utterance_encoder = nn.GRU(embed_size,
                                            hidden_size,
                                            1,
                                            batch_first=True,
                                            dropout=dropout,
                                            bidirectional=bidirectional)
            self.context_encoder = nn.GRU(
                2 * hidden_size if bidirectional else hidden_size,
                hidden_size,
                1,
                batch_first=True,
                dropout=dropout,
                bidirectional=bidirectional)
            output_size = output_size + 2 * hidden_size if bidirectional else output_size + hidden_size

        # final linear layer
        self.mlp = nn.Linear(output_size, keyword_vocab_size)
Example #13
 def __init__(self, hidden_size, n_node):
     super(GNNModel, self).__init__()
     self.hidden_size, self.n_node = hidden_size, n_node
     self.embedding = nn.Embedding(self.n_node, self.hidden_size)
     self.gated = GatedGraphConv(self.hidden_size, num_layers=1)
     self.e2s = Embedding2Score(self.hidden_size)
     self.loss_function = nn.CrossEntropyLoss()
     self.reset_parameters()
Example #14
    def _create_layers(self):
        self.conv_layer = GatedGraphConv(out_channels=self.conv_out_channels,
                                         num_layers=self.conv_num_layers,
                                         aggr=self.conv_aggr,
                                         bias=True)

        self.recurrent_layer = LSTM(input_size=self.conv_out_channels,
                                    hidden_size=self.lstm_out_channels,
                                    num_layers=self.lstm_num_layers)
Example #15
    def __init__(self, node_size):
        super(GatedGCN, self).__init__()

        self.node_per_graph = node_size

        self.linprev = EdgeConv(MLP([4*2, 64, 64, 64]), aggr='max')
        self.conv1 = GatedGraphConv(256, 2)
        self.bn1 = torch.nn.BatchNorm1d(256)
        self.conv2 = GatedGraphConv(256, 2)
        self.bn2 = torch.nn.BatchNorm1d(256)
        self.conv3 = GatedGraphConv(256, 2)
        self.bn3 = torch.nn.BatchNorm1d(256)
        self.conv4 = GatedGraphConv(256, 3)
        self.bn4 = torch.nn.BatchNorm1d(256)

        self.mlp = Seq(
            MLP([256*2*4, 512]), Dropout(0.4), MLP([512, 128]), Dropout(0.4),
            Lin(128, 40))
Example #16
def test_gated_graph_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    edge_weight = torch.randn(edge_index.size(1))
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    conv = GatedGraphConv(out_channels, num_layers=3)
    assert conv.__repr__() == 'GatedGraphConv(32, num_layers=3)'
    out1 = conv(x, edge_index)
    assert out1.size() == (num_nodes, out_channels)
    out2 = conv(x, edge_index, edge_weight)
    assert out2.size() == (num_nodes, out_channels)

    jit_conv = conv.jittable(x=x, edge_index=edge_index)
    jit_conv = torch.jit.script(jit_conv)
    assert jit_conv(x, edge_index).tolist() == out1.tolist()
    assert jit_conv(x, edge_index, edge_weight).tolist() == out2.tolist()
Example #17
 def __init__(self, vocablen, embedding_dim, num_layers, device):
     super(GGNN, self).__init__()
     self.device = device
     #self.num_layers=num_layers
     self.embed = nn.Embedding(vocablen, embedding_dim)
     self.edge_embed = nn.Embedding(20, embedding_dim)
     #self.gmn=nn.ModuleList([GMNlayer(embedding_dim,embedding_dim) for i in range(num_layers)])
     self.ggnnlayer = GatedGraphConv(embedding_dim, num_layers)
     self.mlp_gate = nn.Sequential(nn.Linear(embedding_dim, 1),
                                   nn.Sigmoid())
     self.pool = GlobalAttention(gate_nn=self.mlp_gate)
Example #18
def gnn_map(gnn_name, in_dim, out_dim, concat=False, bias=True) -> nn.Module:
    """

    :param gnn_name:
    :param in_dim:
    :param out_dim:
    :param concat: for gat, concat multi-head output or not
    :return: GNN model
    """
    if gnn_name == "gat_8":
        return GATConv(in_dim, out_dim, 8, concat=concat, bias=bias)
    elif gnn_name == "gat_6":
        return GATConv(in_dim, out_dim, 6, concat=concat, bias=bias)
    elif gnn_name == "gat_4":
        return GATConv(in_dim, out_dim, 4, concat=concat, bias=bias)
    elif gnn_name == "gat_2":
        return GATConv(in_dim, out_dim, 2, concat=concat, bias=bias)
    elif gnn_name in ["gat_1", "gat"]:
        return GATConv(in_dim, out_dim, 1, concat=concat, bias=bias)
    elif gnn_name == "gcn":
        return GCNConv(in_dim, out_dim)
    elif gnn_name == "cheb":
        return ChebConv(in_dim, out_dim, K=2, bias=bias)
    elif gnn_name == "sage":
        return SAGEConv(in_dim, out_dim, bias=bias)
    elif gnn_name == "gated":
        return GatedGraphConv(in_dim, out_dim, bias=bias)
    elif gnn_name == "arma":
        return ARMAConv(in_dim, out_dim, bias=bias)
    elif gnn_name == "sg":
        return SGConv(in_dim, out_dim, bias=bias)
    elif gnn_name == "linear":
        return LinearConv(in_dim, out_dim, bias=bias)
    elif gnn_name == "zero":
        return ZeroConv()
    elif gnn_name == "identity":
        return Identity()
    elif hasattr(torch_geometric.nn, gnn_name):
        cls = getattr(torch_geometric.nn, gnn_name)
        assert isinstance(cls,
                          type), "Only support modules, get %s" % (gnn_name)
        kwargs = {
            "in_channels": in_dim,
            "out_channels": out_dim,
            "concat": concat,
            "bias": bias,
        }
        kwargs = {
            key: kwargs[key]
            for key in cls.__init__.__code__.co_varnames if key in kwargs
        }
        return cls(**kwargs)
    raise KeyError("Cannot parse key %s" % (gnn_name))
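
As a usage illustration (this call is hypothetical and not taken from the
source repository), gnn_map returns a layer that can be applied directly to
node features and an edge index:

import torch

layer = gnn_map("gat_4", in_dim=64, out_dim=16, concat=True)   # 4-head GATConv
x = torch.randn(10, 64)
edge_index = torch.randint(0, 10, (2, 40))
out = layer(x, edge_index)   # -> [10, 64] (4 heads * 16 channels concatenated)
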
Example #19
    def __init__(self, args, trained=True, bn=False, backbone_type='res101'):
        super(Vrd_Graph_GA_v4, self).__init__()
        self.n_obj = args.num_classes
        self.n_rel = args.num_relations
        global res101
        if trained and backbone_type == 'res101':
            res101 = resnet.__dict__['resnet101'](pretrained=False, norm_layer=FrozenBatchNorm2d)
            weight_path = "../models/resnet101-5d3b4d8f.pth"
            state = torch.load(weight_path)
            res101.load_state_dict(state)
            layers_res = OrderedDict()
            for k, v in res101.named_children():
                if k in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
                    layers_res[k] = v
            backbone = nn.Sequential(layers_res)
            # 2048
            self.features = backbone
        else:
            res101 = resnet.__dict__['resnet101'](pretrained=False, norm_layer=FrozenBatchNorm2d)
            layers_res = OrderedDict()
            for k, v in res101.named_children():
                if k in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
                    layers_res[k] = v
            backbone = nn.Sequential(layers_res)
            # 2048
            self.features = backbone
        network.set_trainable(self.features, requires_grad=False)
        self.roi_pool = RoIPool((14, 14), 1.0 / 16)
        self.inter_layer = res101.layer4
        network.set_trainable(self.inter_layer, requires_grad=False)
        self.pool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.fc6 = nn.Linear(2048, 256)
        self.fc_obj = nn.Linear(2048, self.n_obj)
        self.gat_conv_rel1 = GatedGraphConv(out_channels=256, num_layers=2)
        self.rel_1 = nn.Linear(768, 256)
        self.rel_2 = nn.Linear(256, self.n_rel)
        self.fc_lov = nn.Linear(8, 256)
        self.fc_sub_obj = nn.Linear(2 * 300, 256)

        self.initialize_param()
Example #20
    def __init__(self,
                 embeddings,
                 edge_embeddings,
                 num_inputs,
                 num_units,
                 num_layers=1,
                 geometric_layer="rgcn"):
        super(DualGraphEncoder, self).__init__()
        self.embeddings = embeddings
        self.edge_embeddings = edge_embeddings
        self.tanh = nn.Tanh()

        self.dropout = nn.Dropout(0.3)

        # The number of expected features in the input x
        self.num_layers = num_layers
        self.num_inputs = num_inputs

        # The number of features in the hidden state h
        self.num_units = num_units
        self.geometric_layer = geometric_layer

        self.gated_gcn_in = GatedGraphConv(num_inputs, 5)
        self.gated_gcn_out = GatedGraphConv(num_inputs, 5)

        self.gated_gat_in = GATConv(num_inputs,
                                    num_units,
                                    heads=3,
                                    concat=False,
                                    dropout=0.3)
        #self.gated_gat_out = GATConv(num_inputs, num_units, heads=3, concat=False, dropout=0.3)

        hidden_size = self.num_units * 3 // 2

        self.bilstm = nn.LSTM(self.num_inputs * 3,
                              hidden_size,
                              num_layers=2,
                              bidirectional=True,
                              dropout=0.3)
Example #21
    def __init__(self, config):
        super(Net, self).__init__()

        annotation_size = config["hidden_size_orig"]
        hidden_size = config["gnn_h_size"]
        n_steps = config["num_timesteps"]
        num_cls = 2

        self.reduce = nn.Linear(annotation_size, hidden_size)
        self.conv = GatedGraphConv(hidden_size, n_steps)
        self.agg = GlobalAttention(nn.Linear(hidden_size, 1),
                                   nn.Linear(hidden_size, 2))
        self.lin = nn.Linear(hidden_size, num_cls)
Example #22
    def __init__(
        self,
        feat_size=19,
        gather_width=64,
        k=2,
        neighbor_threshold=None,
        output_pool_result=False,
        bn_track_running_stats=False,
    ):
        super(PotentialNetPropagation, self).__init__()
        assert neighbor_threshold is not None

        self.neighbor_threshold = neighbor_threshold
        self.bn_track_running_stats = bn_track_running_stats
        self.edge_attr_size = 1

        self.k = k
        self.gather_width = gather_width
        self.feat_size = feat_size
        self.edge_network_nn = nn.Sequential(
            nn.Linear(self.edge_attr_size, int(self.feat_size / 2)),
            nn.Softsign(),
            nn.Linear(int(self.feat_size / 2), self.feat_size),
            nn.Softsign(),
        )

        self.edge_network = NNConv(
            self.feat_size,
            self.edge_attr_size * self.feat_size,
            nn=self.edge_network_nn,
            root_weight=True,
            aggr="add",
        )
        # Note: this assumes a customized GatedGraphConv that accepts an
        # edge_network module; the stock torch_geometric layer has no such argument.
        self.gate = GatedGraphConv(self.feat_size,
                                   self.k,
                                   edge_network=self.edge_network)

        self.attention = PotentialNetAttention(
            net_i=nn.Sequential(
                nn.Linear(self.feat_size * 2, self.feat_size),
                nn.Softsign(),
                nn.Linear(self.feat_size, self.gather_width),
                nn.Softsign(),
            ),
            net_j=nn.Sequential(nn.Linear(self.feat_size, self.gather_width),
                                nn.Softsign()),
        )
        self.output_pool_result = output_pool_result
        if self.output_pool_result:
            self.global_add_pool = global_add_pool
Example #23
    def __init__(self, config: DictConfig, vocab: Vocabulary,
                 vocabulary_size: int, pad_idx: int):
        super(GatedGraphConvEncoder, self).__init__()
        self.__config = config
        self.__pad_idx = pad_idx
        self.__st_embedding = STEncoder(config, vocab, vocabulary_size,
                                        pad_idx)

        self.input_GCL = GatedGraphConv(out_channels=config.hidden_size,
                                        num_layers=config.n_gru)

        self.input_GPL = TopKPooling(config.hidden_size,
                                     ratio=config.pooling_ratio)

        for i in range(config.n_hidden_layers - 1):
            setattr(
                self, f"hidden_GCL{i}",
                GatedGraphConv(out_channels=config.hidden_size,
                               num_layers=config.n_gru))
            setattr(
                self, f"hidden_GPL{i}",
                TopKPooling(config.hidden_size, ratio=config.pooling_ratio))
        self.attpool = GlobalAttention(torch.nn.Linear(config.hidden_size, 1))
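
A hedged sketch of how these pieces typically fit together in a forward pass;
the helper below is illustrative only (the encoder's real forward method is not
shown in the snippet) and assumes x already holds node feature vectors of at
most hidden_size dimensions:

def encode_sketch(encoder, x, edge_index, batch):
    # GatedGraphConv -> TopKPooling -> global attention pooling
    h = encoder.input_GCL(x, edge_index)
    h, edge_index, _, batch, _, _ = encoder.input_GPL(h, edge_index, None, batch)
    return encoder.attpool(h, batch)   # one graph-level vector per graph
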
Example #24
    def __init__(
        self,
        embed_dim: int,
        num_layers: int,
        heads: int = 8,
        normalization: str = "batch",
        feed_forward_hidden: int = 512,
        pooling_method: str = "add",
    ) -> None:
        super().__init__()
        self.embed_dim = embed_dim
        self.num_layers = num_layers
        self.heads = heads
        assert (self.embed_dim % self.heads) == 0
        self.pooling_func = get_pooling_func(pooling_method)
        self.norm_class = get_normalization_class(normalization)
        self.norm_out = GraphNorm(self.embed_dim)
        self.gnn_layer = GatedGraphConv(out_channels=self.embed_dim,
                                        num_layers=1)
        self.reversed_gnn_layer = GatedGraphConv(out_channels=self.embed_dim,
                                                 num_layers=1)

        self.edge_extractor = EdgeFeatureExtractor(in_channels=self.embed_dim,
                                                   edge_dim=self.embed_dim)
Example #25
 def __init__(self, hidden_size, n_node):
     super(testGNN, self).__init__()
     self.hidden_size, self.n_node = hidden_size, n_node
     self.embedding = torch.nn.Embedding(self.n_node, self.hidden_size)
     self.gcn = GCNConv(self.hidden_size, self.hidden_size)
     self.gcn2 = GCNConv(self.hidden_size, self.hidden_size)
     self.ggcn = GatedGraphConv(self.hidden_size, self.hidden_size)
     self.gat1 = GATConv(self.hidden_size,
                         self.hidden_size,
                         heads=8,
                         negative_slope=0.2)
     self.gat2 = GATConv(8 * self.hidden_size,
                         self.hidden_size,
                         heads=1,
                         negative_slope=0.2)
     self.e2s = Embedding2Score(self.hidden_size)
Example #26
    def __init__(self, embed_size, vocab_size, gnn_hidden_size, gnn_layers, encoder_hidden_size, encoder_layers, n_heads, gnn, encoder, matching, \
        aggregation, use_keywords, keyword_encoder, keyword_score_weight=1, dropout=0, CN_hopk_edge_matrix_mask=None, nodeid2wordid=None, \
                keywordid2wordid=None, keywordid2nodeid=None, concept_encoder="mean", combine_word_concepts="concat"):
        super(CoGraphMatcher, self).__init__()
        self.embed_size = embed_size
        self.vocab_size = vocab_size
        self.gnn_hidden_size = gnn_hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.gnn_layers = gnn_layers
        self.encoder_layers = encoder_layers
        self.n_heads = n_heads
        self.gnn = gnn
        self.encoder = encoder
        self.aggregation = aggregation
        self.use_keywords = use_keywords
        self.keyword_score_weight = keyword_score_weight
        self.keyword_encoder = keyword_encoder
        self.dropout = dropout
        self.CN_hopk_edge_matrix_mask = CN_hopk_edge_matrix_mask
        self.nodeid2wordid = nodeid2wordid
        self.keywordid2wordid = keywordid2wordid
        self.keywordid2nodeid = keywordid2nodeid
        self.concept_encoder = concept_encoder
        self.combine_word_concepts = combine_word_concepts
        self.num_nodes = nodeid2wordid.shape[0]

        self.embedding = nn.Embedding(vocab_size, embed_size)

        # GNN learning
        encoder_input_size = gnn_hidden_size

        if gnn == "GatedGraphConv":
            self.conv1 = GatedGraphConv(gnn_hidden_size, num_layers=gnn_layers)

        if self.encoder == "GRU":
            self.utterance_encoder = nn.GRU(encoder_input_size,
                                            encoder_hidden_size,
                                            encoder_layers,
                                            batch_first=True,
                                            dropout=dropout,
                                            bidirectional=True)
            self.candidate_encoder = nn.GRU(encoder_input_size,
                                            encoder_hidden_size,
                                            encoder_layers,
                                            batch_first=True,
                                            dropout=dropout,
                                            bidirectional=True)
Example #27
 def __init__(self, hidden_size, n_node):
     super(GNNModel, self).__init__()
     self.hidden_size, self.n_node = hidden_size, n_node
     self.embedding = nn.Embedding(self.n_node, self.hidden_size)
     self.gat1 = GATConv(self.hidden_size,
                         self.hidden_size,
                         heads=8,
                         negative_slope=0.2)
     self.gat2 = GATConv(8 * self.hidden_size,
                         self.hidden_size,
                         heads=1,
                         negative_slope=0.2)
     self.sage1 = SAGEConv(self.hidden_size, self.hidden_size)
     self.sage2 = SAGEConv(self.hidden_size, self.hidden_size)
     self.gated = GatedGraphConv(self.hidden_size, num_layers=2)
     self.e2s = Embedding2Score(self.hidden_size)
     self.loss_function = nn.CrossEntropyLoss()
     self.reset_parameters()
Example #28
    def __init__(self, node_dim: int, num_convs: int,
                 num_layers: Union[int, List[int]], num_edge_types: int):
        super().__init__()
        # A ModuleList (rather than a plain Python list) registers the
        # GatedGraphConv sub-modules so their parameters are trained and moved
        # with the model.
        self.convs = torch.nn.ModuleList()
        if isinstance(num_layers, int):
            num_layers = [num_layers] * num_convs

        for i in range(num_convs):
            self.convs.append(GatedGraphConv(node_dim, num_layers=1))

        #  Weight given to each edge type
        self.edge_type_weights = torch.nn.Parameter(
            torch.FloatTensor((1.0, ) * num_edge_types))

        #  Simple linear layer for the output of conv3
        self.lin1 = torch.nn.Linear(node_dim, node_dim)

        #  Slope for leaky relus
        self.neg_slope = 0.01
Example #29
    def init_model(self, n_class, feature_num):
        num_layers = self.hyperparameters['num_layers']
        hidden_size = int(2**self.hyperparameters['hidden'])
        lr = self.hyperparameters['lr']
        gated_conv_layers = int(self.hyperparameters['gated_conv_layers'])

        self.input_linear = Linear(feature_num, hidden_size)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(
                GatedGraphConv(out_channels=hidden_size,
                               num_layers=gated_conv_layers))
        self.output_linear = Linear(hidden_size, n_class)
        self.optimizer = torch.optim.Adam(self.parameters(),
                                          lr=lr,
                                          weight_decay=5e-4)

        self = self.to('cuda')

        torch.cuda.empty_cache()
Example #30
    def __init__(self, poi_size, embedding_matrix, struct_topology, user_history_data_dict):
        super(Net, self).__init__()
        self.verbose = True
        self.use_cuda = True
        self.embedding_dim = Parameters.type_embedding_dim
        self.embedding_matrix = embedding_matrix
        self.struct_topology = struct_topology
        self.hidden_dim = Parameters.HIDDEN_SIZE
        self.linear_size = 30
        self.batch_size = 4
        self.poi_size = poi_size
        self.drop_out = Parameters.DROPOUT_RATE
        self.num_classes = 5
        self.n_epochs = 3
        self.poi_size = embedding_matrix.shape[0]
        self.user_history_data_dict = user_history_data_dict
        self.conv3 = GatedGraphConv(self.hidden_dim, 3)
        # self.conv4 = ChebConv(self.hidden_dim, 56, 2)
        # self.conv5 = ChebConv(16, self.num_classes, 2)

        self.fc1 = nn.Sequential(
            nn.BatchNorm1d(self.hidden_dim * 2),
            nn.Linear(self.hidden_dim * 2, self.linear_size),
            nn.ReLU(),
            nn.BatchNorm1d(self.linear_size),
            nn.Dropout(self.drop_out)
        )

        self.fc2 = nn.Sequential(
            nn.BatchNorm1d(self.hidden_dim * 2),
            nn.Linear(self.hidden_dim * 2, self.linear_size),
            nn.ReLU(),
            nn.BatchNorm1d(self.linear_size),
            nn.Dropout(self.drop_out),
            nn.Linear(self.linear_size, self.num_classes)
        )

        self.embed = nn.Embedding(self.poi_size, self.embedding_dim, padding_idx=0)
        self.embed.weight.data.copy_(torch.from_numpy(embedding_matrix))
        self.bat_nor_embed = nn.BatchNorm1d(self.embedding_dim)
        self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, batch_first=True)