Example #1
    def define_model(self):
        out_channels = self.args.attention_out_channels
        multi_head = self.args.multi_head
        channels = multi_head * out_channels

        self.conv1 = GATConv(in_channels=self.train_dataset.num_features,
                             out_channels=out_channels,
                             heads=multi_head)
        self.lin1 = torch.nn.Linear(
            in_features=self.train_dataset.num_features, out_features=channels)
        self.conv2 = GATConv(in_channels=channels,
                             out_channels=out_channels,
                             heads=multi_head)
        self.lin2 = torch.nn.Linear(in_features=channels,
                                    out_features=channels)
        self.conv3 = GATConv(in_channels=channels,
                             out_channels=self.train_dataset.num_classes,
                             heads=6,
                             concat=False)
        self.lin3 = torch.nn.Linear(
            in_features=channels, out_features=self.train_dataset.num_classes)
Example #2
def test_gat_conv_with_edge_attr():
    x = torch.randn(4, 8)
    edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 1, 1]])
    edge_weight = torch.randn(edge_index.size(1))
    edge_attr = torch.randn(edge_index.size(1), 4)

    conv = GATConv(8, 32, heads=2, edge_dim=1, fill_value=0.5)
    out = conv(x, edge_index, edge_weight)
    assert out.size() == (4, 64)

    conv = GATConv(8, 32, heads=2, edge_dim=1, fill_value='mean')
    out = conv(x, edge_index, edge_weight)
    assert out.size() == (4, 64)

    conv = GATConv(8, 32, heads=2, edge_dim=4, fill_value=0.5)
    out = conv(x, edge_index, edge_attr)
    assert out.size() == (4, 64)

    conv = GATConv(8, 32, heads=2, edge_dim=4, fill_value='mean')
    out = conv(x, edge_index, edge_attr)
    assert out.size() == (4, 64)
Example #3
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers=2,
                 dropout=0.5, heads=2):
        super(GAT, self).__init__()

        self.convs = nn.ModuleList()
        self.convs.append(
            GATConv(in_channels, hidden_channels, heads=heads, concat=True))

        self.bns = nn.ModuleList()
        self.bns.append(nn.BatchNorm1d(hidden_channels*heads))
        for _ in range(num_layers - 2):
            self.convs.append(
                GATConv(hidden_channels * heads, hidden_channels, heads=heads, concat=True))
            self.bns.append(nn.BatchNorm1d(hidden_channels*heads))

        self.convs.append(
            GATConv(hidden_channels*heads, out_channels, heads=heads, concat=False))

        self.dropout = dropout
        self.activation = F.elu 
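
The constructor above only assembles the layer and batch-norm stacks. Below is a minimal, self-contained sketch (not the original forward method) of how one such block is typically applied, assuming the common conv -> BatchNorm -> ELU -> dropout ordering and made-up sizes:

import torch
import torch.nn.functional as F
from torch_geometric.nn import GATConv

x = torch.randn(10, 16)                        # 10 nodes, 16 input features (hypothetical)
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 2, 3, 0]])

conv = GATConv(16, 8, heads=4, concat=True)    # concat=True -> 8 * 4 = 32 output channels
bn = torch.nn.BatchNorm1d(8 * 4)

h = conv(x, edge_index)                        # shape [10, 32]
h = F.dropout(F.elu(bn(h)), p=0.5, training=True)
print(h.shape)                                 # torch.Size([10, 32])
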
Example #4
 def __init__(self, num_layers, hidden_list, activation, data):
     super(ModelGAT, self).__init__()
     assert len(hidden_list) == num_layers + 1
     self.linear_1 = Linear(data.num_features, hidden_list[0])
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers):
         self.convs.append(GATConv(hidden_list[i], hidden_list[i + 1]))
     self.linear_2 = Linear(hidden_list[-1], data.num_class)
     if activation == "relu":
         self.activation = relu
     elif activation == "leaky_relu":
         self.activation = leaky_relu
Example #5
 def __init__(self,
              u_hidden_size,
              i_hidden_size,
              number,
              i_hidden_list,
              hidden_list,
              args,
              heads=6,
              dataset='book',
              mode='GAT'):
     super(Model, self).__init__()
     self.u_hidden_size, self.i_hidden_size = u_hidden_size, i_hidden_size
     self.u_nodes, self.i_nodes = number['u'], number['a']
     self.u_embedding = nn.Embedding(self.u_nodes, self.u_hidden_size)
     self.i_embedding = nn.Embedding(self.i_nodes, self.i_hidden_size)
     self.convs = nn.ModuleList()
     self.mode = mode
     i_hidden_list = [i_hidden_size] + i_hidden_list
     if mode == 'GCN':
         self.convs = nn.ModuleList([
             GCNConv(i_hidden_list[i - 1], i_hidden_list[i])
             for i in range(1, len(i_hidden_list))
         ])
     elif mode == 'GAT':
         self.convs = nn.ModuleList([
             GATConv(i_hidden_list[i - 1],
                     i_hidden_list[i],
                     heads=heads,
                     concat=False) for i in range(1, len(i_hidden_list))
         ])
     elif mode == 'HGCN':
         self.convs = nn.ModuleList([
             HGCN(i_hidden_list[i - 1],
                  i_hidden_list[i],
                  c_in=args.c_in,
                  c_out=args.c_out) for i in range(1, len(i_hidden_list))
         ])
     elif mode == 'HNN':
         self.convs = nn.ModuleList([
             HNN(i_hidden_list[i - 1], i_hidden_list[i], c=args.c_in)
             for i in range(1, len(i_hidden_list))
         ])
     hidden_list = [i_hidden_list[-1] + u_hidden_size] + hidden_list
     self.liners = nn.ModuleList([
         nn.Linear(hidden_list[i - 1], hidden_list[i])
         for i in range(1, len(hidden_list))
     ])
     if hidden_list[-1] == 1:
         self.final = torch.sigmoid
         self.loss = nn.BCELoss()
     else:
         self.final = torch.softmax
         self.loss = nn.NLLLoss()
Example #6
 def __init__(self, num_node_features, num_classes):
      '''
      Input layer: num_node_features
      Hidden layer: 4 * 16
      Hidden layer: 4 * 16
      Output layer: num_classes
      '''
     super(GAT, self).__init__()
     self.conv1 = GATConv(in_channels=num_node_features,
                          out_channels=16,
                          heads=4,
                          dropout=0.2)
     self.conv2 = GATConv(in_channels=4 * 16,
                          out_channels=16,
                          heads=4,
                          dropout=0.2)
     self.conv3 = GATConv(in_channels=4 * 16,
                          out_channels=num_classes,
                          heads=6,
                          concat=False,
                          dropout=0.2)
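
Only the constructor is shown above; the self-contained sketch below (hypothetical sizes: 7 node features, 3 classes) runs the three layers end to end to illustrate the channel arithmetic: the second layer consumes the 4 * 16 concatenated head outputs, and the final layer averages its 6 heads because concat=False.

import torch
import torch.nn.functional as F
from torch_geometric.nn import GATConv

x = torch.randn(5, 7)                                    # 5 nodes, 7 features (hypothetical)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])

conv1 = GATConv(7, 16, heads=4, dropout=0.2)             # -> [5, 4 * 16]
conv2 = GATConv(4 * 16, 16, heads=4, dropout=0.2)        # -> [5, 4 * 16]
conv3 = GATConv(4 * 16, 3, heads=6, concat=False, dropout=0.2)  # -> [5, 3]

h = F.elu(conv1(x, edge_index))
h = F.elu(conv2(h, edge_index))
out = conv3(h, edge_index)
print(out.shape)                                         # torch.Size([5, 3])
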
Example #7
    def __init__(self,
                 nfeat,
                 nhid,
                 nclass,
                 heads=8,
                 output_heads=1,
                 dropout=0.5,
                 lr=0.01,
                 weight_decay=5e-4,
                 with_bias=True,
                 device=None):

        super(GAT, self).__init__()

        assert device is not None, "Please specify 'device'!"
        self.device = device

        self.nfeat = nfeat
        self.hidden_sizes = [nhid]
        self.nclass = nclass

        self.conv1 = GATConv(nfeat,
                             nhid,
                             heads=heads,
                             dropout=dropout,
                             bias=with_bias)

        self.conv2 = GATConv(nhid * heads,
                             nclass,
                             heads=output_heads,
                             concat=False,
                             dropout=dropout,
                             bias=with_bias)

        self.dropout = dropout
        self.weight_decay = weight_decay
        self.lr = lr
        self.output = None
        self.best_model = None
        self.best_output = None
Example #8
def test_gat_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    conv = GATConv(in_channels, out_channels, heads=2, dropout=0.5)
    assert conv.__repr__() == 'GATConv(16, 32, heads=2)'
    assert conv(x, edge_index).size() == (num_nodes, 2 * out_channels)
    assert conv((x, None), edge_index).size() == (num_nodes, 2 * out_channels)
    out = conv(x, edge_index, return_attention_weights=True)
    assert out[0].size() == (num_nodes, 2 * out_channels)
    assert out[1].size() == (edge_index.size(1) + num_nodes, 2)
    assert conv.alpha is None

    conv = GATConv(in_channels, out_channels, heads=2, concat=False)
    assert conv(x, edge_index).size() == (num_nodes, out_channels)
    assert conv((x, None), edge_index).size() == (num_nodes, out_channels)
    out = conv(x, edge_index, return_attention_weights=True)
    assert out[0].size() == (num_nodes, out_channels)
    assert out[1].size() == (edge_index.size(1) + num_nodes, 2)
    assert conv.alpha is None
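
Note that the attention-weight API has changed across torch_geometric releases; in more recent versions, return_attention_weights=True returns the layer output together with an (edge_index, alpha) tuple rather than storing alpha on the module. A short sketch under that assumption:

import torch
from torch_geometric.nn import GATConv

x = torch.randn(4, 16)
edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])

conv = GATConv(16, 32, heads=2)
out, (attn_edge_index, alpha) = conv(x, edge_index, return_attention_weights=True)
print(out.shape)     # torch.Size([4, 64])
print(alpha.shape)   # one coefficient per head for each (self-loop-augmented) edge
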
Example #9
 def __init__(self, n_features, n_outputs, dim=100):
     super(GIAT, self).__init__()
     nn1 = Seq(Linear(n_features, 2*dim), ReLU(), Linear(2*dim, dim))
     self.conv1 = GINConv(nn1)
     self.bn1 = torch.nn.BatchNorm1d(dim)
     self.conv2 = GATConv(dim, dim, heads=4)

     # fully connected layers
     self.fc1 = Linear(4*dim, 3*dim)
     self.fc2 = Linear(3*dim, 2*dim)
     self.fc3 = Linear(2*dim, dim)
     self.fc4 = Linear(dim, 1)
Example #10
 def __init__(self, conv_name, in_hid, out_hid, num_types, num_relations, n_heads, dropout, use_norm=True,
              use_RTE=True):
     super(GeneralConv, self).__init__()
     self.conv_name = conv_name
     if self.conv_name == 'hgt':
         self.base_conv = HGTConv(in_hid, out_hid, num_types, num_relations, n_heads, dropout, use_norm, use_RTE)
     elif self.conv_name == 'dense_hgt':
         self.base_conv = DenseHGTConv(in_hid, out_hid, num_types, num_relations, n_heads, dropout, use_norm,
                                       use_RTE)
     elif self.conv_name == 'gcn':
         self.base_conv = GCNConv(in_hid, out_hid)
     elif self.conv_name == 'gat':
         self.base_conv = GATConv(in_hid, out_hid // n_heads, heads=n_heads)
Example #11
    def make_graph_layer(self, hidden_dim, layer_idx):
        # holdover from the Benchmarking GNNs paper where they found this useful
        if layer_idx == self.num_graph_layers - 1:
            heads = 1
        else:
            heads = self.heads

        return GATConv(
            hidden_dim,
            hidden_dim // heads,
            heads=heads,
            dropout=self.gat_dropout,
        )
Example #12
 def __init__(self,
              num_features,
              num_classes,
              hid_layer=10,
              dropout=0.3,
              activation="elu",
              heads=[8, 1]):
     super(GatNet, self).__init__()
     self.conv1 = GATConv(num_features,
                          hid_layer,
                          heads=heads[0],
                          dropout=dropout)
     self.conv2 = GATConv(hid_layer * heads[0],
                          num_classes,
                          heads=heads[1],
                          concat=False,
                          dropout=dropout)
     self.dropout = dropout
     if activation == 'elu':
         self.activation_func = F.elu
     else:
         self.activation_func = F.relu
Example #13
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 heads):
        super(GAT, self).__init__()

        self.num_layers = num_layers

        self.convs = torch.nn.ModuleList()
        self.convs.append(GATConv(dataset.num_features, hidden_channels,
                                  heads))
        for _ in range(num_layers - 2):
            self.convs.append(
                GATConv(heads * hidden_channels, hidden_channels, heads))
        self.convs.append(
            GATConv(heads * hidden_channels, out_channels, heads,
                    concat=False))

        self.skips = torch.nn.ModuleList()
        self.skips.append(Lin(dataset.num_features, hidden_channels * heads))
        for _ in range(num_layers - 2):
            self.skips.append(
                Lin(hidden_channels * heads, hidden_channels * heads))
        self.skips.append(Lin(hidden_channels * heads, out_channels))
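
The corresponding forward pass is not shown; presumably each GATConv output is combined with its skip projection. A self-contained sketch of that pattern with made-up sizes:

import torch
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.nn import GATConv

num_nodes, in_dim, hidden, heads = 6, 12, 8, 4           # hypothetical sizes
x = torch.randn(num_nodes, in_dim)
edge_index = torch.tensor([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]])

conv = GATConv(in_dim, hidden, heads)                    # concat=True -> hidden * heads channels
skip = Linear(in_dim, hidden * heads)

h = F.elu(conv(x, edge_index) + skip(x))                 # attention output plus linear skip
print(h.shape)                                           # torch.Size([6, 32])
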
Example #14
    def __init__(self,
                 inpt_size,
                 hidden_size,
                 output_size,
                 posemb_size,
                 dropout=0.5):
        # inpt_size: utter_hidden_size + user_embed_size
        super(GCNContext, self).__init__()
        # self.conv1 = GCNConv(inpt_size + posemb_size, hidden_size)
        # self.conv2 = GCNConv(hidden_size + posemb_size, hidden_size)
        # self.conv3 = GCNConv(hidden_size + posemb_size, hidden_size)
        self.conv1 = GATConv(inpt_size + posemb_size, hidden_size, heads=8)
        self.conv2 = GATConv(8 * hidden_size, hidden_size, heads=8)
        self.conv3 = GATConv(8 * hidden_size, hidden_size, heads=8)
        self.bn1 = nn.BatchNorm1d(num_features=hidden_size)
        self.bn2 = nn.BatchNorm1d(num_features=hidden_size)
        self.bn3 = nn.BatchNorm1d(num_features=hidden_size)

        self.linear = nn.Linear(8 * hidden_size, output_size)
        self.drop = nn.Dropout(p=dropout)
        self.posemb = nn.Embedding(
            100, posemb_size)  # 100 comfortably exceeds the maximum number of turns
Example #15
    def __init__(self,
                 in_channels: int,
                 hidden_channels: int,
                 out_channels: int,
                 edge_dim: int,
                 num_layers: int,
                 num_timesteps: int,
                 dropout: float = 0.0):
        super().__init__()

        self.num_layers = num_layers
        self.num_timesteps = num_timesteps
        self.dropout = dropout

        self.lin1 = Linear(in_channels, hidden_channels)

        conv = GATEConv(hidden_channels, hidden_channels, edge_dim, dropout)
        gru = GRUCell(hidden_channels, hidden_channels)
        self.atom_convs = torch.nn.ModuleList([conv])
        self.atom_grus = torch.nn.ModuleList([gru])
        for _ in range(num_layers - 1):
            conv = GATConv(hidden_channels,
                           hidden_channels,
                           dropout=dropout,
                           add_self_loops=False,
                           negative_slope=0.01)
            self.atom_convs.append(conv)
            self.atom_grus.append(GRUCell(hidden_channels, hidden_channels))

        self.mol_conv = GATConv(hidden_channels,
                                hidden_channels,
                                dropout=dropout,
                                add_self_loops=False,
                                negative_slope=0.01)
        self.mol_gru = GRUCell(hidden_channels, hidden_channels)

        self.lin2 = Linear(hidden_channels, out_channels)

        self.reset_parameters()
Example #16
def get_encoder(layer_config, gnn_type, **kwargs):
    """
    Builds the GNN backbone as required
    """
    if gnn_type == "gcn":
        return nn.ModuleList([GCNConv(layer_config[i-1], layer_config[i]) for i in range(1, len(layer_config))])
    elif gnn_type == "sage":
        return nn.ModuleList([SAGEConv(layer_config[i-1], layer_config[i]) for i in range(1, len(layer_config))])
    elif gnn_type == "gat":
        heads = kwargs['heads'] if 'heads' in kwargs else [8] * len(layer_config)
        concat = kwargs['concat'] if 'concat' in kwargs else True
        return nn.ModuleList([GATConv(layer_config[i-1], layer_config[i] // heads[i-1], heads=heads[i-1], concat=concat)
                              for i in range(1, len(layer_config))])
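
In the 'gat' branch, out_channels is divided by the head count, so after head concatenation each layer's output width is back at layer_config[i] (assuming it is divisible by the head count). A self-contained check of that arithmetic with hypothetical dimensions:

import torch
from torch_geometric.nn import GATConv

layer_config = [16, 32, 64]          # hypothetical: input dim followed by two layer widths
heads = [8, 8]
convs = torch.nn.ModuleList([
    GATConv(layer_config[i - 1], layer_config[i] // heads[i - 1],
            heads=heads[i - 1], concat=True)
    for i in range(1, len(layer_config))
])

x = torch.randn(10, 16)
edge_index = torch.tensor([[0, 1, 2], [1, 2, 0]])
for conv in convs:
    x = conv(x, edge_index)
print(x.shape)                       # torch.Size([10, 64])
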
Example #17
    def __init__(self,
                 num_timesteps=4,
                 emb_dim=300,
                 num_layers=5,
                 drop_ratio=0,
                 num_tasks=1,
                 **args):
        super(AttentiveFP, self).__init__()

        self.num_layers = num_layers
        self.num_timesteps = num_timesteps
        self.drop_ratio = drop_ratio

        self.atom_encoder = AtomEncoder(emb_dim)
        self.bond_encoder = BondEncoder(emb_dim=emb_dim)

        conv = GATEConv(emb_dim, emb_dim, emb_dim, drop_ratio)
        gru = GRUCell(emb_dim, emb_dim)
        self.atom_convs = torch.nn.ModuleList([conv])
        self.atom_grus = torch.nn.ModuleList([gru])
        for _ in range(num_layers - 1):
            conv = GATConv(emb_dim,
                           emb_dim,
                           dropout=drop_ratio,
                           add_self_loops=False,
                           negative_slope=0.01)
            self.atom_convs.append(conv)
            self.atom_grus.append(GRUCell(emb_dim, emb_dim))

        self.mol_conv = GATConv(emb_dim,
                                emb_dim,
                                dropout=drop_ratio,
                                add_self_loops=False,
                                negative_slope=0.01)
        self.mol_gru = GRUCell(emb_dim, emb_dim)

        self.graph_pred_linear = Linear(emb_dim, num_tasks)

        self.reset_parameters()
Example #18
 def __init__(self,
              features_num,
              num_class,
              num_layers=3,
              hidden=32,
              **kwargs):
     super(GAT, self).__init__()
     hidden = max(hidden, num_class * 2)
     self.convs = nn.ModuleList()
     for _ in range(num_layers):
         self.convs.append(GATConv(hidden, hidden))
     self.input_lin = nn.Linear(features_num, hidden)
     self.output_lin = nn.Linear(hidden, num_class)
Example #19
    def __init__(self, in_feats, n_hidden_per_head, n_classes, activation,
                 dropout, attn_dropout, heads):
        # placeholder call to the superclass constructor (overridden below)
        super(GAT, self).__init__(1, 1, 1, 1, 1, 1, GraphConv)

        # now override the network architecture, because GAT is special
        self.layers = nn.ModuleList()
        self.layers.append(
            GATConv(in_feats,
                    n_hidden_per_head,
                    heads=heads[0],
                    dropout=attn_dropout))
        # hidden layers
        self.layers.append(
            GATConv(n_hidden_per_head * heads[0],
                    n_classes,
                    heads=heads[1],
                    concat=True,
                    dropout=attn_dropout))
        # output layer
        self.activation = activation
        self.dropout = dropout
Example #20
    def __init__(self,
                 dataset,
                 channels,
                 heads=1,
                 dropout=0.6,
                 attention_dropout=0.3):
        super(BiGAT, self).__init__()
        self.conv_st = []
        self.conv_ts = []
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        channels_output = [dataset.num_node_features
                           ] + [c * 2 * heads for c in channels]
        channels = [dataset.num_node_features] + channels
        for i in range(len(channels) - 1):
            conv_st = GATConv(channels_output[i],
                              channels[i + 1],
                              heads=heads,
                              dropout=self.attention_dropout)
            self.add_module('conv_st' + str(i), conv_st)
            self.conv_st.append(conv_st)

            conv_ts = GATConv(channels_output[i],
                              channels[i + 1],
                              heads=heads,
                              dropout=self.attention_dropout)
            self.add_module('conv_ts' + str(i), conv_ts)
            self.conv_ts.append(conv_ts)

        if dataset.name == 'PubMed':
            self.last = GATConv(channels_output[-1],
                                dataset.num_classes,
                                heads=heads,
                                concat=False,
                                dropout=self.attention_dropout)
        else:
            self.last = GATConv(channels_output[-1],
                                dataset.num_classes,
                                dropout=self.attention_dropout)
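
channels_output pairs each width with c * 2 * heads, which suggests the forward pass concatenates the head-concatenated outputs of the st and ts convolutions. A width check under that assumption (the edge-direction handling and all sizes here are made up):

import torch
from torch_geometric.nn import GATConv

heads, c_in, c_out = 2, 8, 16                  # hypothetical sizes
conv_st = GATConv(c_in, c_out, heads=heads)
conv_ts = GATConv(c_in, c_out, heads=heads)

x = torch.randn(5, c_in)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])

h = torch.cat([conv_st(x, edge_index),
               conv_ts(x, edge_index.flip(0))], dim=-1)   # both directions, concatenated
print(h.shape)                                 # torch.Size([5, 64]) == c_out * 2 * heads
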
Example #21
    def __init__(self, model: str, in_channels: int, out_channels: int,
                 hidden_channels: int, num_layers: int, heads: int = 4,
                 dropout: float = 0.5):
        super().__init__()
        self.save_hyperparameters()
        self.model = model.lower()
        self.dropout = dropout

        self.convs = ModuleList()
        self.norms = ModuleList()
        self.skips = ModuleList()

        if self.model == 'gat':
            self.convs.append(
                GATConv(in_channels, hidden_channels // heads, heads))
            self.skips.append(Linear(in_channels, hidden_channels))
            for _ in range(num_layers - 1):
                self.convs.append(
                    GATConv(hidden_channels, hidden_channels // heads, heads))
                self.skips.append(Linear(hidden_channels, hidden_channels))

        elif self.model == 'graphsage':
            self.convs.append(SAGEConv(in_channels, hidden_channels))
            for _ in range(num_layers - 1):
                self.convs.append(SAGEConv(hidden_channels, hidden_channels))

        for _ in range(num_layers):
            self.norms.append(BatchNorm1d(hidden_channels))

        self.mlp = Sequential(
            Linear(hidden_channels, hidden_channels),
            BatchNorm1d(hidden_channels),
            ReLU(inplace=True),
            Dropout(p=self.dropout),
            Linear(hidden_channels, out_channels),
        )

        self.acc = Accuracy()
Example #22
    def __init__(self,
                 dataset,
                 channels,
                 heads=1,
                 dropout=0.6,
                 attention_dropout=0.3):
        super(MonoGAT, self).__init__()
        channels = [dataset.num_node_features
                    ] + channels + [dataset.num_classes]

        self.dropout = dropout
        self.attention_dropout = attention_dropout

        self.conv = []
        for i in range(1, len(channels)):
            if i == 1:
                conv = GATConv(channels[i - 1],
                               channels[i],
                               heads=heads,
                               dropout=self.attention_dropout)
            elif i == len(channels) - 1:
                if dataset.name == 'PubMed':
                    conv = GATConv(channels[i - 1] * heads,
                                   channels[i],
                                   heads=heads,
                                   concat=False,
                                   dropout=self.attention_dropout)
                else:
                    conv = GATConv(channels[i - 1] * heads,
                                   channels[i],
                                   dropout=self.attention_dropout)
            else:
                conv = GATConv(channels[i - 1] * heads,
                               channels[i],
                               heads=heads,
                               dropout=self.attention_dropout)
            self.add_module(str(i), conv)
            self.conv.append(conv)
Example #23
    def __init__(self, *args, **kwargs):
        super().__init__()
        ignore = GATConv(*args, **kwargs)
        self.register_buffer('weight',
                             to_var(ignore.weight.data, requires_grad=True))
        self.register_buffer('bias',
                             to_var(ignore.bias.data, requires_grad=True))
        self.register_buffer('att', to_var(ignore.att.data,
                                           requires_grad=True))

        self.gat = [
            GAT(ignore.weight.shape[0], ignore.weight.shape[1], self.weight,
                self.att, self.bias)
        ]
Example #24
    def __init__(self, D, C, G=0, dropout=0.25, task='graph'):
        super(GATNet, self).__init__()

        self.D = D
        self.C = C
        self.G = G

        self.dropout = dropout
        self.task = task

        self.conv1 = GATConv(self.D, self.D, heads=2, dropout=dropout)
        self.conv2 = GATConv(self.D * 2,
                             self.D,
                             heads=1,
                             concat=False,
                             dropout=dropout)

        if (self.G > 0):
            self.Z = self.D + self.G
        else:
            self.Z = self.D
        self.mlp1 = Linear(self.Z, self.Z)
        self.mlp2 = Linear(self.Z, self.C)
Example #25
 def __init__(self, num_meta_paths, in_size, out_size, layer_num_heads,
              dropout):
     super(GATLayer, self).__init__()
     # One GAT layer for each meta-path-based adjacency matrix
     self.gat_layers = nn.ModuleList()
     for i in range(num_meta_paths):
         self.gat_layers.append(
             GATConv(in_channels=in_size,
                     out_channels=out_size,
                     heads=layer_num_heads,
                     dropout=dropout))
     self.semantic_attention = SemanticAttention(in_size=out_size *
                                                 layer_num_heads)
     self.num_meta_paths = num_meta_paths
Example #26
 def get_layer(gnn_type):
     if gnn_type == 'ChebConv': layer = ChebConv(h, h, K=2)
     elif gnn_type == 'GCNConv': layer = GCNConv(h, h)
     elif gnn_type == 'GINConv':
         dnn = nn.Sequential(nn.Linear(h, h), nn.LeakyReLU(),
                             nn.Linear(h, h))
         layer = GINConv(dnn)
     elif gnn_type == 'SAGEConv':
         layer = SAGEConv(h, h, normalize=True)
     elif gnn_type == 'GATConv':
         layer = GATConv(h, h)
     else:
         raise NotImplementedError
     return layer
Example #27
 def __init__(self, num_nodes):
     super(GraphNet, self).__init__()
     self.gat = GATConv(args.bert_dim,
                        args.emb_dim,
                        heads=args.graph_heads,
                        dropout=args.dropout)
     self.dropout = nn.Dropout(args.dropout)
     self.linear_out = nn.Linear(
         (args.emb_dim * args.graph_heads) + args.emb_dim, args.emb_dim)
     self.score = nn.Linear(args.emb_dim, 1)
     self.context_net = nn.Sequential(
         nn.Linear(args.emb_dim * 2, args.emb_dim), nn.LeakyReLU(),
         Flatten(), nn.Dropout(args.dropout),
         nn.Linear(args.emb_dim, num_nodes))
Example #28
 def __init__(self, hidden_size, n_node, use_san=False, use_gat=False):
     super(GNNModel, self).__init__()
     self.hidden_size, self.n_node = hidden_size, n_node
     self.use_san = use_san
     self.use_gat = use_gat
     self.embedding = nn.Embedding(self.n_node, self.hidden_size)
     if not use_gat:
         self.gated = InOutGGNN(self.hidden_size, num_layers=1)
     else:
         self.gat1 = GATConv(self.hidden_size,
                             self.hidden_size,
                             heads=4,
                             negative_slope=0.2)
         self.gat2 = GATConv(4 * self.hidden_size,
                             self.hidden_size,
                             heads=1,
                             negative_slope=0.2)
     if not use_san:
         self.e2s = Embedding2Score(self.hidden_size)
     else:
         self.e2s = Embedding2ScoreSAN(self.hidden_size)
     self.loss_function = nn.CrossEntropyLoss()
     self.reset_parameters()
Example #29
 def __init__(self, in_features, out_features, aggregation, attention,
              **kwargs):
     super().__init__()
     assert attention in ("constant", "gcn", "gat")
     if attention == "constant":
         self.op = ConstantConv(in_features, out_features)
     elif attention == "gcn":
         self.op = GCNConv(in_features, out_features)
     else:
         self.op = GATConv(in_features,
                           out_features,
                           dropout=config.DROPOUT)
     assert aggregation in ("add", "mean", "max")
     self.op.aggr = aggregation
Example #30
    def __init__(self, num_features, num_classes):
        super(TestModel, self).__init__()
        self.name = "test_model"
        self.version = "v1"
        self.num_features = num_features
        self.num_classes = num_classes

        self.bn1 = torch.nn.BatchNorm1d(num_features=num_features)

        self.conv1 = GATConv(num_features, 128)
        self.bn2 = torch.nn.BatchNorm1d(num_features=128)
        self.pool1 = TopKPooling(128, ratio=0.8)

        self.lin1 = torch.nn.Linear(256, num_classes)