Code example #1
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 tau=2,
                 alpha=10.0,
                 hids=[256],
                 acts=['gelu'],
                 dropout=0.6,
                 weight_decay=5e-3,
                 lr=0.001,
                 bias=True):

        super().__init__()
        mlp = []
        for hid, act in zip(hids, acts):
            mlp.append(
                Mlp(in_features, hid, act=act, dropout=dropout, bias=bias))
            in_features = hid
        self.mlp = Sequential(*mlp)
        self.classifier = nn.Linear(in_features, out_features, bias=bias)
        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(),
                                          weight_decay=weight_decay,
                                          lr=lr),
                     metrics=[Accuracy()])
        self.tau = tau
        self.alpha = alpha
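
The construction pattern above (pairing the hidden sizes in `hids` with the activation names in `acts`, then attaching a linear classifier) can be reproduced in plain PyTorch. A minimal sketch, with standard `torch.nn` modules standing in for GraphGallery's `Mlp`, `Sequential`, and `compile` helpers; the sizes in the usage lines are arbitrary:

import torch
import torch.nn as nn

def build_mlp(in_features, out_features, hids=(256,), acts=('gelu',),
              dropout=0.6, bias=True):
    table = {'gelu': nn.GELU, 'relu': nn.ReLU, 'elu': nn.ELU}
    layers = []
    for hid, act in zip(hids, acts):
        # each block: affine map -> nonlinearity -> dropout
        layers += [nn.Linear(in_features, hid, bias=bias),
                   table[act](), nn.Dropout(dropout)]
        in_features = hid
    layers.append(nn.Linear(in_features, out_features, bias=bias))  # classifier head
    return nn.Sequential(*layers)

model = build_mlp(1433, 7)                 # arbitrary Cora-like sizes
print(model(torch.randn(4, 1433)).shape)   # torch.Size([4, 7])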
Code example #2
File: sgc.py Project: freebird3366/GraphGallery
    def __init__(self,
                 in_channels,
                 out_channels,
                 hids=[],
                 acts=[],
                 K=2,
                 dropout=0.5,
                 weight_decay=5e-5,
                 lr=0.2,
                 use_bias=True):
        super().__init__()

        if hids or acts:
            raise RuntimeError(
                f"Arguments 'hids' and 'acts' are not supported to use in SGC (DGL backend)."
            )

        conv = SGConv(in_channels,
                      out_channels,
                      bias=use_bias,
                      k=K,
                      cached=True)
        self.conv = conv
        self.dropout = Dropout(dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(conv.parameters(),
                                          lr=lr,
                                          weight_decay=weight_decay),
                     metrics=[Accuracy()])
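
For reference, what `SGConv` with `cached=True` precomputes is the K-step smoothed feature matrix S^K X, where S is the symmetrically normalized adjacency with self-loops; a single linear layer on top then produces the logits. A dense, idea-only sketch (DGL's actual layer operates sparsely on a DGLGraph):

import torch

def sgc_features(adj, x, K=2):
    # S = D^{-1/2} (A + I) D^{-1/2}, applied K times to the node features
    a_hat = adj + torch.eye(adj.size(0))
    d_inv_sqrt = a_hat.sum(dim=1).pow(-0.5)
    s = d_inv_sqrt[:, None] * a_hat * d_inv_sqrt[None, :]
    for _ in range(K):
        x = s @ x
    return x  # a single Linear(in_channels, out_channels) on top gives the SGC logits

a = torch.eye(4)  # toy graph with self-loops only
print(sgc_features(a, torch.randn(4, 3)).shape)  # torch.Size([4, 3])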
Code example #3
File: fastgcn.py Project: MemMeta/GraphGallery
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):

        super().__init__()

        conv = []
        for hid, act in zip(hids, acts):
            conv.append(nn.Linear(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GraphConvolution(in_features, out_features, bias=bias))
        conv = Sequential(*conv)
        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[0].parameters(), weight_decay=weight_decay),
                dict(params=conv[1:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
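
The optimizer above uses Adam's parameter-group API so that only the first layer is L2-regularized; the `weight_decay=0.` group covers everything else. A standalone sketch of the same idiom with a plain `nn.Sequential`:

import torch.nn as nn
import torch.optim as optim

net = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2))
opt = optim.Adam([
    dict(params=net[0].parameters(), weight_decay=5e-4),  # decay the first layer only
    dict(params=net[1:].parameters(), weight_decay=0.),   # no decay on the rest
], lr=0.01)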
Code example #4
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 alpha=1.0,
                 epsilon=0.9,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):
        super().__init__()
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[1].parameters(), weight_decay=weight_decay),
                dict(params=conv[2:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
        self.alpha = alpha
        self.epsilon = epsilon
Code example #5
File: pdn.py Project: gitxchen/GraphGallery
    def __init__(self,
                 in_features,
                 out_features,
                 edge_features,
                 *,
                 hids=[32],
                 pdn_hids=32,
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=True):
        super().__init__()

        conv = []
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid

        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.fc = nn.Sequential(nn.Linear(edge_features, pdn_hids), nn.ReLU(),
                                nn.Linear(pdn_hids, 1), nn.Sigmoid())
        self.conv = conv
        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(),
                                          lr=lr,
                                          weight_decay=weight_decay),
                     metrics=[Accuracy()])
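
The `fc` head above is the PDN edge scorer: it maps raw edge features to a single weight in (0, 1) that modulates message passing. A shape-only illustration with made-up sizes:

import torch
import torch.nn as nn

edge_features, pdn_hids = 6, 32
fc = nn.Sequential(nn.Linear(edge_features, pdn_hids), nn.ReLU(),
                   nn.Linear(pdn_hids, 1), nn.Sigmoid())
e = torch.randn(100, edge_features)   # 100 edges
print(fc(e).shape)                    # torch.Size([100, 1]): one weight per edge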
Code example #6
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[],
                 acts=[],
                 K=2,
                 dropout=None,
                 weight_decay=5e-5,
                 lr=0.2,
                 bias=False):
        super().__init__()

        if hids or acts:
            raise RuntimeError(
                f"Arguments 'hids' and 'acts' are not supported to use in SGC (PyG backend)."
            )

        # 'dropout' is accepted for signature compatibility but unused in SGC
        conv = SGConv(in_features,
                      out_features,
                      bias=bias,
                      K=K,
                      cached=True,
                      add_self_loops=True)
        self.conv = conv
        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(conv.parameters(),
                                          lr=lr,
                                          weight_decay=weight_decay),
                     metrics=[Accuracy()])
Code example #7
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[16],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=True):

        super().__init__()

        self.layers = ModuleList()

        inc = in_channels
        for hidden, activation in zip(hiddens, activations):
            layer = GraphConv(inc,
                              hidden,
                              activation=get_activation(activation),
                              bias=use_bias)
            self.layers.append(layer)
            inc = hidden
        # output layer
        self.layers.append(GraphConv(inc, out_channels))

        self.dropout = Dropout(p=dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(),
                                          lr=lr,
                                          weight_decay=weight_decay),
                     metrics=[Accuracy()])
Code example #8
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[64],
                 acts=[None],
                 lambda_=5.0,
                 gamma=0.1,
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):

        super().__init__()
        self.lambda_ = lambda_
        self.gamma = gamma
        assert hids, "hids should not be empty"
        layers = nn.ModuleList()
        act_layers = nn.ModuleList()
        inc = in_features
        for hid, act in zip(hids, acts):
            layers.append(GCNConv(inc,  # use the running width `inc`, not in_features
                                  hid,
                                  bias=bias))
            act_layers.append(activations.get(act))
            inc = hid

        layers.append(GCNConv(inc,
                              out_features,
                              bias=bias))
        act_layers.append(activations.get(None))

        self.layers = layers
        self.act_layers = act_layers
        self.scores = nn.ParameterList()
        self.bias = nn.ParameterList()
        self.D_k = nn.ParameterList()
        self.D_bias = nn.ParameterList()

        for hid in [in_features] + hids:
            self.scores.append(nn.Parameter(torch.FloatTensor(hid, 1)))
            self.bias.append(nn.Parameter(torch.FloatTensor(1)))
            self.D_k.append(nn.Parameter(torch.FloatTensor(hid, 1)))
            self.D_bias.append(nn.Parameter(torch.FloatTensor(1)))

        # discriminator for ssl
        self.linear = nn.Linear(hids[-1], 1)

        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay),
                     metrics=[Accuracy()])

        self.dropout = nn.Dropout(dropout)
        self.reset_parameters()
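
Note that `nn.Parameter(torch.FloatTensor(hid, 1))` allocates uninitialized memory, which is why the constructor must end with `reset_parameters()`. A plausible initializer sketch (the actual SimP-GCN initialization may differ):

import math
import torch

def reset_scores(scores, biases):
    # fill the uninitialized parameters with a fan-in style uniform init
    for s, b in zip(scores, biases):
        stdv = 1. / math.sqrt(s.size(0))
        s.data.uniform_(-stdv, stdv)
        b.data.fill_(0.)
    # e.g. called as reset_scores(self.scores, self.bias) inside reset_parameters()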
Code example #9
File: simpgcn.py Project: freebird3366/GraphGallery
    def __init__(self,
                 in_channels,
                 out_channels,
                 hids=[64],
                 acts=[None],
                 lambda_=5.0,
                 gamma=0.1,
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=False):

        super().__init__()
        self.lambda_ = lambda_
        self.gamma = gamma

        layers = nn.ModuleList()
        inc = in_channels
        for hid, act in zip(hids, acts):
            layer = GraphConvolution(inc,
                                     hid,
                                     activation=act,
                                     use_bias=use_bias)
            layers.append(layer)
            inc = hid

        layer = GraphConvolution(inc, out_channels, use_bias=use_bias)
        layers.append(layer)

        self.layers = layers
        self.scores = nn.ParameterList()
        self.bias = nn.ParameterList()
        self.D_k = nn.ParameterList()
        self.D_bias = nn.ParameterList()

        for hid in [in_channels] + hids:
            self.scores.append(nn.Parameter(torch.FloatTensor(hid, 1)))
            self.bias.append(nn.Parameter(torch.FloatTensor(1)))
            self.D_k.append(nn.Parameter(torch.FloatTensor(hid, 1)))
            self.D_bias.append(nn.Parameter(torch.FloatTensor(1)))

        # discriminator for ssl
        self.linear = nn.Linear(hids[-1], 1)

        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(),
                                          lr=lr,
                                          weight_decay=weight_decay),
                     metrics=[Accuracy()])

        self.dropout = nn.Dropout(dropout)
        self.reset_parameters()
Code example #10
File: gat.py Project: freebird3366/GraphGallery
    def __init__(self,
                 in_channels,
                 out_channels,
                 hids=[8],
                 num_heads=[8],
                 acts=['elu'],
                 dropout=0.6,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=True):

        super().__init__()

        layers = ModuleList()
        act_fns = []
        paras = []

        inc = in_channels
        pre_head = 1
        for hid, num_head, act in zip(hids, num_heads, acts):
            layer = GATConv(inc * pre_head,
                            hid,
                            heads=num_head,
                            bias=use_bias,
                            dropout=dropout)
            layers.append(layer)
            act_fns.append(get_activation(act))
            paras.append(
                dict(params=layer.parameters(), weight_decay=weight_decay))
            inc = hid
            pre_head = num_head

        layer = GATConv(inc * pre_head,
                        out_channels,
                        heads=1,
                        bias=use_bias,
                        concat=False,
                        dropout=dropout)
        layers.append(layer)
        # do not use weight_decay in the final layer
        paras.append(dict(params=layer.parameters(), weight_decay=0.))

        self.act_fns = act_fns
        self.layers = layers
        self.dropout = Dropout(dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
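
The `inc * pre_head` bookkeeping exists because a `GATConv` with `heads=H` and the default `concat=True` outputs `hid * H` features, which becomes the next layer's input width; the output layer uses `concat=False`, averaging its heads instead. A tiny dimension trace under the defaults above (`hids=[8]`, `num_heads=[8]`; the 1433 input width is an arbitrary Cora-like choice):

inc, pre_head = 1433, 1
for hid, num_head in zip([8], [8]):
    print('hidden layer:', inc * pre_head, '->', hid * num_head)  # 1433 -> 64
    inc, pre_head = hid, num_head
print('output layer in:', inc * pre_head)  # 64 -> out_channels (heads averaged)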
Code example #11
File: gat.py Project: kisekizzz/GraphGallery
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[8],
                 n_heads=[8],
                 activations=['elu'],
                 dropout=0.6,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=True):

        super().__init__()

        layers = ModuleList()
        paras = []

        inc = in_channels
        pre_head = 1
        for hidden, n_head, activation in zip(hiddens, n_heads, activations):
            layer = SparseGraphAttention(inc * pre_head,
                                         hidden,
                                         activation=activation,
                                         attn_heads=n_head,
                                         reduction='concat',
                                         use_bias=use_bias)
            layers.append(layer)
            paras.append(
                dict(params=layer.parameters(), weight_decay=weight_decay))
            inc = hidden
            pre_head = n_head

        layer = SparseGraphAttention(inc * pre_head,
                                     out_channels,
                                     attn_heads=1,
                                     reduction='average',
                                     use_bias=use_bias)
        layers.append(layer)
        # do not use weight_decay in the final layer
        paras.append(dict(params=layer.parameters(), weight_decay=0.))

        self.layers = layers
        self.dropout = Dropout(dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
Code example #12
File: gat.py Project: kisekizzz/GraphGallery
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[8],
                 n_heads=[8],
                 activations=['elu'],
                 dropout=0.6,
                 weight_decay=5e-4,
                 lr=0.01):

        super().__init__()

        layers = ModuleList()
        paras = []

        inc = in_channels
        pre_head = 1
        for hidden, n_head, activation in zip(hiddens, n_heads, activations):
            layer = GATConv(inc * pre_head,
                            hidden,
                            activation=get_activation(activation),
                            num_heads=n_head,
                            feat_drop=dropout,
                            attn_drop=dropout)
            layers.append(layer)
            paras.append(
                dict(params=layer.parameters(), weight_decay=weight_decay))
            inc = hidden
            pre_head = n_head

        layer = GATConv(inc * pre_head,
                        out_channels,
                        num_heads=1,
                        feat_drop=dropout,
                        attn_drop=dropout)
        layers.append(layer)
        # do not use weight_decay in the final layer
        paras.append(dict(params=layer.parameters(), weight_decay=0.))

        self.layers = layers
        self.dropout = Dropout(dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
Code example #13
    def __init__(self,
                 in_features,
                 out_features,
                 K=10,
                 alpha=0.1,
                 eps_U=0.3,
                 eps_V=1.2,
                 lamb_U=0.8,
                 lamb_V=0.8,
                 hids=[],
                 acts=[],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):

        super().__init__()

        lin = []
        lin.append(nn.Dropout(dropout))

        for hid, act in zip(hids, acts):
            lin.append(nn.Linear(in_features, hid, bias=bias))
            lin.append(activations.get(act))
            lin.append(nn.Dropout(dropout))
            in_features = hid
        lin = nn.Sequential(*lin)
        conv = SpectralEigenConv(in_features,
                                 out_features,
                                 bias=bias,
                                 K=K,
                                 alpha=alpha)

        self.lin = lin
        self.conv = conv
        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(),
                                          weight_decay=weight_decay,
                                          lr=lr),
                     metrics=[Accuracy()])
        self.eps_U = eps_U
        self.eps_V = eps_V
        self.lamb_U = lamb_U
        self.lamb_V = lamb_V
Code example #14
File: gcn.py Project: freebird3366/GraphGallery
    def __init__(self,
                 in_channels,
                 out_channels,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=True):

        super().__init__()

        paras = []
        act_fns = []
        layers = ModuleList()
        # use ModuleList to create layers with different sizes
        inc = in_channels
        for hid, act in zip(hids, acts):
            layer = GCNConv(inc,
                            hid,
                            cached=True,
                            bias=use_bias,
                            normalize=False)
            layers.append(layer)
            paras.append(
                dict(params=layer.parameters(), weight_decay=weight_decay))
            act_fns.append(get_activation(act))
            inc = hid

        layer = GCNConv(inc,
                        out_channels,
                        cached=True,
                        bias=use_bias,
                        normalize=False)
        layers.append(layer)
        # do not use weight_decay in the final layer
        paras.append(dict(params=layer.parameters(), weight_decay=0.))

        self.act_fns = act_fns
        self.layers = layers
        self.dropout = Dropout(dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
Code example #15
    def __init__(self,
                 in_channels,
                 out_channels,
                 hids=[64],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-3,
                 lr=0.01,
                 use_bias=False,
                 K=10):
        super().__init__()

        layers = nn.ModuleList()
        acts_fn = []

        # use ModuleList to create layers with different sizes
        inc = in_channels
        for hid, act in zip(hids, acts):
            layer = nn.Linear(inc, hid, bias=use_bias)
            layers.append(layer)
            acts_fn.append(get_activation(act))
            inc = hid

        layer = nn.Linear(inc, out_channels, bias=use_bias)
        # the output layer reuses the last hidden activation, keeping layers and acts_fn aligned
        acts_fn.append(get_activation(act))
        layers.append(layer)

        conv = PropConvolution(out_channels,
                               K=K,
                               use_bias=use_bias,
                               activation="sigmoid")
        self.layers = layers
        self.conv = conv
        paras = [
            dict(params=layers.parameters(), weight_decay=weight_decay),
            dict(params=conv.parameters(), weight_decay=weight_decay),
        ]

        # note: unlike the other examples, weight_decay is applied to all parameter groups here
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
        self.dropout = nn.Dropout(dropout)
Code example #16
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[8],
                 num_heads=[8],
                 acts=['elu'],
                 dropout=0.6,
                 weight_decay=5e-4,
                 lr=0.01):

        super().__init__()
        head = 1
        conv = []
        for hid, num_head, act in zip(hids, num_heads, acts):
            conv.append(
                GATConv(in_features * head,
                        hid,
                        num_heads=num_head,
                        feat_drop=dropout,
                        attn_drop=dropout))
            conv.append(activations.get(act))
            conv.append(nn.Flatten(start_dim=1))
            conv.append(nn.Dropout(dropout))
            in_features = hid
            head = num_head

        conv.append(
            GATConv(in_features * head,
                    out_features,
                    num_heads=1,
                    feat_drop=dropout,
                    attn_drop=dropout))
        conv = Sequential(*conv, inverse=True)  # `inverse=True` is important

        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[0].parameters(), weight_decay=weight_decay),
                dict(params=conv[1:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
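
Unlike the PyG layer, DGL's `GATConv` returns a tensor of shape `(num_nodes, num_heads, out_feats)`, so the `nn.Flatten(start_dim=1)` above is what concatenates the heads into `(num_nodes, num_heads * out_feats)`. Shape-only illustration:

import torch
import torch.nn as nn

out = torch.randn(2708, 8, 8)              # (nodes, heads, feats), Cora-sized
print(nn.Flatten(start_dim=1)(out).shape)  # torch.Size([2708, 64])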
Code example #17
File: graphsage.py Project: gitxchen/GraphGallery
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[32],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False,
                 aggregator='mean',
                 output_normalize=False,
                 sizes=[15, 5],
                 concat=True):

        super().__init__()
        Agg = _AGG.get(aggregator, None)
        if not Agg:
            raise ValueError(
                f"Invalid value of 'aggregator', allowed values {tuple(_AGG.keys())}, but got '{aggregator}'."
            )

        self.output_normalize = output_normalize
        self.sizes = sizes
        assert len(sizes) == len(hids) + 1

        aggregators, act_layers = nn.ModuleList(), nn.ModuleList()
        for hid, act in zip(hids, acts):
            aggregators.append(Agg(in_features, hid, concat=concat, bias=bias))
            act_layers.append(activations.get(act))
            in_features = hid * 2 if concat else hid

        aggregators.append(Agg(in_features, out_features, bias=bias))

        self.aggregators = aggregators
        self.dropout = nn.Dropout(dropout)
        self.acts = act_layers

        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(),
                                          lr=lr,
                                          weight_decay=weight_decay),
                     metrics=[Accuracy()])
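
The line `in_features = hid * 2 if concat else hid` reflects the GraphSAGE contract: with `concat=True`, an aggregator returns the node's own representation concatenated with its aggregated neighborhood. A minimal mean aggregator honoring that contract (the real `_AGG` classes are assumptions here):

import torch
import torch.nn as nn

class MeanAgg(nn.Module):
    def __init__(self, in_features, out_features, concat=True, bias=False):
        super().__init__()
        self.lin_self = nn.Linear(in_features, out_features, bias=bias)
        self.lin_neigh = nn.Linear(in_features, out_features, bias=bias)
        self.concat = concat

    def forward(self, x, neigh):
        # x: (N, in_features); neigh: (N, num_samples, in_features)
        h_self = self.lin_self(x)
        h_neigh = self.lin_neigh(neigh.mean(dim=1))
        if self.concat:
            return torch.cat([h_self, h_neigh], dim=1)  # (N, 2 * out_features)
        return h_self + h_neigh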
Code example #18
File: obvat.py Project: EdisonLeeeee/GraphGallery
    def __init__(self,
                 in_features,
                 out_features,
                 num_nodes,
                 *,
                 p1=1.0,
                 p2=1.0,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.,
                 weight_decay=5e-4,
                 lr=0.01,
                 pt_epochs=10,
                 bias=False):
        super().__init__()
        self.r_adv = nn.Parameter(torch.zeros(
            num_nodes, in_features))  # it is better to use zero initializer
        self.adv_optimizer = optim.Adam([self.r_adv], lr=lr / 10)

        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[1].parameters(), weight_decay=weight_decay),
                dict(params=conv[2:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
        self.p1 = p1
        self.p2 = p2
        self.pt_epochs = pt_epochs
Code example #19
    def __init__(self,
                 in_features,
                 out_features,
                 alpha=None,  # not used
                 K=None,  # not used
                 eps_U=0.3,
                 eps_V=1.2,
                 lamb_U=0.8,
                 lamb_V=0.8,
                 hids=[],
                 acts=[],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):

        super().__init__()
        
        conv = []
        conv.append(nn.Dropout(dropout))

        for hid, act in zip(hids, acts):
            conv.append(GraphEigenConv(in_features,
                                       hid,
                                       bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
            
        conv.append(GraphEigenConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.conv = conv
        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(),
                                          weight_decay=weight_decay, lr=lr),
                     metrics=[Accuracy()])
        self.eps_U = eps_U
        self.eps_V = eps_V
        self.lamb_U = lamb_U
        self.lamb_V = lamb_V
Code example #20
File: latgcn.py Project: EdisonLeeeee/GraphGallery
    def __init__(self,
                 in_features,
                 out_features,
                 num_nodes,
                 *,
                 gamma=0.01,
                 eta=0.1,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.2,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):
        super().__init__()
        assert hids, "LATGCN requires hidden layers"
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.zeta = nn.Parameter(torch.randn(num_nodes, hids[0]))
        self.conv1 = conv[:3]  # dropout, the first GCN layer, and its activation
        self.conv2 = conv[3:]  # the remaining layers
        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam([
                         dict(params=self.conv1.parameters(),
                              weight_decay=weight_decay),
                         dict(params=self.conv2.parameters(), weight_decay=0.)
                     ],
                                          lr=lr),
                     metrics=[Accuracy()])

        self.zeta_opt = optim.Adam([self.zeta], lr=lr)

        self.gamma = gamma
        self.eta = eta
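
Splitting the stack as `conv[:3]` / `conv[3:]` cuts the network at the first hidden representation, which is where the `zeta` perturbation is injected. Plain-PyTorch illustration of the split (`nn.Sequential` supports slicing; GraphGallery's `Sequential` is assumed to behave likewise):

import torch.nn as nn

net = nn.Sequential(nn.Dropout(0.2), nn.Linear(8, 16), nn.ReLU(),
                    nn.Dropout(0.2), nn.Linear(16, 4))
conv1, conv2 = net[:3], net[3:]  # [dropout, first layer, activation] / the rest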
Code example #21
File: appnp.py Project: MemMeta/GraphGallery
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 alpha=0.1,
                 K=10,
                 ppr_dropout=0.,
                 hids=[64],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=True,
                 approximated=True):

        super().__init__()
        lin = []
        lin.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            lin.append(nn.Linear(in_features,
                                 hid,
                                 bias=bias))
            lin.append(activations.get(act))
            lin.append(nn.Dropout(dropout))
            in_features = hid
        lin.append(nn.Linear(in_features, out_features, bias=bias))
        lin = nn.Sequential(*lin)
        self.lin = lin
        if approximated:
            self.propagation = APPNPropagation(alpha=alpha, K=K,
                                               dropout=ppr_dropout)
        else:
            self.propagation = PPNPropagation(dropout=ppr_dropout)
        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam([dict(params=lin[1].parameters(),
                                                weight_decay=weight_decay),
                                           dict(params=lin[2:].parameters(),
                                                weight_decay=0.)], lr=lr),
                     metrics=[Accuracy()])
        self.act_fn = nn.ReLU()
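
For reference, `APPNPropagation` iterates personalized-PageRank smoothing on the MLP output H: starting from Z = H, it repeats Z = (1 - alpha) * S Z + alpha * H for K steps, with S the normalized adjacency. A dense, idea-only sketch:

import torch

def appnp_propagate(s, h, alpha=0.1, K=10):
    # s: (N, N) normalized adjacency; h: (N, C) MLP output
    z = h
    for _ in range(K):
        z = (1 - alpha) * (s @ z) + alpha * h
    return z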
Code example #22
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[],
                 activations=[],
                 dropout=0.5,
                 weight_decay=5e-5,
                 lr=0.2,
                 use_bias=False):
        super().__init__()

        if len(hiddens) != len(activations):
            raise RuntimeError(
                f"Arguments 'hiddens' and 'activations' should have the same length."
                " Or you can set both of them to `[]`.")

        layers = ModuleList()
        acts = []
        paras = []
        inc = in_channels
        for hidden, activation in zip(hiddens, activations):
            layer = Linear(inc, hidden, bias=use_bias)
            paras.append(
                dict(params=layer.parameters(), weight_decay=weight_decay))
            layers.append(layer)
            inc = hidden
            acts.append(get_activation(activation))

        layer = Linear(inc, out_channels, bias=use_bias)
        layers.append(layer)
        paras.append(dict(params=layer.parameters(),
                          weight_decay=weight_decay))

        self.layers = layers
        self.acts = acts
        self.dropout = Dropout(dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
Code example #23
File: agnn.py Project: gitxchen/GraphGallery
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 hids=[16],
                 num_attn=2,
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):
        super().__init__()
        conv = []

        for hid, act in zip(hids, acts):
            conv.append(nn.Linear(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid

        # for the Cora dataset, the first propagation layer is non-trainable
        # and beta is fixed at 0
        conv.append(SimilarityAttention(trainable=False))
        for _ in range(1, num_attn):
            conv.append(SimilarityAttention())

        conv.append(nn.Linear(in_features, out_features, bias=bias))
        conv.append(nn.Dropout(dropout))
        conv = Sequential(*conv)
        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[0].parameters(), weight_decay=weight_decay),
                dict(params=conv[1:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
Code example #24
    def __init__(self,
                 in_channels,
                 out_channels,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=False):

        super().__init__()

        layers = nn.ModuleList()
        paras = []
        acts_fn = []

        # use ModuleList to create layers with different sizes
        inc = in_channels
        for hid, act in zip(hids, acts):
            layer = nn.Linear(inc, hid, bias=use_bias)

            layers.append(layer)
            acts_fn.append(get_activation(act))
            paras.append(
                dict(params=layer.parameters(), weight_decay=weight_decay))
            inc = hid

        conv = GraphConvolution(inc, out_channels, use_bias=use_bias)
        # do not use weight_decay in the final layer
        paras.append(dict(params=conv.parameters(), weight_decay=0.))
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
        self.dropout = nn.Dropout(dropout)
        self.acts_fn = acts_fn
        self.layers = layers
        self.conv = conv