Example #1
    def __init__(self,
                 in_features,
                 out_features,
                 num_nodes,
                 *,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 bias=False):
        super().__init__()
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(
                WaveletConv(in_features, hid, num_nodes=num_nodes, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(
            WaveletConv(in_features,
                        out_features,
                        num_nodes=num_nodes,
                        bias=bias))
        conv = Sequential(*conv)

        self.conv = conv
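
These snippets only define the constructor; at run time everything flows through self.conv. A minimal forward sketch for this example, assuming (an assumption about this framework, not a documented fact) that its Sequential threads the extra wavelet operators through each WaveletConv while plain layers such as Dropout see only the features:

    def forward(self, x, wavelet, inverse_wavelet):
        # wavelet / inverse_wavelet are the precomputed graph wavelet bases;
        # Sequential is assumed to forward them to every WaveletConv layer.
        return self.conv(x, wavelet, inverse_wavelet)
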
Example #2
    def __init__(self,
                 in_features,
                 out_features,
                 in_edge_features,
                 *,
                 hids=[32],
                 pdn_hids=32,
                 acts=['relu'],
                 dropout=0.5,
                 bias=True):
        super().__init__()

        conv = []
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features,
                                hid,
                                bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid

        conv.append(GCNConv(in_features,
                            out_features,
                            bias=bias))
        conv = Sequential(*conv)

        self.fc = nn.Sequential(nn.Linear(in_edge_features, pdn_hids),
                                nn.ReLU(),
                                nn.Linear(pdn_hids, 1),
                                nn.Sigmoid())
        self.conv = conv
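
The self.fc head maps raw edge features to one sigmoid gate per edge, which can then serve as the GCN edge weight. A hedged sketch of the wiring (the exact GCNConv call signature here is an assumption):

    def forward(self, x, edge_index, edge_attr):
        # one scalar weight in (0, 1) per edge, derived from its features
        edge_weight = self.fc(edge_attr).view(-1)
        return self.conv(x, edge_index, edge_weight)
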
Example #3
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[16],
                 acts=['relu'],
                 tperc=0.45,
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):

        super().__init__()
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(TrimmedConv(in_features, hid, bias=bias, tperc=tperc))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(
            TrimmedConv(in_features, out_features, bias=bias, tperc=tperc))
        conv = Sequential(*conv)

        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[1].parameters(), weight_decay=weight_decay),
                dict(params=conv[2:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
Example #4
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 hids=[256],
                 acts=['gelu'],
                 dropout=0.6,
                 bias=True):

        super().__init__()
        mlp = []
        for hid, act in zip(hids, acts):
            mlp.append(
                MLP(in_features, hid, act=act, dropout=dropout, bias=bias))
            in_features = hid
        self.mlp = Sequential(*mlp)
        self.classifier = nn.Linear(in_features, out_features, bias=bias)
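
The constructor already fixes the data path here: stacked MLP blocks followed by a linear classifier, so the forward is presumably just:

    def forward(self, x):
        return self.classifier(self.mlp(x))
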
Example #5
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[8],
                 num_heads=[8],
                 acts=['elu'],
                 dropout=0.6,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=True):

        super().__init__()
        head = 1
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, num_head, act in zip(hids, num_heads, acts):
            conv.append(
                GATConv(in_features * head,
                        hid,
                        heads=num_head,
                        bias=bias,
                        dropout=dropout))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
            head = num_head

        conv.append(
            GATConv(in_features * head,
                    out_features,
                    heads=1,
                    bias=bias,
                    concat=False,
                    dropout=dropout))
        conv = Sequential(*conv)

        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[1].parameters(), weight_decay=weight_decay),
                dict(params=conv[2:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
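
The in_features * head bookkeeping accounts for head concatenation: with concat=True (the default in PyTorch Geometric, which this snippet appears to target), a layer with heads=h emits hid * h features, so the next layer's input width must be scaled. A standalone shape check with PyTorch Geometric's GATConv:

import torch
from torch_geometric.nn import GATConv

x = torch.randn(4, 10)                     # 4 nodes, 10 features each
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 2, 3, 0]])  # a directed 4-cycle
conv = GATConv(10, 8, heads=8)             # concat=True by default
print(conv(x, edge_index).shape)           # torch.Size([4, 64]) = 8 * 8
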
Example #6
    def __init__(self,
                 in_features,
                 out_features,
                 p=0.3,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 kl=0.005,
                 lr=0.01,
                 bias=True):

        super().__init__()

        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(
                GCNConv(in_features,
                        hid,
                        cached=True,
                        bias=bias,
                        normalize=False))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(
            GCNConv(in_features,
                    out_features,
                    cached=True,
                    bias=bias,
                    normalize=False))
        conv = Sequential(*conv)

        self.p = p
        self.kl = kl
        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[1].parameters(), weight_decay=weight_decay),
                dict(params=conv[2:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
Example #7
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[8],
                 num_heads=[8],
                 acts=['elu'],
                 dropout=0.6,
                 weight_decay=5e-4,
                 lr=0.01):

        super().__init__()
        head = 1
        conv = []
        for hid, num_head, act in zip(hids, num_heads, acts):
            conv.append(
                GATConv(in_features * head,
                        hid,
                        num_heads=num_head,
                        feat_drop=dropout,
                        attn_drop=dropout))
            conv.append(activations.get(act))
            conv.append(nn.Flatten(start_dim=1))
            conv.append(nn.Dropout(dropout))
            in_features = hid
            head = num_head

        conv.append(
            GATConv(in_features * head,
                    out_features,
                    num_heads=1,
                    feat_drop=dropout,
                    attn_drop=dropout))
        conv = Sequential(*conv, inverse=True)  # `inverse=True` is important

        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[0].parameters(), weight_decay=weight_decay),
                dict(params=conv[1:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
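
DGL-style layers such as the GATConv here are called as layer(graph, feat), whereas plain torch.nn layers take only the features; inverse=True presumably tells this Sequential to pass the graph first. A hypothetical wrapper illustrating the idea (not this library's actual implementation):

import torch
from dgl.nn import GATConv

class InverseSequential(torch.nn.Module):
    def __init__(self, *layers):
        super().__init__()
        self.layers = torch.nn.ModuleList(layers)

    def forward(self, graph, x):
        for layer in self.layers:
            if isinstance(layer, GATConv):
                x = layer(graph, x)   # graph layers: (graph, feat)
            else:
                x = layer(x)          # Dropout/ELU/Flatten: features only
        return x
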
Example #8
    def __init__(self,
                 in_features,
                 out_features,
                 num_nodes,
                 *,
                 p1=1.0,
                 p2=1.0,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.,
                 weight_decay=5e-4,
                 lr=0.01,
                 pt_epochs=10,
                 bias=False):
        super().__init__()
        self.r_adv = nn.Parameter(torch.zeros(
            num_nodes, in_features))  # a zero initializer works best here
        self.adv_optimizer = optim.Adam([self.r_adv], lr=lr / 10)

        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[1].parameters(), weight_decay=weight_decay),
                dict(params=conv[2:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
        self.p1 = p1
        self.p2 = p2
        self.pt_epochs = pt_epochs
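
The zero-initialized r_adv is a learnable perturbation of the node features with its own optimizer: it is trained to increase the task loss while the main optimizer decreases it. A hedged sketch of one ascent step on the perturbation (call signatures are assumptions about this framework):

    def adversarial_step(self, x, adj, y, mask):
        # gradient ascent on r_adv: maximize the loss w.r.t. the perturbation
        self.adv_optimizer.zero_grad()
        logits = self.conv(x + self.r_adv, adj)
        loss = -nn.CrossEntropyLoss()(logits[mask], y[mask])
        loss.backward()
        self.adv_optimizer.step()
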
Example #9
    def __init__(self,
                 in_features,
                 out_features,
                 alpha=None,  # not used
                 K=None,  # not used
                 eps_U=0.3,
                 eps_V=1.2,
                 lamb_U=0.8,
                 lamb_V=0.8,
                 hids=[],
                 acts=[],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):

        super().__init__()
        
        conv = []
        conv.append(nn.Dropout(dropout))

        for hid, act in zip(hids, acts):
            conv.append(GraphEigenConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
            
        conv.append(GraphEigenConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.conv = conv
        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(),
                                          weight_decay=weight_decay, lr=lr),
                     metrics=[Accuracy()])
        self.eps_U = eps_U
        self.eps_V = eps_V
        self.lamb_U = lamb_U
        self.lamb_V = lamb_V
Example #10
    def __init__(self,
                 in_features,
                 out_features,
                 num_nodes,
                 *,
                 gamma=0.01,
                 eta=0.1,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.2,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):
        super().__init__()
        assert hids, "LATGCN requires hidden layers"
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.zeta = nn.Parameter(torch.randn(num_nodes, hids[0]))
        self.conv1 = conv[:3]  # dropout, the first GCN layer, and its activation
        self.conv2 = conv[3:]  # the remaining dropout and the output GCN layer
        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam([
                         dict(params=self.conv1.parameters(),
                              weight_decay=weight_decay),
                         dict(params=self.conv2.parameters(), weight_decay=0.)
                     ],
                                          lr=lr),
                     metrics=[Accuracy()])

        self.zeta_opt = optim.Adam([self.zeta], lr=lr)

        self.gamma = gamma
        self.eta = eta
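
zeta is a trainable perturbation of the first hidden representation, hence its shape (num_nodes, hids[0]): zeta_opt updates it to maximize the output change it causes (subject to the row-norm budget eta), while gamma weights that change as a regularizer in the main loss. A sketch of the regularized forward pass (call signatures assumed):

    def forward(self, x, adj):
        h = self.conv1(x, adj)                 # dropout -> first GCN -> activation
        logits = self.conv2(h, adj)
        logits_p = self.conv2(h + self.zeta, adj)
        reg = torch.norm(logits_p - logits)    # weighted by gamma in the loss
        return logits, reg
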
Example #11
    def __init__(self,
                 in_features,
                 *,
                 out_features=16,
                 hids=[32],
                 acts=['relu'],
                 dropout=0.,
                 bias=False):
        super().__init__()
        encoder = []
        encoder.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            encoder.append(GCNConv(in_features, hid, bias=bias))
            encoder.append(activations.get(act))
            encoder.append(nn.Dropout(dropout))
            in_features = hid
        encoder.append(GCNConv(in_features, out_features, bias=bias))
        encoder = Sequential(*encoder)

        self.encoder = encoder
        self.decoder = InnerProductDecoder()
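
The encoder maps nodes to embeddings Z, and InnerProductDecoder presumably reconstructs edge probabilities as sigmoid(Z Z^T), the standard GAE decoder. A minimal sketch of such a decoder:

class InnerProductDecoder(nn.Module):
    """Score each node pair (i, j) as sigmoid(z_i . z_j)."""

    def forward(self, z):
        return torch.sigmoid(z @ z.t())
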
Example #12
    def __init__(self,
                 in_features,
                 *,
                 out_features=16,
                 hids=[32],
                 acts=['relu'],
                 dropout=0.,
                 bias=False):
        super().__init__()
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid

        self.mu_conv = GCNConv(in_features, out_features, bias=bias)
        self.logstd_conv = GCNConv(in_features, out_features, bias=bias)
        self.conv = Sequential(*conv)
        self.decoder = InnerProductDecoder()
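
Unlike the GAE above, the variational version keeps two output heads, mu_conv and logstd_conv, and samples the latent code with the reparameterization trick during training. A hedged sketch (layer call signatures are assumptions):

    def encode(self, x, adj):
        h = self.conv(x, adj)
        mu, logstd = self.mu_conv(h, adj), self.logstd_conv(h, adj)
        if self.training:
            # reparameterization trick: z = mu + sigma * eps, eps ~ N(0, I)
            return mu + torch.randn_like(logstd) * torch.exp(logstd)
        return mu
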
Example #13
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 xi=1e-6,
                 p1=1.0,
                 p2=1.0,
                 epsilon=5e-2,
                 num_power_iterations=1,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):
        super().__init__()
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[1].parameters(), weight_decay=weight_decay),
                dict(params=conv[2:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
        self.xi = xi
        self.p1 = p1
        self.p2 = p2
        self.epsilon = epsilon
        self.num_power_iterations = num_power_iterations
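
The hyperparameters mark this as virtual adversarial training: xi scales the probe direction, epsilon the final perturbation, and num_power_iterations the number of power-iteration refinements of the most loss-sensitive direction. A sketch of that loop (standard VAT, not necessarily this library's verbatim code; requires torch.nn.functional as F):

    def adv_perturbation(self, x, adj, logits):
        d = torch.randn_like(x)
        for _ in range(self.num_power_iterations):
            d = (self.xi * F.normalize(d, dim=-1)).requires_grad_()
            dist = F.kl_div(F.log_softmax(self.conv(x + d, adj), dim=-1),
                            F.softmax(logits.detach(), dim=-1),
                            reduction='batchmean')
            d = torch.autograd.grad(dist, d)[0].detach()
        return self.epsilon * F.normalize(d, dim=-1)
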
Example #14
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 bias=False):

        super().__init__()

        conv = []
        for hid, act in zip(hids, acts):
            conv.append(nn.Linear(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)
        self.conv = conv
        self.reg_paras = conv[0].parameters()
        self.non_reg_paras = conv[1:].parameters()
Example #15
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 hids=[16],
                 num_attn=2,
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):
        super().__init__()
        conv = []

        for hid, act in zip(hids, acts):
            conv.append(nn.Linear(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid

        # For the Cora dataset, the first propagation layer is
        # non-trainable and beta is fixed at 0.
        conv.append(SimilarityAttention(trainable=False))
        for _ in range(1, num_attn):
            conv.append(SimilarityAttention())

        conv.append(nn.Linear(in_features, out_features, bias=bias))
        conv.append(nn.Dropout(dropout))
        conv = Sequential(*conv)
        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[0].parameters(), weight_decay=weight_decay),
                dict(params=conv[1:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
Example #16
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[16],
                 acts=["relu"],
                 dropout=0.5,
                 bias=False):

        super().__init__()

        conv = []
        conv.append(nn.Dropout(dropout))

        for hid, act in zip(hids, acts):
            conv.append(GraphEigenConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid

        conv.append(GraphEigenConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.conv = conv
Example #17
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[8],
                 num_heads=[8],
                 acts=['elu'],
                 dropout=0.6,
                 bias=True):

        super().__init__()
        head = 1
        conv = []
        for hid, num_head, act in zip(hids, num_heads, acts):
            conv.append(
                GATConv(in_features * head,
                        hid,
                        bias=bias,
                        num_heads=num_head,
                        feat_drop=dropout,
                        attn_drop=dropout))
            conv.append(activations.get(act))
            conv.append(nn.Flatten(start_dim=1))
            conv.append(nn.Dropout(dropout))
            in_features = hid
            head = num_head

        conv.append(
            GATConv(in_features * head,
                    out_features,
                    num_heads=1,
                    bias=bias,
                    feat_drop=dropout,
                    attn_drop=dropout))
        conv = Sequential(*conv,
                          loc=1)  # loc=1 gives the position of the feature tensor

        self.conv = conv
Example #18
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[8],
                 num_heads=[8],
                 acts=['elu'],
                 dropout=0.6,
                 bias=True):

        super().__init__()
        head = 1
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, num_head, act in zip(hids, num_heads, acts):
            conv.append(
                GATConv(in_features * head,
                        hid,
                        heads=num_head,
                        bias=bias,
                        dropout=dropout))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
            head = num_head

        conv.append(
            GATConv(in_features * head,
                    out_features,
                    heads=1,
                    bias=bias,
                    concat=False,
                    dropout=dropout))
        conv = Sequential(*conv)

        self.conv = conv
        self.reg_paras = conv[1].parameters()
        self.non_reg_paras = conv[2:].parameters()
Example #19
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 bias=True):

        super().__init__()

        conv = []
        for hid, act in zip(hids, acts):
            conv.append(
                GraphConv(in_features,
                          hid,
                          bias=bias,
                          activation=activations.get(act)))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GraphConv(in_features, out_features))
        conv = Sequential(*conv,
                          loc=1)  # loc=1 gives the position of the feature tensor

        self.conv = conv