Example #1
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 alpha=1.0,
                 epsilon=0.9,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):
        super().__init__()
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.conv = conv
        # Only the first GCN layer (conv[1], right after the input dropout)
        # gets weight decay; the remaining layers are left unregularized.
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam(
                [dict(params=conv[1].parameters(), weight_decay=weight_decay),
                 dict(params=conv[2:].parameters(), weight_decay=0.)],
                lr=lr),
            metrics=[Accuracy()])
        self.alpha = alpha
        self.epsilon = epsilon
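
This snippet mirrors the original GCN training recipe: only the first GCNConv (index 1 of the Sequential, right after the input dropout) is L2-regularized. Below is a minimal stand-alone sketch of that two-group optimizer pattern in plain PyTorch, with nn.Linear stand-ins for the GCN layers:

import torch.nn as nn
import torch.optim as optim

# Weight-decay only the first layer; leave the rest unregularized.
first = nn.Linear(1433, 16)  # stand-in for the first GCNConv
rest = nn.Linear(16, 7)      # stand-in for the remaining layers
optimizer = optim.Adam(
    [dict(params=first.parameters(), weight_decay=5e-4),
     dict(params=rest.parameters(), weight_decay=0.)],
    lr=0.01)
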
Example #2
    def __init__(self,
                 in_features,
                 out_features,
                 num_nodes,
                 *,
                 eta=0.1,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.2,
                 bias=False):
        super().__init__()
        assert hids, "LATGCN requires hidden layers"
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features,
                                hid,
                                bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.zeta = nn.Parameter(torch.randn(num_nodes, hids[0]))
        self.conv1 = conv[:3]  # input dropout, the first GCN layer, and its activation
        self.conv2 = conv[3:]  # the remaining layers

        self.eta = eta

        # Exposed for an external optimizer. Note that parameters() returns
        # a one-shot generator, so each attribute can be consumed only once.
        self.reg_paras = self.conv1.parameters()
        self.non_reg_paras = self.conv2.parameters()
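
LATGCN treats zeta as a learnable perturbation of the first hidden representation, with eta as its norm budget. Below is a minimal sketch of one plausible projection step, assuming a global Frobenius-norm constraint; the exact constraint in the reference implementation may differ:

import torch

@torch.no_grad()
def project_zeta(zeta: torch.Tensor, eta: float) -> torch.Tensor:
    # Rescale zeta in place so its Frobenius norm stays within eta.
    norm = zeta.norm()
    if norm > eta:
        zeta.mul_(eta / norm)
    return zeta
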
Example #3
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[64],
                 acts=[None],
                 lambda_=5.0,
                 gamma=0.1,
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):

        super().__init__()
        self.lambda_ = lambda_
        self.gamma = gamma
        assert hids, "hids should not be empty"
        layers = nn.ModuleList()
        act_layers = nn.ModuleList()
        inc = in_features
        for hid, act in zip(hids, acts):
            # Chain on `inc` so stacked hidden layers line up;
            # `in_features` is only correct for the first layer.
            layers.append(GCNConv(inc, hid, bias=bias))
            act_layers.append(activations.get(act))
            inc = hid

        layers.append(GCNConv(inc,
                              out_features,
                              bias=bias))
        act_layers.append(activations.get(None))

        self.layers = layers
        self.act_layers = act_layers
        self.scores = nn.ParameterList()
        self.bias = nn.ParameterList()
        self.D_k = nn.ParameterList()
        self.D_bias = nn.ParameterList()

        # Allocated uninitialized here; reset_parameters() below fills them.
        for hid in [in_features] + hids:
            self.scores.append(nn.Parameter(torch.FloatTensor(hid, 1)))
            self.bias.append(nn.Parameter(torch.FloatTensor(1)))
            self.D_k.append(nn.Parameter(torch.FloatTensor(hid, 1)))
            self.D_bias.append(nn.Parameter(torch.FloatTensor(1)))

        # discriminator for ssl
        self.linear = nn.Linear(hids[-1], 1)

        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay),
                     metrics=[Accuracy()])

        self.dropout = nn.Dropout(dropout)
        self.reset_parameters()
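
The scores/D_k parameters above are allocated uninitialized via torch.FloatTensor, so reset_parameters() must fill them before training. A hypothetical initializer in the usual 1/sqrt(fan) style (the real method may use a different scheme):

import math

def reset_parameters(self):
    # Hypothetical: fill the uninitialized score/D_k weights with a
    # uniform(-stdv, stdv) draw and zero the scalar biases.
    for w in list(self.scores) + list(self.D_k):
        stdv = 1. / math.sqrt(w.size(0))
        w.data.uniform_(-stdv, stdv)
    for b in list(self.bias) + list(self.D_bias):
        b.data.zero_()
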
Example #4
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[64],
                 acts=[None],
                 gamma=0.1,
                 dropout=0.5,
                 bias=False):

        super().__init__()
        self.gamma = gamma
        assert hids, "hids should not be empty"
        layers = nn.ModuleList()
        act_layers = nn.ModuleList()
        inc = in_features
        for hid, act in zip(hids, acts):
            # Chain on `inc`, as in Example #3, so stacked hidden layers line up.
            layers.append(GCNConv(inc, hid, bias=bias))
            act_layers.append(activations.get(act))
            inc = hid

        layers.append(GCNConv(inc,
                              out_features,
                              bias=bias))
        act_layers.append(activations.get(None))

        self.layers = layers
        self.act_layers = act_layers
        self.scores = nn.ParameterList()
        self.bias = nn.ParameterList()
        self.D_k = nn.ParameterList()
        self.D_bias = nn.ParameterList()

        for hid in [in_features] + hids:
            self.scores.append(nn.Parameter(torch.FloatTensor(hid, 1)))
            self.bias.append(nn.Parameter(torch.FloatTensor(1)))
            self.D_k.append(nn.Parameter(torch.FloatTensor(hid, 1)))
            self.D_bias.append(nn.Parameter(torch.FloatTensor(1)))

        # discriminator for ssl
        self.linear = nn.Linear(hids[-1], 1)
        self.dropout = nn.Dropout(dropout)
        self._adj_knn = None
        self.reset_parameters()
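
The lazily cached _adj_knn suggests a feature-similarity kNN graph built on first use. A hypothetical helper showing one common construction (cosine similarity, dense adjacency); the library's actual builder may differ:

import torch
import torch.nn.functional as F

def knn_graph(x: torch.Tensor, k: int = 20) -> torch.Tensor:
    # Dense kNN adjacency from cosine similarity of node features.
    x = F.normalize(x, dim=1)
    sim = x @ x.t()
    topk = sim.topk(k + 1, dim=1).indices[:, 1:]  # drop the self match
    adj = torch.zeros_like(sim)
    adj.scatter_(1, topk, 1.0)
    return adj
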
Example #5
    def __init__(self,
                 in_features,
                 out_features,
                 num_nodes,
                 *,
                 p1=1.0,
                 p2=1.0,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.,
                 weight_decay=5e-4,
                 lr=0.01,
                 pt_epochs=10,
                 bias=False):
        super().__init__()
        # Zero initialization works best for the learnable perturbation.
        self.r_adv = nn.Parameter(torch.zeros(num_nodes, in_features))
        self.adv_optimizer = optim.Adam([self.r_adv], lr=lr / 10)

        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam(
                [dict(params=conv[1].parameters(), weight_decay=weight_decay),
                 dict(params=conv[2:].parameters(), weight_decay=0.)],
                lr=lr),
            metrics=[Accuracy()])
        self.p1 = p1
        self.p2 = p2
        self.pt_epochs = pt_epochs
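
r_adv is a learnable feature perturbation with its own, slower optimizer, so training presumably alternates a loss-ascent step on r_adv with the usual descent step on the weights. A self-contained sketch of the ascent step, using an nn.Linear stand-in for the model (the real loop would feed the graph through self.conv):

import torch
import torch.nn as nn
import torch.optim as optim

feats = torch.randn(5, 8)
labels = torch.randint(0, 3, (5,))
net = nn.Linear(8, 3)                         # stand-in for self.conv
r_adv = nn.Parameter(torch.zeros_like(feats))
adv_optimizer = optim.Adam([r_adv], lr=0.001)

# Adam minimizes, so negate the loss to take an ascent step on r_adv.
adv_optimizer.zero_grad()
loss = nn.functional.cross_entropy(net(feats + r_adv), labels)
(-loss).backward()
adv_optimizer.step()
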
Example #6
    def __init__(self,
                 in_features,
                 out_features,
                 num_nodes,
                 *,
                 gamma=0.01,
                 eta=0.1,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.2,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):
        super().__init__()
        assert hids, "LATGCN requires hidden layers"
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.zeta = nn.Parameter(torch.randn(num_nodes, hids[0]))
        self.conv1 = conv[:3]  # input dropout, the first GCN layer, and its activation
        self.conv2 = conv[3:]  # the remaining layers
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam(
                [dict(params=self.conv1.parameters(), weight_decay=weight_decay),
                 dict(params=self.conv2.parameters(), weight_decay=0.)],
                lr=lr),
            metrics=[Accuracy()])

        self.zeta_opt = optim.Adam([self.zeta], lr=lr)

        self.gamma = gamma
        self.eta = eta
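
With separate optimizers for the weights and for zeta, training plausibly alternates: maximize the perturbation's effect over zeta, then minimize the task loss plus gamma times that regularizer over the weights. A toy, self-contained sketch of one such round (the reg definition is an assumption, not the library's code):

import torch
import torch.nn as nn
import torch.optim as optim

h = torch.randn(10, 16)                  # stand-in hidden representation
f = nn.Linear(16, 7)                     # stand-in for conv2
zeta = nn.Parameter(torch.randn(10, 16))
zeta_opt = optim.Adam([zeta], lr=0.01)
opt = optim.Adam(f.parameters(), lr=0.01)
gamma = 0.01

def reg():
    # How much the latent perturbation shifts the output.
    return (f(h + zeta) - f(h)).pow(2).sum()

zeta_opt.zero_grad()
(-reg()).backward()                      # ascent on zeta
zeta_opt.step()

opt.zero_grad()
task_loss = f(h).pow(2).mean()           # placeholder supervised loss
(task_loss + gamma * reg()).backward()   # descent on the weights
opt.step()
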
Example #7
    def __init__(self,
                 in_features,
                 *,
                 out_features=16,
                 hids=[32],
                 acts=['relu'],
                 dropout=0.,
                 bias=False):
        super().__init__()
        encoder = []
        encoder.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            encoder.append(GCNConv(in_features, hid, bias=bias))
            encoder.append(activations.get(act))
            encoder.append(nn.Dropout(dropout))
            in_features = hid
        encoder.append(GCNConv(in_features, out_features, bias=bias))
        encoder = Sequential(*encoder)

        self.encoder = encoder
        self.decoder = InnerProductDecoder()
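
This is the classic graph autoencoder shape: a GCN encoder producing node embeddings z, plus an inner-product decoder that scores edges. InnerProductDecoder is commonly implemented as below; the library's own class may differ in details such as dropout or sparse output:

import torch
import torch.nn as nn

class InnerProductDecoder(nn.Module):
    # Edge probability = sigmoid(z_i . z_j).
    def forward(self, z: torch.Tensor) -> torch.Tensor:
        return torch.sigmoid(z @ z.t())
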
Example #8
    def __init__(self,
                 in_features,
                 *,
                 out_features=16,
                 hids=[32],
                 acts=['relu'],
                 dropout=0.,
                 bias=False):
        super().__init__()
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid

        self.mu_conv = GCNConv(in_features, out_features, bias=bias)
        self.logstd_conv = GCNConv(in_features, out_features, bias=bias)
        self.conv = Sequential(*conv)
        self.decoder = InnerProductDecoder()
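
Here the trunk is shared and two GCNConv heads emit the mean and log-std: the variational (VGAE) variant of the previous example. The standard sampling step pairs them via reparameterization:

import torch

def reparameterize(mu: torch.Tensor, logstd: torch.Tensor) -> torch.Tensor:
    # z = mu + eps * sigma with eps ~ N(0, I); keeps sampling differentiable.
    return mu + torch.randn_like(logstd) * torch.exp(logstd)
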
Example #9
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 bias=False):
        super().__init__()
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features,
                                hid,
                                bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.conv = conv
        # Exposed for an external optimizer (see the usage sketch below);
        # parameters() returns a one-shot generator.
        self.reg_paras = conv[1].parameters()
        self.non_reg_paras = conv[2:].parameters()
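
Unlike Example #1, this model leaves optimizer construction to the caller and merely exposes the two parameter groups. A hypothetical trainer-side usage with a minimal stand-in module (remember that each generator can be consumed only once):

import torch.nn as nn
import torch.optim as optim

class Toy(nn.Module):
    # Minimal stand-in exposing the same two attributes.
    def __init__(self):
        super().__init__()
        self.first, self.rest = nn.Linear(4, 8), nn.Linear(8, 2)
        self.reg_paras = self.first.parameters()
        self.non_reg_paras = self.rest.parameters()

model = Toy()
optimizer = optim.Adam(
    [dict(params=model.reg_paras, weight_decay=5e-4),
     dict(params=model.non_reg_paras, weight_decay=0.)],
    lr=0.01)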