Example #1
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 hids=[64],
                 acts=['relu'],
                 dropout=0.5,
                 gamma=1.,
                 bias=False):

        super().__init__()

        assert len(hids) > 0 and len(acts) > 0
        # The first layer that converts node features to a distribution
        self.conv1 = GaussionConvF(in_features,
                                   hids[0],
                                   gamma=gamma,
                                   bias=bias)
        self.act1 = activations.get(acts[0])
        in_features = hids[0]

        conv2 = nn.ModuleList()
        act2 = nn.ModuleList()
        for hid, act in zip(hids[1:], acts[1:]):
            conv2.append(
                GaussionConvD(in_features, hid, gamma=gamma, bias=bias))
            act2.append(activations.get(act))
            in_features = hid

        conv2.append(GaussionConvD(in_features, out_features, bias=bias))
        self.conv2 = conv2
        self.act2 = act2
        self.dropout = nn.Dropout(dropout)
Example #2
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[64],
                 acts=[None],
                 lambda_=5.0,
                 gamma=0.1,
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):

        super().__init__()
        self.lambda_ = lambda_
        self.gamma = gamma
        assert hids, "hids should not be empty"
        layers = nn.ModuleList()
        act_layers = nn.ModuleList()
        inc = in_features
        for hid, act in zip(hids, acts):
            layers.append(GCNConv(inc,
                                  hid,
                                  bias=bias))
            act_layers.append(activations.get(act))
            inc = hid

        layers.append(GCNConv(inc,
                              out_features,
                              bias=bias))
        act_layers.append(activations.get(None))

        self.layers = layers
        self.act_layers = act_layers
        self.scores = nn.ParameterList()
        self.bias = nn.ParameterList()
        self.D_k = nn.ParameterList()
        self.D_bias = nn.ParameterList()

        for hid in [in_features] + hids:
            self.scores.append(nn.Parameter(torch.FloatTensor(hid, 1)))
            self.bias.append(nn.Parameter(torch.FloatTensor(1)))
            self.D_k.append(nn.Parameter(torch.FloatTensor(hid, 1)))
            self.D_bias.append(nn.Parameter(torch.FloatTensor(1)))

        # discriminator for ssl
        self.linear = nn.Linear(hids[-1], 1)

        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay),
                     metrics=[Accuracy()])

        self.dropout = nn.Dropout(dropout)
        self.reset_parameters()
Example #3
    def __init__(self,
                 in_features: int,
                 out_features: int,
                 hids: list = [16],
                 acts: list = ['relu'],
                 dropout: float = 0.5,
                 bias: bool = True,
                 gamma: float = 1.0):
        r"""
        Parameters
        ----------
        in_features : int
            the input dimensions of the model
        out_features : int
            the output dimensions of the model
        hids : list, optional
            the number of hidden units of each hidden layer, by default [16]
        acts : list, optional
            the activation function of each hidden layer, by default ['relu']
        dropout : float, optional
            the dropout ratio of the model, by default 0.5
        bias : bool, optional
            whether to use bias in the layers, by default True
        gamma : float, optional
            the attention weight, by default 1.0
        """

        super().__init__()

        assert len(hids) == len(acts) and len(hids) > 0
        self.conv1 = RobustConv(in_features,
                                hids[0],
                                bias=bias,
                                activation=activations.get(acts[0]))

        conv2 = nn.ModuleList()
        in_features = hids[0]

        for hid, act in zip(hids[1:], acts[1:]):
            conv2.append(
                RobustConv(in_features,
                           hid,
                           bias=bias,
                           gamma=gamma,
                           activation=activations.get(act)))
            in_features = hid

        conv2.append(
            RobustConv(in_features, out_features, gamma=gamma, bias=bias))
        self.conv2 = conv2
        self.dropout = nn.Dropout(dropout)
Example #4
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 S=1,
                 K=4,
                 temp=0.5,
                 lam=1.,
                 bias=False,
                 bn=False):

        super().__init__()

        mlp = []
        for hid, act in zip(hids, acts):
            if bn:
                mlp.append(nn.BatchNorm1d(in_features))
            mlp.append(nn.Linear(in_features, hid, bias=bias))
            mlp.append(activations.get(act))
            mlp.append(nn.Dropout(dropout))
            in_features = hid
        if bn:
            mlp.append(nn.BatchNorm1d(in_features))
        mlp.append(nn.Linear(in_features, out_features, bias=bias))
        self.mlp = mlp = nn.Sequential(*mlp)

        self.K = K
        self.temp = temp
        self.lam = lam
        self.dropout = dropout
        self.S = S
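        # hyperparameters of the random-propagation / consistency-training
        # scheme (presumably GRAND-style): K propagation steps, S augmentations
        # per forward pass, sharpening temperature `temp`, consistency weight `lam`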
Example #5
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 alpha=0.1,
                 K=10,
                 ppr_dropout=0.,
                 hids=[64],
                 acts=['relu'],
                 dropout=0.5,
                 bias=True,
                 approximated=True):

        super().__init__()
        lin = []
        lin.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            lin.append(nn.Linear(in_features, hid, bias=bias))
            lin.append(activations.get(act))
            lin.append(nn.Dropout(dropout))
            in_features = hid
        lin.append(nn.Linear(in_features, out_features, bias=bias))
        lin = nn.Sequential(*lin)
        self.lin = lin
        if approximated:
            self.propagation = APPNProp(alpha=alpha, K=K, dropout=ppr_dropout)
        else:
            self.propagation = PPNProp(dropout=ppr_dropout)

        self.reg_paras = lin[1].parameters()
        self.non_reg_paras = lin[2:].parameters()
Example #6
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[16],
                 acts=['relu'],
                 bn=False,
                 dropout=0.5,
                 bias=False):

        super().__init__()

        lin = []
        lin.append(nn.Dropout(dropout))

        for hid, act in zip(hids, acts):
            if bn:
                lin.append(nn.BatchNorm1d(in_features))
            lin.append(nn.Linear(in_features, hid, bias=bias))
            lin.append(activations.get(act))
            lin.append(nn.Dropout(dropout))
            in_features = hid
        if bn:
            lin.append(nn.BatchNorm1d(in_features))
        lin.append(nn.Linear(in_features, out_features, bias=bias))
        lin = nn.Sequential(*lin)

        self.lin = lin
Example #7
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):

        super().__init__()

        lin = []
        lin.append(nn.Dropout(dropout))

        for hid, act in zip(hids, acts):
            lin.append(nn.Linear(in_features, hid, bias=bias))
            lin.append(activations.get(act))
            lin.append(nn.Dropout(dropout))
            in_features = hid
        lin.append(nn.Linear(in_features, out_features, bias=bias))
        lin = nn.Sequential(*lin)

        self.lin = lin
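        # L2-regularize only the first Linear layer (lin[1], right after the
        # input dropout); later layers get no weight decay, following the
        # common GCN training convention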
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=lin[1].parameters(), weight_decay=weight_decay),
                dict(params=lin[2:].parameters(), weight_decay=0.),
            ],
                                 lr=lr),
            metrics=[Accuracy()])
Example #8
    def __init__(self,
                 in_features,
                 out_features,
                 edge_features,
                 *,
                 hids=[32],
                 pdn_hids=32,
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=True):
        super().__init__()

        conv = []
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid

        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.fc = nn.Sequential(nn.Linear(edge_features, pdn_hids), nn.ReLU(),
                                nn.Linear(pdn_hids, 1), nn.Sigmoid())
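        # self.fc maps raw edge features to a scalar gate in (0, 1), presumably
        # applied as edge weights by the GCN layers in the forward pass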
        self.conv = conv
        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(),
                                          lr=lr,
                                          weight_decay=weight_decay),
                     metrics=[Accuracy()])
Example #9
    def __init__(self,
                 in_features,
                 out_features,
                 num_nodes,
                 *,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 bias=False):
        super().__init__()
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(
                WaveletConv(in_features, hid, num_nodes=num_nodes, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(
            WaveletConv(in_features,
                        out_features,
                        num_nodes=num_nodes,
                        bias=bias))
        conv = Sequential(*conv)

        self.conv = conv
Example #10
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 bias=True):

        super().__init__()

        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(
                GCNConv(in_features,
                        hid,
                        cached=True,
                        bias=bias,
                        normalize=False))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(
            GCNConv(in_features,
                    out_features,
                    cached=True,
                    bias=bias,
                    normalize=False))
        conv = Sequential(*conv)

        self.conv = conv
        self.reg_paras = conv[1].parameters()
        self.non_reg_paras = conv[2:].parameters()
Example #11
    def __init__(self,
                 in_features,
                 out_features,
                 p=0.3,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=True):

        super().__init__()

        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(
                GCNConv(in_features,
                        hid,
                        cached=True,
                        bias=bias,
                        normalize=False))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(
            GCNConv(in_features,
                    out_features,
                    cached=True,
                    bias=bias,
                    normalize=False))
        conv = Sequential(*conv)

        self.p = p
        self.conv = conv
Example #12
    def __init__(self,
                 in_features,
                 out_features,
                 num_nodes,
                 *,
                 eta=0.1,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.2,
                 bias=False):
        super().__init__()
        assert hids, "LATGCN requires hidden layers"
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features,
                                hid,
                                bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.zeta = nn.Parameter(torch.randn(num_nodes, hids[0]))
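        # `zeta` is a learnable perturbation on the first hidden representation,
        # presumably constrained by `eta` during latent adversarial training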
        self.conv1 = conv[:3]  # Dropout, the first GCN layer, and its activation
        self.conv2 = conv[3:]  # the remaining layers

        self.eta = eta

        self.reg_paras = self.conv1.parameters()
        self.non_reg_paras = self.conv2.parameters()
Example #13
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 bias=True):

        super().__init__()

        conv = []
        for hid, act in zip(hids, acts):
            conv.append(MedianConv(in_features,
                                   hid,
                                   add_self_loops=False,
                                   bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(MedianConv(in_features,
                               out_features,
                               add_self_loops=False,
                               bias=bias))
        conv = Sequential(*conv)

        self.conv = conv
        self.reg_paras = conv[0].parameters()
        self.non_reg_paras = conv[1:].parameters()
Example #14
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[16],
                 acts=['relu'],
                 alpha=0.45,
                 dropout=0.5,
                 bias=False):

        super().__init__()
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(TrimmedConv(in_features,
                                    hid,
                                    bias=bias,
                                    alpha=alpha))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(TrimmedConv(in_features, out_features,
                                bias=bias,
                                alpha=alpha))
        conv = Sequential(*conv)

        self.conv = conv
        self.reg_paras = conv[1].parameters()
        self.non_reg_paras = conv[2:].parameters()
Example #15
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 hids=[16],
                 num_attn=2,
                 acts=['relu'],
                 dropout=0.5,
                 bias=False):
        super().__init__()
        conv = []

        for hid, act in zip(hids, acts):
            conv.append(nn.Linear(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid

        # for the Cora dataset, the first propagation layer is non-trainable
        # and beta is fixed at 0
        conv.append(AGNNConv(trainable=False))
        for _ in range(1, num_attn):
            conv.append(AGNNConv())

        conv.append(nn.Linear(in_features, out_features, bias=bias))
        conv.append(nn.Dropout(dropout))
        conv = Sequential(*conv)
        self.conv = conv
Example #16
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 alpha=0.1,
                 K=10,
                 ppr_dropout=0.,
                 hids=[64],
                 acts=['relu'],
                 dropout=0.5,
                 bias=True):

        super().__init__()

        lin = []
        lin.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            lin.append(nn.Linear(in_features, hid, bias=bias))
            lin.append(activations.get(act))
            lin.append(nn.Dropout(dropout))
            in_features = hid
        lin.append(nn.Linear(in_features, out_features, bias=bias))
        lin = nn.Sequential(*lin)
        self.lin = lin
        self.propagation = APPNPConv(K, alpha, ppr_dropout)
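
A minimal forward-pass sketch for this predict-then-propagate setup, assuming the enclosing class exposes `self.lin` and `self.propagation` exactly as built above; the argument names and the propagation call signature are assumptions, not the source model's actual forward:

    def forward(self, x, edge_index, edge_weight=None):
        # transform node features with the MLP, then smooth the resulting
        # logits with K steps of (approximate) personalized PageRank propagation
        x = self.lin(x)
        return self.propagation(x, edge_index, edge_weight)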
Example #17
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):

        super().__init__()

        conv = []
        for hid, act in zip(hids, acts):
            conv.append(nn.Linear(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GraphConvolution(in_features, out_features, bias=bias))
        conv = Sequential(*conv)
        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[0].parameters(), weight_decay=weight_decay),
                dict(params=conv[1:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
Example #18
    def __init__(self,
                 in_features,
                 out_features,
                 K=10,
                 alpha=0.1,
                 hids=[],
                 acts=[],
                 dropout=0.5,
                 bias=False):

        super().__init__()

        lin = []
        lin.append(nn.Dropout(dropout))

        for hid, act in zip(hids, acts):
            lin.append(nn.Linear(in_features,
                                 hid,
                                 bias=bias))
            lin.append(activations.get(act))
            lin.append(nn.Dropout(dropout))
            in_features = hid
        lin = nn.Sequential(*lin)
        conv = SpectralEigenConv(in_features, out_features, bias=bias, K=K, alpha=alpha)

        self.lin = lin
        self.conv = conv
Example #19
    def __init__(self,
                 in_features,
                 *,
                 out_features=16,
                 hids=[32],
                 acts=['relu'],
                 dropout=0.5,
                 bias=False):
        super().__init__()

        encoder = []
        for hid, act in zip(hids, acts):
            encoder.append(
                GCNConv(in_features,
                        hid,
                        cached=True,
                        bias=bias,
                        normalize=True))
            encoder.append(activations.get(act))
            encoder.append(nn.Dropout(dropout))
            in_features = hid

        encoder.append(
            GCNConv(in_features,
                    out_features,
                    cached=True,
                    bias=bias,
                    normalize=True))

        self.encoder = Sequential(*encoder)
        self.decoder = InnerProductDecoder()
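
A minimal sketch of how this encoder/decoder pair is typically combined in a graph auto-encoder; the method name and call signatures are assumptions (PyG's InnerProductDecoder does accept `(z, edge_index)`), not the source model's actual forward:

    def forward(self, x, edge_index):
        # encode nodes into a low-dimensional latent space
        z = self.encoder(x, edge_index)
        # score edges by the inner product of the endpoint embeddings
        return self.decoder(z, edge_index)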
Example #20
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 alpha=1.0,
                 epsilon=0.9,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):
        super().__init__()
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features, hid, bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(GCNConv(in_features, out_features, bias=bias))
        conv = Sequential(*conv)

        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[1].parameters(), weight_decay=weight_decay),
                dict(params=conv[2:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
        self.alpha = alpha
        self.epsilon = epsilon
Example #21
    def __init__(self,
                 in_features,
                 out_features,
                 mapsize_a,
                 mapsize_b,
                 conv_channel=64,
                 hids=[200],
                 acts=['relu6'],
                 attnum=10,
                 dropout=0.6,
                 bias=True):

        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels=in_features,
                      out_channels=conv_channel,
                      kernel_size=(2, 1),
                      stride=1,
                      padding=0),
            nn.Softmax(dim=1),
        )
        lin = []
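        # the (2, 1) convolution with stride 1 and no padding shrinks the map
        # from (mapsize_a, mapsize_b) to (mapsize_a - 1, mapsize_b), so the
        # flattened feature size feeding the MLP is
        # (mapsize_a - 1) * mapsize_b * conv_channel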
        in_features = (mapsize_a - 1) * mapsize_b * conv_channel
        for hid, act in zip(hids, acts):
            lin.append(nn.Linear(in_features, hid, bias=bias))
            lin.append(activations.get(act))
            lin.append(nn.Dropout(dropout))
            in_features = hid
        lin.append(nn.Linear(in_features, out_features, bias=bias))

        self.lin = nn.Sequential(*lin)
        self.attention = nn.Parameter(
            torch.ones(attnum, mapsize_a - 1, mapsize_b))
Example #22
    def __init__(self,
                 in_features,
                 out_features,
                 in_edge_features,
                 *,
                 hids=[32],
                 pdn_hids=32,
                 acts=['relu'],
                 dropout=0.5,
                 bias=True):
        super().__init__()

        conv = []
        for hid, act in zip(hids, acts):
            conv.append(GCNConv(in_features,
                                hid,
                                bias=bias))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid

        conv.append(GCNConv(in_features,
                            out_features,
                            bias=bias))
        conv = Sequential(*conv)

        self.fc = nn.Sequential(nn.Linear(in_edge_features, pdn_hids),
                                nn.ReLU(),
                                nn.Linear(pdn_hids, 1),
                                nn.Sigmoid())
        self.conv = conv
Example #23
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[64],
                 acts=[None],
                 gamma=0.1,
                 dropout=0.5,
                 bias=False):

        super().__init__()
        self.gamma = gamma
        assert hids, "hids should not be empty"
        layers = nn.ModuleList()
        act_layers = nn.ModuleList()
        inc = in_features
        for hid, act in zip(hids, acts):
            layers.append(GCNConv(inc,
                                  hid,
                                  bias=bias))
            act_layers.append(activations.get(act))
            inc = hid

        layers.append(GCNConv(inc,
                              out_features,
                              bias=bias))
        act_layers.append(activations.get(None))

        self.layers = layers
        self.act_layers = act_layers
        self.scores = nn.ParameterList()
        self.bias = nn.ParameterList()
        self.D_k = nn.ParameterList()
        self.D_bias = nn.ParameterList()

        for hid in [in_features] + hids:
            self.scores.append(nn.Parameter(torch.FloatTensor(hid, 1)))
            self.bias.append(nn.Parameter(torch.FloatTensor(1)))
            self.D_k.append(nn.Parameter(torch.FloatTensor(hid, 1)))
            self.D_bias.append(nn.Parameter(torch.FloatTensor(1)))

        # discriminator for ssl
        self.linear = nn.Linear(hids[-1], 1)
        self.dropout = nn.Dropout(dropout)
        self._adj_knn = None
        self.reset_parameters()
Example #24
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 hids=[64],
                 acts=['relu'],
                 dropout=0.5,
                 bias=False,
                 K=10):
        super().__init__()

        lin = []
        for hid, act in zip(hids, acts):
            lin.append(nn.Dropout(dropout))
            lin.append(nn.Linear(in_features, hid, bias=bias))
            lin.append(activations.get(act))
            in_features = hid
        lin.append(nn.Linear(in_features, out_features, bias=bias))
        lin.append(activations.get(act))
        lin.append(nn.Dropout(dropout))
        lin = nn.Sequential(*lin)
        self.lin = lin
        self.conv = DAGNNConv(out_features, K=K, bias=bias)
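        # DAGNNConv propagates the MLP outputs for up to K hops and learns
        # per-hop weights to adaptively combine them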
Example #25
    def __init__(self,
                 in_features,
                 out_features,
                 act='gelu',
                 dropout=0.6,
                 bias=True):
        super().__init__()
        self.fc1 = nn.Linear(in_features, out_features, bias=bias)
        self.fc2 = nn.Linear(out_features, out_features, bias=bias)
        self.act = activations.get(act)

        self.dropout = nn.Dropout(dropout)
        self.layernorm = nn.LayerNorm(out_features, eps=1e-6)
        self.reset_parameters()
Example #26
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[8],
                 num_heads=[8],
                 acts=['elu'],
                 dropout=0.6,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=True):

        super().__init__()
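        # `head` tracks the number of attention heads of the previous layer;
        # hidden GAT layers concatenate their heads, so the next layer sees
        # `in_features * head` input features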
        head = 1
        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, num_head, act in zip(hids, num_heads, acts):
            conv.append(
                GATConv(in_features * head,
                        hid,
                        heads=num_head,
                        bias=bias,
                        dropout=dropout))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
            head = num_head

        conv.append(
            GATConv(in_features * head,
                    out_features,
                    heads=1,
                    bias=bias,
                    concat=False,
                    dropout=dropout))
        conv = Sequential(*conv)

        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[1].parameters(), weight_decay=weight_decay),
                dict(params=conv[2:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
Example #27
    def __init__(self,
                 in_features,
                 out_features,
                 p=0.3,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 kl=0.005,
                 lr=0.01,
                 bias=True):

        super().__init__()

        conv = []
        conv.append(nn.Dropout(dropout))
        for hid, act in zip(hids, acts):
            conv.append(
                GCNConv(in_features,
                        hid,
                        cached=True,
                        bias=bias,
                        normalize=False))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_features = hid
        conv.append(
            GCNConv(in_features,
                    out_features,
                    cached=True,
                    bias=bias,
                    normalize=False))
        conv = Sequential(*conv)

        self.p = p
        self.kl = kl
        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[1].parameters(), weight_decay=weight_decay),
                dict(params=conv[2:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
Example #28
    def __init__(self,
                 in_features,
                 out_features,
                 K=10,
                 alpha=0.1,
                 eps_U=0.3,
                 eps_V=1.2,
                 lamb_U=0.8,
                 lamb_V=0.8,
                 hids=[],
                 acts=[],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False):

        super().__init__()

        lin = []
        lin.append(nn.Dropout(dropout))

        for hid, act in zip(hids, acts):
            lin.append(nn.Linear(in_features, hid, bias=bias))
            lin.append(activations.get(act))
            lin.append(nn.Dropout(dropout))
            in_features = hid
        lin = nn.Sequential(*lin)
        conv = SpectralEigenConv(in_features,
                                 out_features,
                                 bias=bias,
                                 K=K,
                                 alpha=alpha)

        self.lin = lin
        self.conv = conv
        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(),
                                          weight_decay=weight_decay,
                                          lr=lr),
                     metrics=[Accuracy()])
        self.eps_U = eps_U
        self.eps_V = eps_V
        self.lamb_U = lamb_U
        self.lamb_V = lamb_V
Example #29
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[8],
                 num_heads=[8],
                 acts=['elu'],
                 dropout=0.6,
                 weight_decay=5e-4,
                 lr=0.01):

        super().__init__()
        head = 1
        conv = []
        for hid, num_head, act in zip(hids, num_heads, acts):
            conv.append(
                GATConv(in_features * head,
                        hid,
                        num_heads=num_head,
                        feat_drop=dropout,
                        attn_drop=dropout))
            conv.append(activations.get(act))
            conv.append(nn.Flatten(start_dim=1))
            conv.append(nn.Dropout(dropout))
            in_features = hid
            head = num_head

        conv.append(
            GATConv(in_features * head,
                    out_features,
                    num_heads=1,
                    feat_drop=dropout,
                    attn_drop=dropout))
        conv = Sequential(*conv, inverse=True)  # `inverse=True` is important

        self.conv = conv
        self.compile(
            loss=nn.CrossEntropyLoss(),
            optimizer=optim.Adam([
                dict(params=conv[0].parameters(), weight_decay=weight_decay),
                dict(params=conv[1:].parameters(), weight_decay=0.)
            ],
                                 lr=lr),
            metrics=[Accuracy()])
Example #30
    def __init__(self,
                 in_features,
                 out_features,
                 hids=[32],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 bias=False,
                 aggregator='mean',
                 output_normalize=False,
                 sizes=[15, 5],
                 concat=True):

        super().__init__()
        Agg = _AGG.get(aggregator, None)
        if not Agg:
            raise ValueError(
                f"Invalid value of 'aggregator', allowed values {tuple(_AGG.keys())}, but got '{aggregator}'."
            )

        self.output_normalize = output_normalize
        self.sizes = sizes
        assert len(sizes) == len(hids) + 1

        aggregators, act_layers = nn.ModuleList(), nn.ModuleList()
        for hid, act in zip(hids, acts):
            aggregators.append(Agg(in_features, hid, concat=concat, bias=bias))
            act_layers.append(activations.get(act))
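            # with `concat=True` the aggregator concatenates a node's own
            # representation with the aggregated neighbor representation,
            # doubling the output dimension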
            in_features = hid * 2 if concat else hid

        aggregators.append(Agg(in_features, out_features, bias=bias))

        self.aggregators = aggregators
        self.dropout = nn.Dropout(dropout)
        self.acts = act_layers

        self.compile(loss=nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(),
                                          lr=lr,
                                          weight_decay=weight_decay),
                     metrics=[Accuracy()])