Example #1
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[16],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=True):

        super().__init__()

        self.layers = ModuleList()

        inc = in_channels
        for hidden, activation in zip(hiddens, activations):
            layer = GraphConv(inc,
                              hidden,
                              activation=get_activation(activation),
                              bias=use_bias)
            self.layers.append(layer)
            inc = hidden
        # output layer
        self.layers.append(GraphConv(inc, out_channels))

        self.dropout = Dropout(p=dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(self.parameters(),
                                          lr=lr,
                                          weight_decay=weight_decay),
                     metrics=[Accuracy()])
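
The GraphConv signature used above (activation= and bias= keyword arguments) matches DGL's dgl.nn.pytorch.GraphConv; compile(), get_activation() and Accuracy() come from the surrounding framework and are not shown here. A minimal usage sketch for one such layer, assuming DGL really is the backend:

import dgl
import torch
from dgl.nn.pytorch import GraphConv

# Toy 3-node graph with self-loops; 5 input features per node.
g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
feat = torch.randn(3, 5)

conv = GraphConv(5, 3, activation=torch.relu, bias=True)
out = conv(g, feat)            # shape: (3, 3)
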
Example #2
    def __init__(self,
                 in_channels,
                 out_channels,
                 hids=[64],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-3,
                 lr=0.01,
                 use_bias=False,
                 K=10):
        super().__init__()

        layers = nn.ModuleList()
        acts_fn = []

        # use ModuleList to hold layers of different sizes
        inc = in_channels
        for hid, act in zip(hids, acts):
            layer = nn.Linear(inc, hid, bias=use_bias)
            layers.append(layer)
            acts_fn.append(get_activation(act))
            inc = hid

        # the output layer reuses the last entry of `acts` as its activation
        # (this assumes `acts` is non-empty, as with the default ['relu'])
        layer = nn.Linear(inc, out_channels, bias=use_bias)
        acts_fn.append(get_activation(act))
        layers.append(layer)

        conv = PropConvolution(out_channels,
                               K=K,
                               use_bias=use_bias,
                               activation="sigmoid")
        self.layers = layers
        self.conv = conv
        paras = [
            dict(params=layers.parameters(), weight_decay=weight_decay),
            dict(params=conv.parameters(), weight_decay=weight_decay),
        ]

        # note: weight_decay is applied to both parameter groups here
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
        self.dropout = nn.Dropout(dropout)
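
The paras list above is plain PyTorch: torch.optim.Adam accepts a list of parameter groups, each carrying its own weight_decay. A minimal, self-contained sketch of the pattern with a hypothetical trunk/head split (layer sizes are made up; note that this example decays both groups, whereas the later examples set weight_decay=0 on the final layer):

import torch.nn as nn
import torch.optim as optim

trunk = nn.Sequential(nn.Linear(16, 64), nn.ReLU())   # hypothetical hidden block
head = nn.Linear(64, 7)                               # hypothetical output layer

optimizer = optim.Adam([
    dict(params=trunk.parameters(), weight_decay=5e-3),  # regularized group
    dict(params=head.parameters(), weight_decay=0.0),    # group without decay
], lr=0.01)

for group in optimizer.param_groups:                  # inspect per-group settings
    print(group["weight_decay"], len(group["params"]))
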
Example #3
    def __init__(self,
                 in_channels,
                 out_channels,
                 hids=[8],
                 num_heads=[8],
                 acts=['elu'],
                 dropout=0.6,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=True):

        super().__init__()

        layers = ModuleList()
        act_fns = []
        paras = []

        inc = in_channels
        pre_head = 1
        for hid, num_head, act in zip(hids, num_heads, acts):
            layer = GATConv(inc * pre_head,
                            hid,
                            heads=num_head,
                            bias=use_bias,
                            dropout=dropout)
            layers.append(layer)
            act_fns.append(get_activation(act))
            paras.append(
                dict(params=layer.parameters(), weight_decay=weight_decay))
            inc = hid
            pre_head = num_head

        layer = GATConv(inc * pre_head,
                        out_channels,
                        heads=1,
                        bias=use_bias,
                        concat=False,
                        dropout=dropout)
        layers.append(layer)
        # do not use weight_decay in the final layer
        paras.append(dict(params=layer.parameters(), weight_decay=0.))

        self.act_fns = act_fns
        self.layers = layers
        self.dropout = Dropout(dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
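
The inc * pre_head bookkeeping is needed because this GATConv signature (heads=, concat=, dropout=) looks like PyTorch Geometric's, where per-head outputs are concatenated by default, so a layer with hid=8 and heads=8 emits 64 features; the final layer passes concat=False to average the heads instead. A small sketch under that assumption:

import torch
import torch.nn.functional as F
from torch_geometric.nn import GATConv

x = torch.randn(4, 16)
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 0, 3, 2]])

conv1 = GATConv(16, 8, heads=8, dropout=0.6)        # output: [4, 8 * 8]
conv2 = GATConv(8 * 8, 7, heads=1, concat=False)    # output: [4, 7]

h = F.elu(conv1(x, edge_index))
out = conv2(h, edge_index)
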
Example #4
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[16],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=True):

        super().__init__()

        paras = []
        acts = []
        layers = ModuleList()

        # use ModuleList to hold layers of different sizes
        inc = in_channels
        for hidden, activation in zip(hiddens, activations):
            layer = GCNConv(inc,
                            hidden,
                            cached=True,
                            bias=use_bias,
                            normalize=False)
            layers.append(layer)
            paras.append(
                dict(params=layer.parameters(), weight_decay=weight_decay))
            acts.append(get_activation(activation))
            inc = hidden

        layer = GCNConv(inc,
                        out_channels,
                        cached=True,
                        bias=use_bias,
                        normalize=False)
        layers.append(layer)
        # do not use weight_decay in the final layer
        paras.append(dict(params=layer.parameters(), weight_decay=0.))

        self.acts = acts
        self.layers = layers
        self.dropout = Dropout(dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
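
normalize=False in this GCNConv (presumably PyTorch Geometric's) means the layer does not normalize the adjacency itself, so a pre-normalized edge_weight is expected at call time; cached=True only matters when the layer performs that normalization internally. A minimal sketch, with made-up weights standing in for a pre-normalized adjacency:

import torch
from torch_geometric.nn import GCNConv

x = torch.randn(4, 16)
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 0, 3, 2]])
edge_weight = torch.tensor([0.5, 0.5, 0.5, 0.5])    # assumed pre-normalized weights

conv = GCNConv(16, 7, cached=True, bias=True, normalize=False)
out = conv(x, edge_index, edge_weight)              # shape: [4, 7]
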
Example #5
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[8],
                 n_heads=[8],
                 activations=['elu'],
                 dropout=0.6,
                 weight_decay=5e-4,
                 lr=0.01):

        super().__init__()

        layers = ModuleList()
        paras = []

        inc = in_channels
        pre_head = 1
        for hidden, n_head, activation in zip(hiddens, n_heads, activations):
            layer = GATConv(inc * pre_head,
                            hidden,
                            activation=get_activation(activation),
                            num_heads=n_head,
                            feat_drop=dropout,
                            attn_drop=dropout)
            layers.append(layer)
            paras.append(
                dict(params=layer.parameters(), weight_decay=weight_decay))
            inc = hidden
            pre_head = n_head

        layer = GATConv(inc * pre_head,
                        out_channels,
                        num_heads=1,
                        feat_drop=dropout,
                        attn_drop=dropout)
        layers.append(layer)
        # do not use weight_decay in the final layer
        paras.append(dict(params=layer.parameters(), weight_decay=0.))

        self.layers = layers
        self.dropout = Dropout(dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
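
Here the keyword arguments (num_heads=, feat_drop=, attn_drop=, activation=) match DGL's dgl.nn.pytorch.GATConv, which returns a tensor of shape (num_nodes, num_heads, out_feats); a forward pass therefore typically flattens the head dimension between layers. A small sketch under that assumption:

import dgl
import torch
from dgl.nn.pytorch import GATConv

g = dgl.add_self_loop(dgl.graph(([0, 1, 2], [1, 2, 0])))
feat = torch.randn(3, 8)

conv = GATConv(8, 4, num_heads=8, feat_drop=0.6, attn_drop=0.6)
h = conv(g, feat)              # shape: (3, 8, 4); one slice per attention head
h = h.flatten(1)               # concatenate heads -> (3, 32)
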
Example #6
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[],
                 activations=[],
                 dropout=0.5,
                 weight_decay=5e-5,
                 lr=0.2,
                 use_bias=False):
        super().__init__()

        if len(hiddens) != len(activations):
            raise RuntimeError(
                "Arguments 'hiddens' and 'activations' must have the same length, "
                "or both be set to `[]`.")

        layers = ModuleList()
        acts = []
        paras = []
        inc = in_channels
        for hidden, activation in zip(hiddens, activations):
            layer = Linear(inc, hidden, bias=use_bias)
            paras.append(
                dict(params=layer.parameters(), weight_decay=weight_decay))
            layers.append(layer)
            inc = hidden
            acts.append(get_activation(activation))

        layer = Linear(inc, out_channels, bias=use_bias)
        layers.append(layer)
        paras.append(dict(params=layer.parameters(),
                          weight_decay=weight_decay))

        self.layers = layers
        self.acts = acts
        self.dropout = Dropout(dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
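
With the defaults hiddens=[] and activations=[], the loop body never runs and the whole model collapses to a single Linear(in_channels, out_channels), i.e. multinomial logistic regression on the node features; the high learning rate and small weight decay suggest the graph propagation has already been baked into those features (an SGC-style setup, though that is an inference, not something the snippet states). A tiny sketch with made-up sizes:

import torch
import torch.nn as nn

# hiddens=[]  ->  no hidden layers; the model is one linear map
layer = nn.Linear(1433, 7, bias=False)     # made-up, Cora-like dimensions
logits = layer(torch.randn(5, 1433))       # shape: [5, 7]
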
Example #7
    def __init__(self,
                 in_channels,
                 out_channels,
                 hids=[16],
                 acts=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=False):

        super().__init__()

        layers = nn.ModuleList()
        paras = []
        acts_fn = []

        # use ModuleList to hold layers of different sizes
        inc = in_channels
        for hid, act in zip(hids, acts):
            layer = nn.Linear(inc, hid, bias=use_bias)

            layers.append(layer)
            acts_fn.append(get_activation(act))
            paras.append(
                dict(params=layer.parameters(), weight_decay=weight_decay))
            inc = hid

        conv = GraphConvolution(inc, out_channels, use_bias=use_bias)
        # do not use weight_decay in the final layer
        paras.append(dict(params=conv.parameters(), weight_decay=0.))
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
        self.dropout = nn.Dropout(dropout)
        self.acts_fn = acts_fn
        self.layers = layers
        self.conv = conv
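
GraphConvolution here is a framework-specific layer whose call signature is not shown, but the overall shape of the model (an MLP trunk followed by a single, undecayed graph-convolution head) can be illustrated with a minimal Kipf-style layer. This is a hypothetical stand-in, not the framework's implementation:

import torch
import torch.nn as nn

class DenseGraphConv(nn.Module):
    # Minimal Kipf-style convolution: out = adj @ (x @ W); illustrative only.
    def __init__(self, in_channels, out_channels, use_bias=False):
        super().__init__()
        self.linear = nn.Linear(in_channels, out_channels, bias=use_bias)

    def forward(self, x, adj):
        return adj @ self.linear(x)

x = torch.randn(4, 16)
adj = torch.eye(4)                   # stand-in for a normalized adjacency matrix
out = DenseGraphConv(16, 7)(x, adj)  # shape: [4, 7]
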