Example #1
 def __init__(self):
     super(Net, self).__init__()
     #self.conv1 = TAGConv(4, 128, 3)
     #self.conv2 = TAGConv(128, 128, 3)
     self.conv1 = BallConv(MLP([2*3, 128, 128, 128]), 'max')
     self.conv2 = BallConv(MLP([2*128, 256]), 'max')        
     self.lin1 = torch.nn.Sequential(
         torch.nn.Linear(256,128),
         torch.nn.ReLU(),
         #BN(512),
     )     
     self.lin2 = torch.nn.Sequential(
         BN(128),
         torch.nn.Linear(128,128),
         torch.nn.ReLU(),
         BN(128),
         torch.nn.Dropout(0.5)
         )        
     self.lin3 = torch.nn.Sequential(
         torch.nn.Linear(128,128),
         torch.nn.ReLU(),
         BN(128),
         torch.nn.Dropout(0.5),
     )        
     self.output = torch.nn.Sequential(
         torch.nn.Linear(128, 10)
     )        
Example #2
    def __init__(
        self,
        in_channels,
        out_channels,
        dim,
        kernel_size,
        hidden_channels=None,
        dilation=1,
        bias=True,
        **kwargs,
    ):
        super(XConv, self).__init__()

        self.in_channels = in_channels
        if hidden_channels is None:
            hidden_channels = in_channels // 4
        assert hidden_channels > 0
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.dim = dim
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.kwargs = kwargs

        C_in, C_delta, C_out = in_channels, hidden_channels, out_channels
        D, K = dim, kernel_size

        self.mlp1 = S(
            L(dim, C_delta),
            ELU(),
            BN(C_delta),
            L(C_delta, C_delta),
            ELU(),
            BN(C_delta),
            Reshape(-1, K, C_delta),
        )

        self.mlp2 = S(
            L(D * K, K**2),
            ELU(),
            BN(K**2),
            Reshape(-1, K, K),
            Conv1d(K, K**2, K, groups=K),
            ELU(),
            BN(K**2),
            Reshape(-1, K, K),
            Conv1d(K, K**2, K, groups=K),
            BN(K**2),
            Reshape(-1, K, K),
        )

        C_in = C_in + C_delta
        depth_multiplier = int(ceil(C_out / C_in))
        self.conv = S(
            Conv1d(C_in, C_in * depth_multiplier, K, groups=C_in),
            Reshape(-1, C_in * depth_multiplier),
            L(C_in * depth_multiplier, C_out, bias=bias),
        )

        self.reset_parameters()
Example #3
    def __init__(self, params):
        super(CVAE, self).__init__()
        self.model_str = 'CVAE'
        self.is_cuda = False

        self.latent_dim = latent_dim = params.get('latent_dim', 2)
        self.hdim = hdim = params.get('hdim', 100)
        self.batchnorm = params.get('batchnorm', True)

        # encoder
        self.fc1 = fc(784 + 10, hdim)
        if self.batchnorm:
            self.bn_1 = BN(hdim, momentum=.9)
        self.fc_mu = fc(hdim, latent_dim)  # output the mean of z
        if self.batchnorm:
            self.bn_mu = BN(latent_dim, momentum=.9)
        self.fc_logvar = fc(hdim,
                            latent_dim)  # output the log of the variance of z
        if self.batchnorm:
            self.bn_logvar = BN(latent_dim, momentum=.9)

        # decoder
        self.fc2 = fc(latent_dim + 10, hdim)
        if self.batchnorm:
            self.bn_2 = BN(hdim, momentum=.9)
        self.fc3 = fc(hdim, 784)
        if self.batchnorm:
            self.bn_3 = BN(784, momentum=.9)
Example #4
 def __init__(self, dataset, num_layers, hidden, mode='cat'):
     super(GIN0WithJK, self).__init__()
     self.conv1 = GINConv(Sequential(
         Linear(dataset.num_features, hidden),
         ReLU(),
         Linear(hidden, hidden),
         ReLU(),
         BN(hidden),
     ),
                          train_eps=False)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(
             GINConv(Sequential(
                 Linear(hidden, hidden),
                 ReLU(),
                 Linear(hidden, hidden),
                 ReLU(),
                 BN(hidden),
             ),
                     train_eps=False))
     self.jump = JumpingKnowledge(mode)
     if mode == 'cat':
         self.lin1 = Linear(num_layers * hidden, hidden)
     else:
         self.lin1 = Linear(hidden, hidden)
     self.lin2 = Linear(hidden, dataset.num_classes)
Example #5
 def __init__(self, dataset, num_layers, hidden1, hidden2, deltas, elasticity=0.01, num_iterations = 30):
     super(cut_MPNN, self).__init__()
     self.hidden1 = hidden1
     self.hidden2 = hidden2
     self.conv1 = GINConv(Sequential(
         Linear(1, self.hidden1),
         ReLU(),
         Linear(self.hidden1, self.hidden1),
         ReLU(),
         BN(self.hidden1),
     ), train_eps=False)
     self.num_iterations = num_iterations
     self.convs = torch.nn.ModuleList()
     self.deltas = deltas
     self.numlayers = num_layers
     self.elasticity = elasticity
     
     self.bns = torch.nn.ModuleList()
     for i in range(num_layers-1):
         self.bns.append(BN( self.hidden1))
     self.convs = torch.nn.ModuleList()        
     for i in range(num_layers - 1):
         self.convs.append(GINConv(Sequential(
             Linear(self.hidden1, self.hidden1),
             ReLU(),
             Linear(self.hidden1, self.hidden1),
             ReLU(),
             BN(self.hidden1),
         ), train_eps=False))
  
     self.conv2 = GATAConv(self.hidden1, self.hidden2, heads=8)
     self.lin1 = Linear(8*self.hidden2, self.hidden1)
     self.bn2 = BN(self.hidden1)
     self.lin2 = Linear(self.hidden1, 1)
Example #6
 def __init__(self, features):
     super().__init__()
     self.c1 = Conv1d(features, int(features / 2), kernel_size=1)
     self.c2 = Conv1d(int(features / 2), int(features / 2), kernel_size=1)
     self.c3 = Conv1d(int(features / 2), features, kernel_size=1)
     self.bn1 = BN(int(features))
     self.bn2 = BN(int(features / 2))
Example #7
    def __init__(self, in_channels, out_channels, dim, kernel_size,
                 hidden_channels=None, dilation=1, bias=True, BiLinear=BiLinear, BiConv1d=BiConv1d, ifFirst=False, **kwargs):
        super(BiXConv, self).__init__()

        if knn_graph is None:
            raise ImportError('`XConv` requires `torch-cluster`.')

        self.in_channels = in_channels
        if hidden_channels is None:
            hidden_channels = in_channels // 4
        assert hidden_channels > 0
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.dim = dim
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.kwargs = kwargs

        C_in, C_delta, C_out = in_channels, hidden_channels, out_channels
        D, K = dim, kernel_size

        if ifFirst:
            Lin1 = Lin
        else:
            Lin1 = BiLinear

        self.mlp1 = S(
            Lin1(dim, C_delta),
            Hardtanh(),
            BN(C_delta),
            BiLinear(C_delta, C_delta),
            Hardtanh(),
            BN(C_delta),
            Reshape(-1, K, C_delta),
        )

        self.mlp2 = S(
            Lin1(D * K, K**2),
            Hardtanh(),
            BN(K**2),
            Reshape(-1, K, K),
            BiConv1d(K, K**2, K, groups=K),
            Hardtanh(),
            BN(K**2),
            Reshape(-1, K, K),
            BiConv1d(K, K**2, K, groups=K),
            BN(K**2),
            Reshape(-1, K, K),
        )

        C_in = C_in + C_delta
        depth_multiplier = int(ceil(C_out / C_in))
        self.conv = S(
            BiConv1d(C_in, C_in * depth_multiplier, K, groups=C_in),
            Reshape(-1, C_in * depth_multiplier),
            BiLinear(C_in * depth_multiplier, C_out, bias=bias),
        )

        self.reset_parameters()
Example #8
    def __init__(self, edge_dim, dim_init, dim):
        super(GINE0Conv, self).__init__(aggr="add")

        self.edge_encoder = Sequential(Linear(edge_dim, dim_init), ReLU(),
                                       Linear(dim_init, dim_init), ReLU(),
                                       BN(dim_init))
        self.mlp = Sequential(Linear(dim_init, dim), ReLU(), Linear(dim, dim),
                              ReLU(), BN(dim))
Example #9
def MLP(channels,
        batch_norm=True,
        act='relu',
        bn_kwargs={},
        dropout=0.0,
        dropout_position="right",
        batchnorm_position="right",
        bias=True):
    if batch_norm:
        # bad practice to mix these two
        assert dropout == 0.0
    if dropout > 0.0:
        # bad practice to mix these two
        assert not batch_norm

    if isinstance(act, str) or act is None:
        act = [act] * (len(channels) - 1)

    if isinstance(batch_norm, bool):
        batch_norm = [batch_norm] * (len(channels) - 1)

    if isinstance(dropout, float):
        dropout = [dropout] * (len(channels) - 1)

    activation = []
    for a in act:
        if a is None:
            activation.append(Identity)
        elif a == 'relu':
            activation.append(ReLU)
        elif a == 'elu':
            activation.append(ELU)
        elif a == 'tanh':
            activation.append(Tanh)
        elif a == "prelu":
            activation.append(PReLU)
        else:
            raise ValueError("unrecognized keyword: {}".format(a))

    layers = []
    for i in range(1, len(channels)):
        if dropout[i - 1] > 0 and dropout_position == "left":
            layers.append(Dropout(p=dropout[i - 1]))

        layers.append(Lin(channels[i - 1], channels[i], bias=bias))

        if batch_norm[i - 1] and batchnorm_position == "left":
            layers.append(BN(channels[i], **bn_kwargs))

        layers.append(activation[i - 1]())

        if batch_norm[i - 1] and batchnorm_position == "right":
            layers.append(BN(channels[i], **bn_kwargs))

        if dropout[i - 1] > 0 and dropout_position == "right":
            layers.append(Dropout(p=dropout[i - 1]))

    return Seq(*layers)
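A minimal usage sketch for the MLP builder above, assuming the aliases commonly used in these snippets (Seq = torch.nn.Sequential, Lin = torch.nn.Linear, BN = torch.nn.BatchNorm1d, plus the torch.nn activations and Dropout); the channel widths and dropout rate are illustrative only.

# Assumed aliases; the original snippet's imports are not shown.
import torch
from torch.nn import (Sequential as Seq, Linear as Lin, BatchNorm1d as BN,
                      ReLU, ELU, Tanh, PReLU, Identity, Dropout)

# Linear -> ELU -> BN for every layer (batch_norm=True disallows dropout here).
mlp = MLP([32, 64, 64, 10], batch_norm=True, act='elu')
out = mlp(torch.randn(8, 32))    # shape: (8, 10)

# Dropout-only variant, with Dropout placed before each Linear layer.
mlp_do = MLP([32, 64, 10], batch_norm=False, dropout=0.5,
             dropout_position="left")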
Example #10
    def __init__(self, edge_dim, dim_init, dim):
        super(GINEConv, self).__init__(aggr="add")

        self.edge_encoder = Sequential(Linear(edge_dim, dim_init), ReLU(),
                                       Linear(dim_init, dim_init), ReLU(),
                                       BN(dim_init))
        self.mlp = Sequential(Linear(dim_init, dim), ReLU(), Linear(dim, dim),
                              ReLU(), BN(dim))
        self.eps = torch.nn.Parameter(torch.Tensor([0]))
        self.initial_eps = 0
Example #11
    def __init__(self, in_channels: int, out_channels: int, dim: int,
                 kernel_size: int, hidden_channels: Optional[int] = None,
                 dilation: int = 1, bias: bool = True, num_workers: int = 1):
        super(XConv, self).__init__()

        if knn_graph is None:
            raise ImportError('`XConv` requires `torch-cluster`.')

        self.in_channels = in_channels
        if hidden_channels is None:
            hidden_channels = in_channels // 4
        assert hidden_channels > 0
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.dim = dim
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.num_workers = num_workers

        C_in, C_delta, C_out = in_channels, hidden_channels, out_channels
        D, K = dim, kernel_size

        self.mlp1 = S(
            L(dim, C_delta),
            ELU(),
            BN(C_delta),
            L(C_delta, C_delta),
            ELU(),
            BN(C_delta),
            Reshape(-1, K, C_delta),
        )

        self.mlp2 = S(
            L(D * K, K**2),
            ELU(),
            BN(K**2),
            Reshape(-1, K, K),
            Conv1d(K, K**2, K, groups=K),
            ELU(),
            BN(K**2),
            Reshape(-1, K, K),
            Conv1d(K, K**2, K, groups=K),
            BN(K**2),
            Reshape(-1, K, K),
        )

        C_in = C_in + C_delta
        depth_multiplier = int(ceil(C_out / C_in))
        self.conv = S(
            Conv1d(C_in, C_in * depth_multiplier, K, groups=C_in),
            Reshape(-1, C_in * depth_multiplier),
            L(C_in * depth_multiplier, C_out, bias=bias),
        )

        self.reset_parameters()
Example #12
    def __init__(
            self,
            n_actions,
            hidden_dim,
            featurizer,
            game_info,
            player_id,
            #neural_network_history,
            #neural_network_loss,
            learning_rate,
            optimizer,
            tensorboard=None,
            grad_clip=None,
            shared_network=None,
            q_network=None,
            cuda=False):
        super(PiNetworkBN, self).__init__()
        self.is_cuda = cuda
        self.n_actions = n_actions
        self.featurizer = featurizer
        self.hidden_dim = hidden_dim
        hdim = self.hidden_dim

        # SHARE WEIGHTS
        assert not (shared_network is not None and q_network is not None
                    ), "you should provide either q_network or shared_network"
        if q_network is not None:
            self.shared_network = q_network.shared_network
        else:
            if shared_network is not None:
                self.shared_network = shared_network
            else:
                self.shared_network = SharedNetworkBN(n_actions, hidden_dim)
        for i in range(19, 27):
            setattr(self, 'fc' + str(i),
                    getattr(self.shared_network, 'fc' + str(i)))
            setattr(self, 'bn' + str(i),
                    getattr(self.shared_network, 'bn' + str(i)))

        # LAST PERSONAL LAYERS
        self.fc27 = fc(hdim, hdim)
        self.bn27 = BN(hdim, momentum=.99)
        self.fc28 = fc(hdim, n_actions)
        self.bn28 = BN(n_actions, momentum=.99)

        self.optim = optim.Adam(self.parameters(), lr=learning_rate)
        if cuda:
            self.cuda()

        # for saving neural network history data
        self.game_info = game_info
        self.player_id = player_id  # know the owner of the network
        #self.neural_network_history = neural_network_history
        #self.neural_network_loss = neural_network_loss
        self.tensorboard = tensorboard
Example #13
    def __init__(self, params):
        super(Discriminator, self).__init__()
        self.model_str = 'WDisc'

        self.h_dim = params.get('hidden_dim', 100)
        self.batchnorm = False
        # self.batchnorm = params.get('batchnorm', True)

        self.fc1 = fc(784, self.h_dim)
        self.fc2 = fc(self.h_dim, 1)
        if self.batchnorm:
            self.bn_1 = BN(self.h_dim, eps=1e-5, momentum=.9)
            self.bn_2 = BN(1, eps=1e-5, momentum=.9)
Example #14
    def __init__(self, params):
        super(Generator, self).__init__()
        self.model_str = 'WGen'

        self.latent_dim = params.get('latent_dim', 2)
        self.h_dim = params.get('hidden_dim', 100)
        self.batchnorm = params.get('batchnorm', True)

        self.fc1 = fc(self.latent_dim, self.h_dim)
        self.fc2 = fc(self.h_dim, 784)
        if self.batchnorm:
            self.bn_1 = BN(self.h_dim, eps=1e-5, momentum=.9)
            self.bn_2 = BN(784, eps=1e-5, momentum=.9)
Example #15
 def __init__(self, inp_dim, inter_dim, oup_dim, cls):
     super(GraphSAGE, self).__init__()
     self.conv1 = SAGEConv(inp_dim, inter_dim)
     self.bn1 = BN(inter_dim)
     self.conv2 = SAGEConv(inter_dim, inter_dim)
     self.bn2 = BN(inter_dim)
     self.conv3 = SAGEConv(inter_dim, inter_dim)
     self.bn3 = BN(inter_dim)
     self.conv4 = SAGEConv(inter_dim, inter_dim)
     self.bn4 = BN(inter_dim)
     self.lin1 = torch.nn.Linear(inter_dim, inter_dim)
     self.lin2 = torch.nn.Linear(inter_dim, inter_dim)
     self.lin3 = torch.nn.Linear(inter_dim, oup_dim)
     self.cls = cls
Example #16
    def __init__(self, n_actions, hidden_dim, cuda=False):
        super(SharedNetworkBN, self).__init__()
        self.n_actions = n_actions
        self.hidden_dim = hidden_dim
        hdim = hidden_dim

        self.fc19 = fc(5 * 6 * 2, hdim)
        self.bn19 = BN(hdim, momentum=.99)
        self.fc20 = fc(5 * 6 * 2 + hdim, hdim)
        self.bn20 = BN(hdim, momentum=.99)
        self.fc21 = fc(5 * 6 * 2 + hdim, hdim)
        self.bn21 = BN(hdim, momentum=.99)
        self.fc22 = fc(5 * 6 * 2 + hdim, hdim)
        self.bn22 = BN(hdim, momentum=.99)
        self.fc23 = fc(hdim, hdim)
        self.bn23 = BN(hdim, momentum=.99)
        self.fc24 = fc(6, hdim)
        self.bn24 = BN(hdim, momentum=.99)
        self.fc25 = fc(3 * hdim, hdim)
        self.bn25 = BN(hdim, momentum=.99)
        self.fc26 = fc(hdim, hdim)
        self.bn26 = BN(hdim, momentum=.99)

        if cuda:
            self.cuda()
Example #17
def FirstBiMLP(channels,
               batch_norm=True,
               activation='ReLU',
               bilinear='BiLinear'):
    part1 = [
        Seq(Lin(channels[0], channels[1]), activations[activation](),
            BN(channels[1]))
    ]
    part2 = [
        Seq(biLinears[bilinear](channels[i - 1],
                                channels[i]), activations[activation](),
            BN(channels[i])) for i in range(2, len(channels))
    ]
    obj = part1 + part2
    return Seq(*obj)
Example #18
    def __init__(self,
                 hidden_channels,
                 num_layers,
                 max_z,
                 train_dataset,
                 use_feature=False,
                 node_embedding=None,
                 dropout=0.5,
                 jk=True,
                 train_eps=False):
        super(GIN, self).__init__()
        self.use_feature = use_feature
        self.node_embedding = node_embedding
        self.max_z = max_z
        self.z_embedding = Embedding(self.max_z, hidden_channels)
        self.jk = jk

        initial_channels = hidden_channels
        if self.use_feature:
            initial_channels += train_dataset.num_features
        if self.node_embedding is not None:
            initial_channels += node_embedding.embedding_dim
        self.conv1 = GINConv(Sequential(
            Linear(initial_channels, hidden_channels),
            ReLU(),
            Linear(hidden_channels, hidden_channels),
            ReLU(),
            BN(hidden_channels),
        ),
                             train_eps=train_eps)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(
                GINConv(Sequential(
                    Linear(hidden_channels, hidden_channels),
                    ReLU(),
                    Linear(hidden_channels, hidden_channels),
                    ReLU(),
                    BN(hidden_channels),
                ),
                        train_eps=train_eps))

        self.dropout = dropout
        if self.jk:
            self.lin1 = Linear(num_layers * hidden_channels, hidden_channels)
        else:
            self.lin1 = Linear(hidden_channels, hidden_channels)
        self.lin2 = Linear(hidden_channels, 1)
Example #19
 def __init__(self, inp_dim, inter_dim, oup_dim, cls):
     super(GAT, self).__init__()
     self.conv1 = GATConv(inp_dim, inter_dim)
     self.bn1 = BN(inter_dim)
     self.conv2 = GATConv(inter_dim, inter_dim)
     self.bn2 = BN(inter_dim)
     self.conv3 = GATConv(inter_dim, inter_dim)
     self.bn3 = BN(inter_dim)
     self.conv4 = GATConv(inter_dim, inter_dim)
     self.bn4 = BN(inter_dim)
     #self.conv5 = GATConv(inter_dim, inter_dim)
     #self.bn5 = BN(inter_dim)
     self.lin1 = torch.nn.Linear(inter_dim, inter_dim)
     self.lin2 = torch.nn.Linear(inter_dim, inter_dim)
     self.lin3 = torch.nn.Linear(inter_dim, oup_dim)
     self.cls = cls
Example #20
    def __init__(self,
                 input_size,
                 embedding_size,
                 n_classes,
                 pool_op='max',
                 same_size=False):
        super(GCNConvNetBN, self).__init__()
        channels = [input_size, 64, 64, 64, 128, 1024]
        self.convs = nn.ModuleList()
        batch_norm = True
        for i in range(1, len(channels)):
            if batch_norm:
                self.convs.append(
                    Seq(GCN(channels[i - 1], channels[i]), ReLU(),
                        BN(channels[i])))
            else:
                self.convs.append(
                    Seq(GCN(channels[i - 1], channels[i]), ReLU()))

        if pool_op == 'max':
            self.pool = global_max_pool
        self.mlp = MLP([1024, 512, 256, embedding_size])
        self.fc = torch.nn.Linear(embedding_size, n_classes)
        self.emb_size = embedding_size
        self.same_size = same_size
        self.embedding = None
        print(self)
Example #21
    def __init__(self, n_layers, hidden_dim, input_dim, action_dim=2, gain=math.sqrt(2 / 1.01), activation=LeakyReLU(.01)):
        super().__init__()
        self.n_layers = n_layers
        self.action_dim = action_dim
        self.hidden_dim = hidden_dim
        self.activation = activation
        for l in range(n_layers - 1):  # in the last block there is no u
            if l > 0:
                setattr(self, 'u' + str(l), t.nn.Linear(hidden_dim, hidden_dim))
            else:
                setattr(self, 'u' + str(l), t.nn.Linear(input_dim, hidden_dim))
            if l < n_layers - 2:  # the last u does not have activation nor BN
                setattr(self, 'bn_u' + str(l), BN(hidden_dim, momentum=.9, eps=1e-3))

        for l in range(n_layers):
            output_dim = (l < n_layers - 1) * hidden_dim + (l == n_layers - 1) * 1  # the last z has dim 1 (Q value)
            idim = (l > 0) * hidden_dim + (l == 0) * input_dim  # the first block has input dim input dim, the next ones hidden dim
            setattr(self, 'z_u' + str(l), t.nn.Linear(idim, output_dim))
            setattr(self, 'z_au' + str(l), t.nn.Linear(idim, action_dim))
            setattr(self, 'z_au_' + str(l), t.nn.Linear(action_dim, output_dim, bias=False))
            if l > 0:  # at the first block, z is not yet defined
                setattr(self, 'z_zu' + str(l), t.nn.Linear(idim, hidden_dim))
                setattr(self, 'z_zu_' + str(l), t.nn.Linear(hidden_dim, output_dim, bias=False))

        self.initialize_weights_xavier(gain)

        # enforce convexity (or rather concavity)
        self.make_cvx()
Example #22
def MLP(channels, batch_norm=True):
    net = []
    for i in range(1, len(channels)):
        net.append(Lin(channels[i-1], channels[i]))
        net.append(ReLU())
        net.append(BN(channels[i]))
    return Seq(*net)
Example #23
def MLP(channels, activation=nn.LeakyReLU(0.2), bn_momentum=0.1, bias=True):
    return Seq(
        *[
            Seq(Lin(channels[i - 1], channels[i], bias=bias), BN(channels[i], momentum=bn_momentum), activation)
            for i in range(1, len(channels))
        ]
    )
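A quick usage sketch for this variant, assuming the same Seq/Lin/BN aliases as above; note that a single activation module instance is shared across all layers (fine for stateless activations such as LeakyReLU), and that bn_momentum controls how quickly the BatchNorm running statistics are updated.

# Assumed aliases; widths and momentum are illustrative.
import torch
import torch.nn as nn
from torch.nn import Sequential as Seq, Linear as Lin, BatchNorm1d as BN

# Lin -> BN -> LeakyReLU(0.2) per layer, with slower-moving BN running averages.
head = MLP([64, 128, 256], bn_momentum=0.01)
y = head(torch.randn(4, 64))    # shape: (4, 256)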
Example #24
def MLP(channels, batch_norm=True):
    return Seq(*[
        Seq(Lin(channels[i - 1], channels[i]),
            BN(channels[i]),
            ReLU())
        for i in range(1, len(channels))
    ])
Example #25
    def __init__(self, hidden, config, **kwargs):
        super(CombAConv, self).__init__(aggr='add', **kwargs)

        if config.fea_activation == 'ELU':
            self.fea_activation = ELU()
        elif config.fea_activation == 'ReLU':
            self.fea_activation = ReLU()

        self.fea_mlp = Sequential(
            Linear(hidden, hidden),
            ReLU(),
            Linear(hidden, hidden),
            self.fea_activation)

        self.aggr_mlp = Sequential(
            Linear(hidden * 2, hidden),
            Tanh())

        if config.BN == 'Y':
            self.BN = BN(hidden)
        else:
            self.BN = None

        self.edge_encoder = torch.nn.Linear(7, hidden)

        self.reset_parameters()
Example #26
    def __init__(self, hidden, num_aggr, config, **kwargs):
        super(ExpandingBConv, self).__init__(aggr='add', **kwargs)
        self.hidden = hidden
        self.num_aggr = num_aggr

        if config.fea_activation == 'ELU':
            self.fea_activation = ELU()
        elif config.fea_activation == 'ReLU':
            self.fea_activation = ReLU()

        self.fea_mlp = Sequential(
            Linear(hidden * self.num_aggr, hidden),
            ReLU(),
            Linear(hidden, hidden),
            self.fea_activation)

        self.aggr_mlp = Sequential(
            Linear(hidden * 2, self.num_aggr),
            Tanh())

        self.edge_encoder = torch.nn.Linear(5, hidden)

        if config.BN == 'Y':
            self.BN = BN(hidden)
        else:
            self.BN = None

        self.reset_parameters()
Example #27
    def vggnet(self, num_classes=21, groups=2):
        self.conv.add_module(
            'conv1_s1', nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=0))
        self.conv.add_module('relu1_s1', nn.ReLU(inplace=True))
        # self.conv.add_module('bn1_s1',nn.BatchNorm2d(96))
        self.conv.add_module('pool1_s1', nn.MaxPool2d(kernel_size=3, stride=2))
        # here 'BN' is used as a local response normalization layer
        # (note the LRN-style local_size/alpha/beta arguments and the 'lrn' module names)
        self.conv.add_module('lrn1_s1',
                             BN(local_size=5, alpha=0.0001, beta=0.75))

        self.conv.add_module(
            'conv2_s1',
            nn.Conv2d(96, 256, kernel_size=5, padding=2, groups=groups))
        self.conv.add_module('relu2_s1', nn.ReLU(inplace=True))
        # self.conv.add_module('bn2_s1',nn.BatchNorm2d(256))
        self.conv.add_module('pool2_s1', nn.MaxPool2d(kernel_size=3, stride=2))
        self.conv.add_module('lrn2_s1',
                             BN(local_size=5, alpha=0.0001, beta=0.75))

        self.conv.add_module('conv3_s1',
                             nn.Conv2d(256, 384, kernel_size=3, padding=1))
        self.conv.add_module('relu3_s1', nn.ReLU(inplace=True))
        # self.conv.add_module('bn3_s1',nn.BatchNorm2d(384))

        self.conv.add_module(
            'conv4_s1',
            nn.Conv2d(384, 384, kernel_size=3, padding=1, groups=groups))
        # self.conv.add_module('bn4_s1',nn.BatchNorm2d(384))
        self.conv.add_module('relu4_s1', nn.ReLU(inplace=True))

        self.conv.add_module(
            'conv5_s1',
            nn.Conv2d(384, 256, kernel_size=3, padding=1, groups=groups))
        # self.conv.add_module('bn5_s1',nn.BatchNorm2d(256))
        self.conv.add_module('relu5_s1', nn.ReLU(inplace=True))
        self.conv.add_module('pool5_s1', nn.MaxPool2d(kernel_size=3, stride=2))

        self.fc6.add_module('fc6_s1', nn.Linear(256 * 6 * 6, 4096))
        self.fc6.add_module('relu6_s1', nn.ReLU(inplace=True))
        self.fc6.add_module('drop6_s1', nn.Dropout(p=0.5))

        self.fc7 = nn.Sequential()
        self.fc7.add_module('fc7', nn.Linear(4096, 4096))
        self.fc7.add_module('relu7', nn.ReLU(inplace=True))
        self.fc7.add_module('drop7', nn.Dropout(p=0.5))

        self.classifier = nn.Sequential()
        self.classifier.add_module('fc8', nn.Linear(4096, num_classes))
Example #28
    def __init__(self, num_classes, num_layers, feat_dim, embed_dim, jk_layer,
                 process_step, dropout):
        super(Net, self).__init__()

        self.dropout = dropout
        self.num_layers = num_layers
        self.convs = torch.nn.ModuleList()

        for i in range(num_layers):
            if i == 0:
                self.convs.append(
                    AGGINConv(Sequential(Linear(feat_dim, embed_dim), ReLU(),
                                         Linear(embed_dim, embed_dim), ReLU(),
                                         BN(embed_dim)),
                              train_eps=True,
                              dropout=self.dropout))
            else:
                self.convs.append(
                    AGGINConv(Sequential(Linear(embed_dim, embed_dim), ReLU(),
                                         Linear(embed_dim, embed_dim), ReLU(),
                                         BN(embed_dim)),
                              train_eps=True,
                              dropout=self.dropout))

        if jk_layer.isdigit():
            jk_layer = int(jk_layer)
            self.jump = JumpingKnowledge(mode='lstm',
                                         channels=embed_dim,
                                         num_layers=jk_layer)
            self.gpl = (Set2Set(embed_dim, processing_steps=process_step))
            self.fc1 = Linear(2 * embed_dim, embed_dim)
            # self.fc1 = Linear(embed_dim, embed_dim)
            self.fc2 = Linear(embed_dim, num_classes)
        elif jk_layer == 'cat':
            self.jump = JumpingKnowledge(mode=jk_layer)
            self.gpl = (Set2Set(num_layers * embed_dim,
                                processing_steps=process_step))
            self.fc1 = Linear(2 * embed_dim, embed_dim)
            # self.fc1 = Linear(num_layers * embed_dim, embed_dim)
            self.fc2 = Linear(embed_dim, num_classes)
        elif jk_layer == 'max':
            self.jump = JumpingKnowledge(mode=jk_layer)
            self.gpl = (Set2Set(embed_dim, processing_steps=process_step))
            self.fc1 = Linear(2 * embed_dim, embed_dim)
            # self.fc1 = Linear(embed_dim, embed_dim)
            self.fc2 = Linear(embed_dim, num_classes)
Example #29
def BwMLP(channels, batch_norm=True, activation='ReLU', bilinear='BiLinear'):
    return Seq(*[
        Seq(
            biLinears[bilinear](channels[i -
                                         1], channels[i], binary_act=False),
            activations[activation](), BN(channels[i]))
        for i in range(1, len(channels))
    ])
def MLP(channels, batch_norm=True):
    """Multi-layer perceptron, with ReLU non-linearities and batch normalization."""
    return Seq(*[
        Seq(
            Lin(channels[i - 1], channels[i]),
            BN(channels[i]) if batch_norm else nn.Identity(),
            LeakyReLU(negative_slope=0.2),
        ) for i in range(1, len(channels))
    ])
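For reference, a self-contained sketch of how this last MLP could be exercised; the torch.nn aliases are assumptions, since the excerpts above do not show their imports.

# Assumed aliases for the snippet above; the real source may differ.
import torch
import torch.nn as nn
from torch.nn import Sequential as Seq, Linear as Lin, BatchNorm1d as BN, LeakyReLU

net = MLP([3, 64, 128], batch_norm=True)         # Lin -> BN -> LeakyReLU per layer
x = torch.randn(16, 3)                           # a batch of 16 three-dimensional points
print(net(x).shape)                              # torch.Size([16, 128])

net_no_bn = MLP([3, 64, 128], batch_norm=False)  # BN replaced by nn.Identity()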