import pytest
from itertools import product

import torch
import torch.nn.functional as F
from torch.nn import ReLU, BatchNorm1d
from torch_geometric.nn import LayerNorm
from torch_geometric.nn.models import GCN, GraphSAGE, GIN, GAT

dropouts = [0.0, 0.5]
acts = [None, torch.relu_, F.elu, ReLU()]
norms = [None, BatchNorm1d(16), LayerNorm(16)]
jks = ['last', 'cat', 'max', 'lstm']


@pytest.mark.parametrize('dropout,act,norm,jk',
                         product(dropouts, acts, norms, jks))
def test_gcn(dropout, act, norm, jk):
    x = torch.randn(3, 8)
    edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
    out_channels = 16 if jk != 'cat' else 32

    model = GCN(in_channels=8,
                hidden_channels=16,
                num_layers=2,
                dropout=dropout,
                act=act,
                norm=norm,
                jk=jk)
    assert str(model) == f'GCN(8, {out_channels}, num_layers=2)'
    assert model(x, edge_index).size() == (3, out_channels)
Example #2
    def __init__(self, in_dim=3):

        super(Generator, self).__init__()

        self.down = nn.Sequential(
            # k7n64s1, out H x W
            Conv2d(kernel_size=7, in_channels=in_dim, out_channels=64, stride=1, padding=3, bias=False),
            BatchNorm2d(64),
            ReLU(),

            # Down-convolution
            # k3n128s2, k3n128s1, out H/2 x W/2
            Conv2d(kernel_size=3, in_channels=64, out_channels=128, stride=2, padding=1, bias=False),
            Conv2d(kernel_size=3, in_channels=128, out_channels=128, stride=1, padding=1, bias=False),
            BatchNorm2d(128),
            ReLU(),

            # k3n256s2, k3n256s1, out H/4 x W/4
            Conv2d(kernel_size=3, in_channels=128, out_channels=256, stride=2, padding=1, bias=False),
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256),
            ReLU())

        self.res1 = nn.Sequential(
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256),
            ReLU(),
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256) )

        self.res2 = nn.Sequential(
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256),
            ReLU(),
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256))

        self.res3 = nn.Sequential(
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256),
            ReLU(),
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256))

        self.res4 = nn.Sequential(
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256),
            ReLU(),
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256))

        self.res5 = nn.Sequential(
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256),
            ReLU(),
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256))

        self.res6 = nn.Sequential(
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256),
            ReLU(),
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256))

        self.res7 = nn.Sequential(
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256),
            ReLU(),
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256))

        self.res8 = nn.Sequential(
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256),
            ReLU(),
            Conv2d(kernel_size=3, in_channels=256, out_channels=256, stride=1, padding=1, bias=False),
            BatchNorm2d(256))


        self.up = nn.Sequential(
            #up-convolution  out H/2 x W/2
            ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False),
            Conv2d(kernel_size=3, in_channels=128, out_channels=128, stride=1, padding=1, bias=False),
            BatchNorm2d(128),
            ReLU(),

            # out H x W
            ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=3, stride=2, padding=1, output_padding=1,
                            bias=False),
            Conv2d(kernel_size=3, in_channels=64, out_channels=64, stride=1, padding=1, bias=False),
            BatchNorm2d(64),
            ReLU(),

            # Final conv
            Conv2d(kernel_size=7, in_channels=64, out_channels=3, stride=1, padding=3, bias=False),
            nn.Tanh())
Example #3
    def __init__(self,
                 in_dim,
                 hidden_dim,
                 out_dim,
                 dropout=0.5,
                 name='gat',
                 heads=8,
                 residual=True):
        super(GNNModelPYG, self).__init__()
        self.dropout = dropout
        self.name = name
        self.residual = None
        if residual:
            if in_dim == out_dim:
                self.residual = Identity()
            else:
                self.residual = Linear(in_dim, out_dim)

        if name == 'gat':
            self.conv1 = GATConv(in_dim,
                                 hidden_dim,
                                 heads=heads,
                                 dropout=dropout)
            self.conv2 = GATConv(hidden_dim * heads,
                                 out_dim,
                                 heads=1,
                                 concat=False,
                                 dropout=dropout)
        elif name == 'gcn':
            self.conv1 = GCNConv(in_dim,
                                 hidden_dim,
                                 cached=True,
                                 normalize=True,
                                 add_self_loops=False)
            self.conv2 = GCNConv(hidden_dim,
                                 out_dim,
                                 cached=True,
                                 normalize=True,
                                 add_self_loops=False)
        elif name == 'cheb':
            self.conv1 = ChebConv(in_dim, hidden_dim, K=2)
            self.conv2 = ChebConv(hidden_dim, out_dim, K=2)
        elif name == 'spline':
            self.conv1 = SplineConv(in_dim, hidden_dim, dim=1, kernel_size=2)
            self.conv2 = SplineConv(hidden_dim, out_dim, dim=1, kernel_size=2)
        elif name == 'gin':
            self.conv1 = GINConv(
                Sequential(Linear(in_dim, hidden_dim), ReLU(),
                           Linear(hidden_dim, hidden_dim)))
            self.conv2 = GINConv(
                Sequential(Linear(hidden_dim, hidden_dim), ReLU(),
                           Linear(hidden_dim, out_dim)))
        elif name == 'unet':
            self.conv1 = GraphUNet(in_dim, hidden_dim, out_dim, depth=3)
        elif name == 'agnn':
            self.lin1 = Linear(in_dim, hidden_dim)
            self.conv1 = AGNNConv(requires_grad=False)
            self.conv2 = AGNNConv(requires_grad=True)
            self.lin2 = Linear(hidden_dim, out_dim)
        else:
            raise NotImplemented("""
            Unknown model name. Choose from gat, gcn, cheb, spline, gin, unet, agnn."""
                                 )
Example #4
 def __init__(self, dim_obs, dim_action, hidden_units):
     super().__init__(SacLinear(dim_obs, hidden_units), ReLU(),
                      SacLinear(hidden_units, hidden_units), ReLU(),
                      TanhNormalLayer(hidden_units, dim_action))
 def __init__(self,
              in_channels,
              channels,
              kernel_size,
              stride=(1, 1),
              padding=(0, 0),
              dilation=(1, 1),
              groups=1,
              bias=True,
              radix=2,
              reduction_factor=4,
              rectify=False,
              rectify_avg=False,
              norm=None,
              dropblock_prob=0.0,
              **kwargs):
     super(SplAtConv2d, self).__init__()
     padding = _pair(padding)
     self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
     self.rectify_avg = rectify_avg
     inter_channels = max(in_channels * radix // reduction_factor, 32)
     self.radix = radix
     self.cardinality = groups
     self.channels = channels
     self.dropblock_prob = dropblock_prob
     if self.rectify:
         from rfconv import RFConv2d
         self.conv = RFConv2d(in_channels,
                              channels * radix,
                              kernel_size,
                              stride,
                              padding,
                              dilation,
                              groups=groups * radix,
                              bias=bias,
                              average_mode=rectify_avg,
                              **kwargs)
     else:
         self.conv = Conv2d(in_channels,
                            channels * radix,
                            kernel_size,
                            stride,
                            padding,
                            dilation,
                            groups=groups * radix,
                            bias=bias,
                            **kwargs)
     self.use_bn = norm is not None
     if self.use_bn:
         self.bn0 = get_norm(norm, channels * radix)
     self.relu = ReLU(inplace=True)
     self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
     if self.use_bn:
         self.bn1 = get_norm(norm, inter_channels)
     self.fc2 = Conv2d(inter_channels,
                       channels * radix,
                       1,
                       groups=self.cardinality)
     if dropblock_prob > 0.0:
         self.dropblock = DropBlock2D(dropblock_prob, 3)
     self.rsoftmax = rSoftMax(radix, groups)
Example #6
 def __init__(self, i, o):
     super(Residual, self).__init__()
     self.fc = Linear(i, o)
     self.bn = BatchNorm1d(o)
     self.relu = ReLU()
Example #7
def MLP(channels, batch_norm=True):
    return Seq(*[
        Seq(Lin(channels[i - 1], channels[i]), ReLU(),
            nn.GroupNorm(max(1, channels[i] // 16), channels[i]))
        for i in range(1, len(channels))
    ])
Example #8
"""Split-Attention"""
    def __init__(self, cfg, vocab):
        super(KumaDecompAttModel, self).__init__()
        self.cfg = cfg
        self.embed = nn.Embedding(cfg.n_embed,
                                  cfg.embed_size,
                                  padding_idx=cfg.pad_idx)
        self.vocab = vocab
        self.pad_idx = cfg.pad_idx
        self.dist_type = cfg.dist if cfg.dist else ""
        self.use_self_attention = cfg.self_attention
        self.selection = cfg.selection

        inp_dim = cfg.embed_size
        dim = cfg.hidden_size

        if cfg.fix_emb:
            self.embed.weight.requires_grad = False

        if self.cfg.projection:
            self.projection = nn.Linear(cfg.embed_size, cfg.proj_size)
            inp_dim = cfg.proj_size

        self.dropout = Dropout(p=cfg.dropout)
        self.activation = ReLU()

        if cfg.self_attention:
            self.max_dist = 11
            self.dist_embed = Embedding(2 * self.max_dist + 1, 1)
            self.self_attention = DeepDotAttention(inp_dim,
                                                   dim,
                                                   dropout=cfg.dropout)

            inp_dim = inp_dim * 2
            self.self_att_dropout = Dropout(p=cfg.dropout)

        # set attention mechanism (between premise and hypothesis)
        if "kuma" in self.dist_type:
            self.attention = KumaAttention(inp_dim,
                                           dim,
                                           dropout=cfg.dropout,
                                           dist_type=self.dist_type)
        else:
            self.attention = DeepDotAttention(inp_dim,
                                              dim,
                                              dropout=cfg.dropout)

        self.compare_layer = nn.Sequential(Linear(inp_dim * 2, dim),
                                           self.activation, self.dropout,
                                           Linear(dim, dim), self.activation,
                                           self.dropout)

        self.aggregate_layer = nn.Sequential(Linear(dim * 2, dim),
                                             self.activation, self.dropout,
                                             Linear(dim, dim), self.activation,
                                             self.dropout)

        self.output_layer = Linear(dim, cfg.output_size, bias=False)

        # lagrange (for controlling HardKuma attention percentage)
        self.lagrange_lr = cfg.lagrange_lr
        self.lagrange_alpha = cfg.lagrange_alpha
        self.lambda_init = cfg.lambda_init
        self.register_buffer('lambda0', torch.full((1, ), self.lambda_init))
        self.register_buffer('c0_ma', torch.full((1, ), 0.))  # moving average

        # for extracting attention
        self.hypo_mask = None
        self.prem_mask = None
        self.prem2hypo_att = None
        self.hypo2prem_att = None
        self.prem_self_att = None
        self.hypo_self_att = None
        self.prem_self_att_dist = None
        self.hypo_self_att_dist = None

        self.mask_diagonal = cfg.mask_diagonal
        self.relu_projection = False
        self.use_self_att_dropout = False

        self.reset_params()
        self.criterion = nn.CrossEntropyLoss(reduction='sum')
Example #10
    def __init__(self, dim):
        super(NetGIN, self).__init__()

        num_features = 83

        nn1_1 = Sequential(Linear(num_features, dim),
                           torch.nn.BatchNorm1d(dim), ReLU(), Linear(dim, dim),
                           torch.nn.BatchNorm1d(dim), ReLU())
        nn1_2 = Sequential(Linear(num_features, dim),
                           torch.nn.BatchNorm1d(dim), ReLU(), Linear(dim, dim),
                           torch.nn.BatchNorm1d(dim), ReLU())
        self.conv1_1 = GINConv(nn1_1, train_eps=True)
        self.conv1_2 = GINConv(nn1_2, train_eps=True)
        self.mlp_1 = Sequential(Linear(2 * dim, dim),
                                torch.nn.BatchNorm1d(dim), ReLU(),
                                Linear(dim, dim), torch.nn.BatchNorm1d(dim),
                                ReLU())

        nn2_1 = Sequential(Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU(),
                           Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU())
        nn2_2 = Sequential(Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU(),
                           Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU())
        self.conv2_1 = GINConv(nn2_1, train_eps=True)
        self.conv2_2 = GINConv(nn2_2, train_eps=True)
        self.mlp_2 = Sequential(Linear(2 * dim, dim),
                                torch.nn.BatchNorm1d(dim), ReLU(),
                                Linear(dim, dim), torch.nn.BatchNorm1d(dim),
                                ReLU())

        nn3_1 = Sequential(Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU(),
                           Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU())
        nn3_2 = Sequential(Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU(),
                           Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU())
        self.conv3_1 = GINConv(nn3_1, train_eps=True)
        self.conv3_2 = GINConv(nn3_2, train_eps=True)
        self.mlp_3 = Sequential(Linear(2 * dim, dim),
                                torch.nn.BatchNorm1d(dim), ReLU(),
                                Linear(dim, dim), torch.nn.BatchNorm1d(dim),
                                ReLU())

        nn4_1 = Sequential(Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU(),
                           Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU())
        nn4_2 = Sequential(Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU(),
                           Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU())
        self.conv4_1 = GINConv(nn4_1, train_eps=True)
        self.conv4_2 = GINConv(nn4_2, train_eps=True)
        self.mlp_4 = Sequential(Linear(2 * dim, dim),
                                torch.nn.BatchNorm1d(dim), ReLU(),
                                Linear(dim, dim), torch.nn.BatchNorm1d(dim),
                                ReLU())

        nn5_1 = Sequential(Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU(),
                           Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU())
        nn5_2 = Sequential(Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU(),
                           Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU())
        self.conv5_1 = GINConv(nn5_1, train_eps=True)
        self.conv5_2 = GINConv(nn5_2, train_eps=True)
        self.mlp_5 = Sequential(Linear(2 * dim, dim),
                                torch.nn.BatchNorm1d(dim), ReLU(),
                                Linear(dim, dim), torch.nn.BatchNorm1d(dim),
                                ReLU())

        nn6_1 = Sequential(Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU(),
                           Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU())
        nn6_2 = Sequential(Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU(),
                           Linear(dim, dim), torch.nn.BatchNorm1d(dim), ReLU())
        self.conv6_1 = GINConv(nn6_1, train_eps=True)
        self.conv6_2 = GINConv(nn6_2, train_eps=True)
        self.mlp_6 = Sequential(Linear(2 * dim, dim),
                                torch.nn.BatchNorm1d(dim), ReLU(),
                                Linear(dim, dim), torch.nn.BatchNorm1d(dim),
                                ReLU())

        self.set2set = Set2Set(1 * dim, processing_steps=6)
        self.fc1 = Linear(2 * dim, dim)
        self.fc4 = Linear(dim, 12)
Example #11
            num_features = dataset.num_features
        model = SAGEConv_3d(num_features, hidden)
    else:
        print('using 1st com graphsage')
        if data_use == 'pt':
            num_features = 1
        else:
            num_features = dataset.num_features
        model = SAGEConv_3d_com(num_features, hidden)
elif args.m == "GIN":
    if args.order == 'agg':
        if data_use == 'pt':
            num_features = 1
        else:
            num_features = dataset.num_features
        nn1 = Sequential(Linear(num_features, args.hidden), ReLU(),
                         Linear(args.hidden, args.hidden))
        print('using 1st agg GIN , hid=', args.hidden)
        model = GINConv_3d(nn1)
    else:
        if data_use == 'pt':
            num_features = 1
        else:
            num_features = dataset.num_features
        nn1 = Sequential(Linear(num_features, args.hidden), ReLU(),
                         Linear(args.hidden, args.hidden))
        print('using 1st com GIN , hid=', args.hidden)
        model = GINConv_3d_com(nn1)
else:
    print('error: model does not exist')
    def __init__(self, name, in_size, device):
        super(FCN, self).__init__()

        assert (in_size % 16 == 0)

        self.name = name
        self.in_size = in_size
        self.device = device

        self.convBlock1 = Sequential(
            Conv2d(in_channels=3, kernel_size=5, out_channels=32, stride=2, padding=2),
            BatchNorm2d(num_features=32, momentum=0.1),
            ReLU(inplace=True),

            Conv2d(in_channels=32, kernel_size=3, out_channels=32, stride=1, padding=1),
            BatchNorm2d(num_features=32, momentum=0.1),
            ReLU(inplace=True)
        )

        self.upsampling1 = ConvTranspose2d(in_channels=32, kernel_size=int(self.in_size / 2) + 1, out_channels=1,
                                           stride=1,
                                           padding=0)

        self.pool1 = MaxPool2d(kernel_size=2, stride=2)

        self.convBlock2 = Sequential(
            Conv2d(in_channels=32, kernel_size=3, out_channels=64, stride=1, padding=1),
            BatchNorm2d(num_features=64, momentum=0.1),
            ReLU(inplace=True),

            Conv2d(in_channels=64, kernel_size=3, out_channels=64, stride=1, padding=1),
            BatchNorm2d(num_features=64, momentum=0.1),
            ReLU(inplace=True)
        )

        self.upsampling2 = ConvTranspose2d(in_channels=64, kernel_size=3 * int(self.in_size / 4) + 1, out_channels=1,
                                           stride=1, padding=0)

        self.pool2 = MaxPool2d(kernel_size=2, stride=2)

        self.convBlock3 = Sequential(
            Conv2d(in_channels=64, kernel_size=3, out_channels=96, stride=1, padding=1),
            BatchNorm2d(num_features=96, momentum=0.1),
            ReLU(inplace=True),

            Conv2d(in_channels=96, kernel_size=3, out_channels=96, stride=1, padding=1),
            BatchNorm2d(num_features=96, momentum=0.1),
            ReLU(inplace=True)
        )

        self.upsampling3 = ConvTranspose2d(in_channels=96, kernel_size=7 * int(self.in_size / 8) + 1, out_channels=1,
                                           stride=1, padding=0)

        self.pool3 = MaxPool2d(kernel_size=2, stride=2)

        self.convBlock4 = Sequential(
            Conv2d(in_channels=96, kernel_size=3, out_channels=128, stride=1, padding=1),
            BatchNorm2d(num_features=128, momentum=0.1),
            ReLU(inplace=True),

            Conv2d(in_channels=128, kernel_size=3, out_channels=128, stride=1, padding=1),
            BatchNorm2d(num_features=128, momentum=0.1),
            ReLU(inplace=True)
        )

        self.upsampling4 = ConvTranspose2d(in_channels=128, kernel_size=15 * int(self.in_size / 16) + 1, out_channels=1,
                                           stride=1, padding=0)

        self.convScore = Sequential(
            Conv2d(in_channels=4, kernel_size=1, out_channels=1, stride=1, padding=0),
            Sigmoid()
        )

        self = self.to(device)

        self.optimizer = SGD(self.parameters(), lr=LR_SGD, momentum=MOMENTUM_SGD,
                             nesterov=True, weight_decay=WD_SGD)
Example #13
    def __init__(self, num_classes, is_test=False, config=None, device=None):
        """Compose a SSD model using the given components.
		"""
        super(SSD, self).__init__()

        # alpha = 1
        # alpha_base = alpha
        # alpha_ssd = 0.5 * alpha
        # alpha_lstm = 0.25 * alpha

        self.num_classes = num_classes
        self.base_net = MobileNetV1()
        self.is_test = is_test
        self.config = config

        self.BottleneckLSTM_1 = ConvLSTMCell(1024, 256)
        self.BottleneckLSTM_2 = ConvLSTMCell(256, 64)
        self.BottleneckLSTM_3 = ConvLSTMCell(64, 16)
        self.BottleneckLSTM_4 = ConvLSTMCell(16, 4)
        self.BottleneckLSTM_5 = ConvLSTMCell(4, 1)

        self.extras = ModuleList([
            Sequential(
                Conv2d(in_channels=256, out_channels=128, kernel_size=1),
                ReLU(),
                conv_dw_1(inp=128, oup=256, kernel_size=3, stride=2,
                          padding=1), ReLU()),
            Sequential(
                Conv2d(in_channels=64, out_channels=32, kernel_size=1), ReLU(),
                conv_dw_1(inp=32, oup=64, kernel_size=3, stride=2, padding=1),
                ReLU()),
            Sequential(
                Conv2d(in_channels=16, out_channels=8, kernel_size=1), ReLU(),
                conv_dw_1(inp=8, oup=16, kernel_size=3, stride=2, padding=1),
                ReLU()),
            Sequential(
                Conv2d(in_channels=4, out_channels=2, kernel_size=1), ReLU(),
                conv_dw_1(inp=2, oup=4, kernel_size=3, stride=2, padding=1),
                ReLU())
        ])

        self.regression_headers = ModuleList([
            conv_dw_1(inp=512, oup=6 * 4, kernel_size=3, padding=1),
            conv_dw_1(inp=256, oup=6 * 4, kernel_size=3, padding=1),
            conv_dw_1(inp=64, oup=6 * 4, kernel_size=3, padding=1),
            conv_dw_1(inp=16, oup=6 * 4, kernel_size=3, padding=1),
            conv_dw_1(inp=4, oup=6 * 4, kernel_size=3, padding=1),
            conv_dw_1(inp=1, oup=6 * 4, kernel_size=3, padding=1),
        ])

        self.classification_headers = ModuleList([
            conv_dw_1(inp=512, oup=6 * num_classes, kernel_size=3, padding=1),
            conv_dw_1(inp=256, oup=6 * num_classes, kernel_size=3, padding=1),
            conv_dw_1(inp=64, oup=6 * num_classes, kernel_size=3, padding=1),
            conv_dw_1(inp=16, oup=6 * num_classes, kernel_size=3, padding=1),
            conv_dw_1(inp=4, oup=6 * num_classes, kernel_size=3, padding=1),
            conv_dw_1(inp=1, oup=6 * num_classes, kernel_size=3, padding=1),
        ])

        self.conv_13 = conv_dw(512, 1024, 2)

        if device:
            self.device = device
        else:
            self.device = torch.device(
                "cuda:1" if torch.cuda.is_available() else "cpu")
        if is_test:
            self.config = config
            self.priors = config.priors.to(self.device)
import os
import os.path as osp
from itertools import product

import pytest
import torch
import torch.nn.functional as F
from torch.nn import BatchNorm1d, ReLU

from torch_geometric.nn import LayerNorm
from torch_geometric.nn.models import GAT, GCN, GIN, PNA, GraphSAGE

out_dims = [None, 8]
dropouts = [0.0, 0.5]
acts = [None, 'leaky_relu', torch.relu_, F.elu, ReLU()]
norms = [None, BatchNorm1d(16), LayerNorm(16)]
jks = [None, 'last', 'cat', 'max', 'lstm']


@pytest.mark.parametrize('out_dim,dropout,act,norm,jk',
                         product(out_dims, dropouts, acts, norms, jks))
def test_gcn(out_dim, dropout, act, norm, jk):
    x = torch.randn(3, 8)
    edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
    out_channels = 16 if out_dim is None else out_dim

    model = GCN(8,
                16,
                num_layers=2,
                out_channels=out_dim,
                dropout=dropout,
                act=act,
                norm=norm,
                jk=jk)
    assert str(model) == f'GCN(8, {out_channels}, num_layers=2)'
    assert model(x, edge_index).size() == (3, out_channels)
    def __init__(self, d1=3, d2=50, d3=15, d4=15, d5=10, num_classes=6):
        super(META4, self).__init__()

        self.edge_mlp = Seq(Lin(d1 * 3, d2), ReLU(), Lin(d2, d3))
        self.node_mlp = Seq(Lin(d1 * 6, d2), ReLU(), Lin(d2, d3))
        self.global_mlp = Seq(Lin(16, d2), ReLU(), Lin(d2, d3))

        self.fc1 = nn.Linear(d4, d5)
        self.dense1_bn = nn.BatchNorm1d(d5)
        self.fc2 = nn.Linear(d5, num_classes)
        self.dense2_bn = nn.BatchNorm1d(num_classes)
        self.global_pool = global_mean_pool

        def edge_model(source, target, edge_attr, u):
            # source, target: [E, F_x], where E is the number of edges.
            # edge_attr: [E, F_e]
            # u: [B, F_u], where B is the number of graphs.
            #print("edge_model")
            #print(source.size())
            #print(target.size())
            #print(edge_attr.size())
            out = torch.cat([source, target, edge_attr], dim=1)
            return self.edge_mlp(out)

        def node_model(x, edge_index, edge_attr, u):
            # x: [N, F_x], where N is the number of nodes.
            # edge_index: [2, E] with max entry N - 1.
            # edge_attr: [E, F_e]
            # u: [B, F_u]

            row, col = edge_index

            #print("node_model")
            #print(row.size())
            #print(col.size())
            #print(x[col].size())
            #print(edge_attr.size())

            out = torch.cat([x[col], edge_attr], dim=1)
            out = self.node_mlp(out)
            return scatter_mean(out, row, dim=0, dim_size=x.size(0))

        def global_model(x, edge_index, edge_attr, u, batch):
            # x: [N, F_x], where N is the number of nodes.
            # edge_index: [2, E] with max entry N - 1.
            # edge_attr: [E, F_e]
            # u: [B, F_u]
            # batch: [N] with max entry B - 1.

            #print("global_Model")
            #print("u.size():")
            #print(u.size())
            #print("scatter_mean(x,batch,..):")
            #smean = scatter_mean(x, batch, dim=0)
            #print(smean.size())

            out = torch.cat([u, scatter_mean(x, batch, dim=0)], dim=1)

            #print("out.size():")
            #print(out.size())
            return self.global_mlp(out)

        self.op = MetaLayer(edge_model, node_model, global_model)
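Only the constructor is shown above; the sketch below is an illustrative (not original) forward pass showing how self.op, global_mean_pool and the fully connected head defined above would typically be chained, assuming the older PyG MetaLayer calling convention matched by the functions above and torch.nn.functional imported as F.

    # Illustrative only; not part of the original model.
    def forward(self, x, edge_index, edge_attr, u, batch):
        x, edge_attr, u = self.op(x, edge_index, edge_attr, u, batch)
        x = self.global_pool(x, batch)            # mean-pool node features per graph: [B, d3]
        x = F.relu(self.dense1_bn(self.fc1(x)))   # d4 -> d5 (d4 equals d3 in the defaults)
        return self.dense2_bn(self.fc2(x))        # d5 -> num_classes logits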
Example #16
model.bias
'''
Parameter containing:
 0.2208
 0.2452
-0.1153
 0.1328
-0.0708
[torch.FloatTensor of size 5]'''

# Non-linear activation functions
from torch.nn import ReLU

# ReLU example
data = Variable(torch.Tensor([1, 2, -1, -2]))
relu = ReLU()
relu(data)
'''
Variable containing:
 1
 2
 0
 0
[torch.FloatTensor of size 4] '''

# Loss functions
import torch
import torch.nn
from torch.autograd import Variable

inputs = Variable(torch.randn(3, 5), requires_grad=True)
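The snippet is cut off here; a minimal continuation is sketched below on the assumption that a cross-entropy loss was about to be demonstrated on the random inputs.

# Not part of the original snippet: one plausible way the loss-function
# example might continue, using CrossEntropyLoss on the random inputs above.
targets = Variable(torch.LongTensor(3).random_(5))  # 3 samples, class indices in [0, 5)
criterion = torch.nn.CrossEntropyLoss()
loss = criterion(inputs, targets)
loss.backward()
print(loss)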
Example #17
 def __init__(self):
     super(MNISTNet, self).__init__()
     self.features = Sequential(
         Linear(in_features=28 * 28, out_features=14 * 14), ReLU(),
         Linear(in_features=14 * 14, out_features=10))
Example #18
    def __init__(self, dim):
        super(NetGIN, self).__init__()

        num_features = 492

        nn1_1 = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))
        nn1_2 = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))
        self.conv1_1 = GINConv(nn1_1, train_eps=True)
        self.conv1_2 = GINConv(nn1_2, train_eps=True)
        self.bn1 = torch.nn.BatchNorm1d(dim)
        self.mlp_1 = Sequential(Linear(2 * dim, dim), ReLU(), Linear(dim, dim))

        nn2_1 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        nn2_2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv2_1 = GINConv(nn2_1, train_eps=True)
        self.conv2_2 = GINConv(nn2_2, train_eps=True)
        self.bn2 = torch.nn.BatchNorm1d(dim)
        self.mlp_2 = Sequential(Linear(2 * dim, dim), ReLU(), Linear(dim, dim))

        nn3_1 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        nn3_2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv3_1 = GINConv(nn3_1, train_eps=True)
        self.conv3_2 = GINConv(nn3_2, train_eps=True)
        self.bn3 = torch.nn.BatchNorm1d(dim)
        self.mlp_3 = Sequential(Linear(2 * dim, dim), ReLU(), Linear(dim, dim))

        nn4_1 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        nn4_2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv4_1 = GINConv(nn4_1, train_eps=True)
        self.conv4_2 = GINConv(nn4_2, train_eps=True)
        self.bn4 = torch.nn.BatchNorm1d(dim)
        self.mlp_4 = Sequential(Linear(2 * dim, dim), ReLU(), Linear(dim, dim))

        self.fc1 = Linear(4 * dim, dim)
        self.fc2 = Linear(dim, dim)
        self.fc3 = Linear(dim, dim)
        self.fc4 = Linear(dim, 1)
Example #19
# This is the number of neuron in the hidden layer. We can play around with this number.
# More neurons and more layers(not included in this code), means a complex combination
# of the input parameters.
hidden_size = 2

# Since we are generating a binary class, we need to identify to which blob (class) the
# point belongs to.
output_size = 1

y = np.reshape(y, (len(y), 1))
inputs = torch.tensor(X, dtype=torch.float)
labels = torch.tensor(y, dtype=torch.float)

# We write a simple sequential two layer neural network model.
model = Sequential(Linear(in_features=input_size, out_features=hidden_size),
                   ReLU(),
                   Linear(in_features=hidden_size, out_features=output_size),
                   Sigmoid())

# Setup the loss function. We are currently using Binary Cross Entropy
# You can also use torch.nn.BCEWithLogitsLoss and remove the Sigmoid
# layer from the model as this is already included in the loss function.
criterion = torch.nn.BCELoss(reduction='mean')

# Setup the optimizer to determine the parameters for the neural network
# to do binary classification. Do play around this other optimizers.
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# How many epochs should be used for the model training?
num_epochs = 30
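The original snippet stops before the training loop; below is a minimal illustrative sketch (not from the source) that wires together the model, criterion, optimizer and num_epochs defined above.

# Illustrative training loop (not part of the original snippet).
for epoch in range(num_epochs):
    optimizer.zero_grad()              # reset gradients from the previous step
    outputs = model(inputs)            # forward pass: [N, 1] probabilities from the Sigmoid
    loss = criterion(outputs, labels)  # binary cross-entropy against the blob labels
    loss.backward()                    # backpropagate
    optimizer.step()                   # update the weights
    print(f'epoch {epoch + 1}/{num_epochs}, loss = {loss.item():.4f}')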
    def _initialize_layers(self):
        """Set the network's layers used in the forward pass"""

        n_in_channels = self.config['n_channels']
        n_classes = self.config['n_classes']

        self.conv1 = Conv2d(
            in_channels=n_in_channels, out_channels=64,
            kernel_size=(7, 7), stride=(2, 2), padding=(3, 3)
        )
        self.max_pooling1 = MaxPool2d(
            kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)
        )

        self.conv2 = Conv2d(
            in_channels=64, out_channels=192,
            kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)
        )
        self.conv3 = Conv2d(
            in_channels=192, out_channels=192,
            kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)
        )
        self.max_pooling2 = MaxPool2d(
            kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)
        )

        self.inception_3a = InceptionModule(
            n_in_channels=192, n_11_channels=64, n_33_reduce_channels=96,
            n_33_channels=128, n_55_reduce_channels=16, n_55_channels=32,
            n_post_pool_channels=32
        )
        self.inception_3b = InceptionModule(
            n_in_channels=256, n_11_channels=128, n_33_reduce_channels=128,
            n_33_channels=192, n_55_reduce_channels=32, n_55_channels=96,
            n_post_pool_channels=64
        )
        self.max_pooling3 = MaxPool2d(
            kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)
        )

        self.inception_4a = InceptionModule(
            n_in_channels=480, n_11_channels=192, n_33_reduce_channels=96,
            n_33_channels=208, n_55_reduce_channels=16, n_55_channels=48,
            n_post_pool_channels=64
        )
        self.inception_4b = InceptionModule(
            n_in_channels=512, n_11_channels=160, n_33_reduce_channels=112,
            n_33_channels=224, n_55_reduce_channels=24, n_55_channels=64,
            n_post_pool_channels=64
        )
        self.inception_4c = InceptionModule(
            n_in_channels=512, n_11_channels=128, n_33_reduce_channels=128,
            n_33_channels=256, n_55_reduce_channels=24, n_55_channels=64,
            n_post_pool_channels=64
        )
        self.inception_4d = InceptionModule(
            n_in_channels=512, n_11_channels=112, n_33_reduce_channels=144,
            n_33_channels=288, n_55_reduce_channels=32, n_55_channels=64,
            n_post_pool_channels=64
        )
        self.inception_4e = InceptionModule(
            n_in_channels=528, n_11_channels=256, n_33_reduce_channels=160,
            n_33_channels=320, n_55_reduce_channels=32, n_55_channels=128,
            n_post_pool_channels=128
        )
        self.max_pooling4 = MaxPool2d(
            kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)
        )

        self.inception_5a = InceptionModule(
            n_in_channels=832, n_11_channels=256, n_33_reduce_channels=160,
            n_33_channels=320, n_55_reduce_channels=32, n_55_channels=128,
            n_post_pool_channels=128
        )
        self.inception_5b = InceptionModule(
            n_in_channels=832, n_11_channels=384, n_33_reduce_channels=192,
            n_33_channels=384, n_55_reduce_channels=48, n_55_channels=128,
            n_post_pool_channels=128
        )

        self.average_pooling = AvgPool2d(kernel_size=(7, 7))
        self.dropout = Dropout(0.4)
        self.linear = Linear(
            in_features=1024, out_features=n_classes
        )

        self.auxiliary_classifier1 = AuxiliaryClassifier(
            n_in_channels=512, n_classes=n_classes
        )
        self.auxiliary_classifier2 = AuxiliaryClassifier(
            n_in_channels=528, n_classes=n_classes
        )

        self.relu = ReLU()
Example #21
 def __init__(self, dim_obs, dim_action, hidden_units):
     super().__init__(SacLinear(dim_obs + dim_action, hidden_units), ReLU(),
                      SacLinear(hidden_units, hidden_units), ReLU(),
                      Linear(hidden_units, 1))
    def __init__(self, n_in_channels, n_11_channels, n_33_reduce_channels,
                 n_33_channels, n_55_reduce_channels, n_55_channels,
                 n_post_pool_channels):
        """Init

        :param n_in_channels: number of channels in the inputs passed to the
         `forward` method
        :type n_in_channels: int
        :param n_11_channels: number of channels to use in the 1x1 convolution
         applied to the input
        :type n_11_channels: int
        :param n_33_reduce_channels: number of channels to use in the 1x1
         convolution applied to the input to reduce the number of channels
         before a 3x3 convolution
        :type n_33_reduce_channels: int
        :param n_33_channels: number of channels to use in the 3x3 convolution
         applied to the input
        :type n_33_channels: int
        :param n_55_reduce_channels: number of channels to use in the 1x1
         convolution applied to the input to reduce the number of channels
         before a 5x5 convolution
        :type n_55_reduce_channels: int
        :param n_55_channels: number of channels to use in the 5x5 convolution
         applied to the input
        :type n_55_channels: int
        :param n_post_pool_channels: number of channels to use in the
         convolution applied to the output of the 3x3 pooling
        :type n_post_pool_channels: int
        """

        super().__init__()

        self.conv_11 = Conv2d(
            in_channels=n_in_channels, out_channels=n_11_channels,
            kernel_size=(1, 1), stride=(1, 1)
        )
        self.conv_33_reduce = Conv2d(
            in_channels=n_in_channels, out_channels=n_33_reduce_channels,
            kernel_size=(1, 1), stride=(1, 1)
        )
        self.conv_55_reduce = Conv2d(
            in_channels=n_in_channels, out_channels=n_55_reduce_channels,
            kernel_size=(1, 1), stride=(1, 1)
        )

        self.conv_33 = Conv2d(
            in_channels=n_33_reduce_channels, out_channels=n_33_channels,
            kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)
        )
        self.conv_55 = Conv2d(
            in_channels=n_55_reduce_channels, out_channels=n_55_channels,
            kernel_size=(5, 5), stride=(1, 1), padding=(2, 2)
        )

        self.max_pooling = MaxPool2d(
            kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)
        )
        self.conv_post_pooling = Conv2d(
            in_channels=n_in_channels, out_channels=n_post_pool_channels,
            kernel_size=(1, 1), stride=(1, 1)
        )

        self.relu = ReLU()
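The snippet defines the branches of the Inception module but omits its forward pass; the sketch below is a purely illustrative reconstruction of the usual GoogLeNet-style combination (1x1, 3x3, 5x5 and pooled branches concatenated along the channel dimension), assuming torch is imported.

    # Illustrative sketch only: the original forward is not included in this snippet.
    def forward(self, x):
        branch_11 = self.relu(self.conv_11(x))
        branch_33 = self.relu(self.conv_33(self.relu(self.conv_33_reduce(x))))
        branch_55 = self.relu(self.conv_55(self.relu(self.conv_55_reduce(x))))
        branch_pool = self.relu(self.conv_post_pooling(self.max_pooling(x)))
        # output channels: n_11 + n_33 + n_55 + n_post_pool
        return torch.cat([branch_11, branch_33, branch_55, branch_pool], dim=1)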
 def __init__(self, channel, red=16):
     super(SCSEBlock, self).__init__()
     self.avg_pool = AdaptiveAvgPool2d(1)
     self.fc = Sequential(Linear(channel, red), ReLU(inplace=True), Linear(red, channel), Sigmoid())
    def __define_grouped_convolutions(self, input_shape, n_groups,
                                      n_channels_per_branch, is_dilated,
                                      layer_num):
        '''
        Define the layers inside a grouped convolutional block (the core Timeception operations).
        :param input_shape: e.g. (32, 1024, 128, 7, 7); element 1 is the number of input channels
        :param n_groups: e.g. 8
        :param n_channels_per_branch: e.g. [32, 40, 50, 62]
        :param is_dilated: whether to use dilated 3x3 kernels instead of larger kernel sizes
        :param layer_num: index of the current layer
        :return:
        '''

        n_channels_in = input_shape[1]

        n_branches = 5
        n_channels_per_group_in = int(n_channels_in /
                                      n_groups)  # input channels per group (= 128 in the example above)
        n_channels_out = int(n_groups * n_branches *
                             n_channels_per_branch)  # output channels of the whole temporal layer
        n_channels_per_group_out = int(n_channels_out /
                                       n_groups)  # output channels per group

        assert n_channels_in % n_groups == 0  # channels must divide evenly into the groups
        assert n_channels_out % n_groups == 0

        # type of multi-scale kernels to use: either multi_kernel_sizes or multi_dilation_rates
        # (dilation rates and kernel sizes are chosen to cover actions of varying temporal extent)
        if is_dilated:
            kernel_sizes = (3, 3, 3)
            dilation_rates = (1, 2, 3)
        else:
            kernel_sizes = (3, 5, 7)
            dilation_rates = (1, 1, 1)

        input_shape_per_group = list(input_shape)
        input_shape_per_group[1] = n_channels_per_group_in  # e.g. (32, 128, 128, 7, 7)

        # loop on groups, and define convolutions in each group
        for idx_group in range(n_groups):
            group_num = idx_group + 1  # current group index (1-based)
            # define the operations for this group
            self.__define_temporal_convolutional_block(input_shape_per_group,
                                                       n_channels_per_branch,
                                                       kernel_sizes,
                                                       dilation_rates,
                                                       layer_num, group_num)

        # activation layer
        layer_name = 'relu_tc%d' % (layer_num)
        layer = ReLU()
        layer._name = layer_name
        setattr(self, layer_name, layer)

        # channel shuffle layer
        layer_name = 'shuffle_tc%d' % (layer_num)
        layer = ChannelShuffleLayer(n_channels_out, n_groups)
        layer._name = layer_name
        setattr(self, layer_name, layer)
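For the example values in the docstring (n_channels_in = 1024, n_groups = 8, n_branches = 5, and, say, n_channels_per_branch = 32 at this layer), the arithmetic above yields n_channels_per_group_in = 1024 / 8 = 128, n_channels_out = 8 * 5 * 32 = 1280, and n_channels_per_group_out = 1280 / 8 = 160.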
def create_mobilenetv1_ssd(num_classes, is_test=False):
    base_net = MobileNetV1(1001).model  # disable dropout layer

    source_layer_indexes = [
        12,
        14,
    ]
    extras = ModuleList([
        Sequential(
            Conv2d(in_channels=1024, out_channels=256, kernel_size=1), ReLU(),
            Conv2d(in_channels=256,
                   out_channels=512,
                   kernel_size=3,
                   stride=2,
                   padding=1), ReLU()),
        Sequential(
            Conv2d(in_channels=512, out_channels=128, kernel_size=1), ReLU(),
            Conv2d(in_channels=128,
                   out_channels=256,
                   kernel_size=3,
                   stride=2,
                   padding=1), ReLU()),
        Sequential(
            Conv2d(in_channels=256, out_channels=128, kernel_size=1), ReLU(),
            Conv2d(in_channels=128,
                   out_channels=256,
                   kernel_size=3,
                   stride=2,
                   padding=1), ReLU()),
        Sequential(
            Conv2d(in_channels=256, out_channels=128, kernel_size=1), ReLU(),
            Conv2d(in_channels=128,
                   out_channels=256,
                   kernel_size=3,
                   stride=2,
                   padding=1), ReLU())
    ])

    regression_headers = ModuleList([
        Conv2d(in_channels=512, out_channels=6 * 4, kernel_size=3, padding=1),
        Conv2d(in_channels=1024, out_channels=6 * 4, kernel_size=3, padding=1),
        Conv2d(in_channels=512, out_channels=6 * 4, kernel_size=3, padding=1),
        Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1),
        Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=3, padding=1),
        Conv2d(in_channels=256, out_channels=6 * 4, kernel_size=3,
               padding=1),  # TODO: change to kernel_size=1, padding=0?
    ])

    classification_headers = ModuleList([
        Conv2d(in_channels=512,
               out_channels=6 * num_classes,
               kernel_size=3,
               padding=1),
        Conv2d(in_channels=1024,
               out_channels=6 * num_classes,
               kernel_size=3,
               padding=1),
        Conv2d(in_channels=512,
               out_channels=6 * num_classes,
               kernel_size=3,
               padding=1),
        Conv2d(in_channels=256,
               out_channels=6 * num_classes,
               kernel_size=3,
               padding=1),
        Conv2d(in_channels=256,
               out_channels=6 * num_classes,
               kernel_size=3,
               padding=1),
        Conv2d(in_channels=256,
               out_channels=6 * num_classes,
               kernel_size=3,
               padding=1),  # TODO: change to kernel_size=1, padding=0?
    ])

    return SSD(num_classes,
               base_net,
               source_layer_indexes,
               extras,
               classification_headers,
               regression_headers,
               is_test=is_test,
               config=config)
Example #26
    def test_conv_bn_relu(self, batch_size, input_channels_per_group, height,
                          width, output_channels_per_group, groups, kernel_h,
                          kernel_w, stride_h, stride_w, pad_h, pad_w, dilation,
                          padding_mode, use_relu, eps, momentum, freeze_bn):
        input_channels = input_channels_per_group * groups
        output_channels = output_channels_per_group * groups
        dilation_h = dilation_w = dilation

        conv_op = Conv2d(
            input_channels,
            output_channels,
            (kernel_h, kernel_w),
            (stride_h, stride_w),
            (pad_h, pad_w),
            (dilation_h, dilation_w),
            groups,
            False,  # No bias
            padding_mode).to(dtype=torch.double)
        bn_op = BatchNorm2d(output_channels, eps,
                            momentum).to(dtype=torch.double)
        relu_op = ReLU()

        cls = ConvBnReLU2d if use_relu else ConvBn2d
        qat_op = cls(
            input_channels,
            output_channels,
            (kernel_h, kernel_w),
            (stride_h, stride_w),
            (pad_h, pad_w),
            (dilation_h, dilation_w),
            groups,
            None,  # bias
            padding_mode,
            eps,
            momentum,
            freeze_bn=True,
            qconfig=default_qat_qconfig).to(dtype=torch.double)
        qat_op.apply(torch.ao.quantization.disable_fake_quant)
        if freeze_bn:
            qat_op.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
        else:
            qat_op.apply(torch.nn.intrinsic.qat.update_bn_stats)

        # align inputs and internal parameters
        input = torch.randn(batch_size,
                            input_channels,
                            height,
                            width,
                            dtype=torch.double,
                            requires_grad=True)
        conv_op.weight = torch.nn.Parameter(qat_op.weight.detach())
        bn_op.running_mean = qat_op.bn.running_mean.clone()
        bn_op.running_var = qat_op.bn.running_var.clone()
        bn_op.weight = torch.nn.Parameter(qat_op.bn.weight.detach())
        bn_op.bias = torch.nn.Parameter(qat_op.bn.bias.detach())

        def compose(functions):
            # functions are reversed for natural reading order
            return reduce(lambda f, g: lambda x: f(g(x)), functions[::-1],
                          lambda x: x)

        if not use_relu:

            def relu_op(x):
                return x

        if freeze_bn:

            def ref_op(x):
                x = conv_op(x)
                x = (x - bn_op.running_mean.reshape([1, -1, 1, 1])) * \
                    (bn_op.weight / torch.sqrt(bn_op.running_var + bn_op.eps)) \
                    .reshape([1, -1, 1, 1]) + bn_op.bias.reshape([1, -1, 1, 1])
                x = relu_op(x)
                return x
        else:
            ref_op = compose([conv_op, bn_op, relu_op])

        input_clone = input.clone().detach().requires_grad_()
        for i in range(2):
            result_ref = ref_op(input)
            result_actual = qat_op(input_clone)
            self.assertEqual(result_ref, result_actual)

            # backward
            dout = torch.randn(result_ref.size(), dtype=torch.double)
            loss = (result_ref - dout).sum()
            loss.backward()
            input_grad_ref = input.grad.cpu()
            weight_grad_ref = conv_op.weight.grad.cpu()
            gamma_grad_ref = bn_op.weight.grad.cpu()
            beta_grad_ref = bn_op.bias.grad.cpu()
            running_mean_ref = bn_op.running_mean
            running_var_ref = bn_op.running_var
            num_batches_tracked_ref = bn_op.num_batches_tracked
            loss = (result_actual - dout).sum()
            loss.backward()
            input_grad_actual = input_clone.grad.cpu()
            weight_grad_actual = qat_op.weight.grad.cpu()
            gamma_grad_actual = qat_op.bn.weight.grad.cpu()
            beta_grad_actual = qat_op.bn.bias.grad.cpu()
            running_mean_actual = qat_op.bn.running_mean
            running_var_actual = qat_op.bn.running_var
            num_batches_tracked_actual = qat_op.bn.num_batches_tracked
            precision = 1e-10
            self.assertEqual(input_grad_ref,
                             input_grad_actual,
                             atol=precision,
                             rtol=0)
            self.assertEqual(weight_grad_ref,
                             weight_grad_actual,
                             atol=precision,
                             rtol=0)
            self.assertEqual(gamma_grad_ref,
                             gamma_grad_actual,
                             atol=precision,
                             rtol=0)
            self.assertEqual(beta_grad_ref,
                             beta_grad_actual,
                             atol=precision,
                             rtol=0)
            self.assertEqual(num_batches_tracked_ref,
                             num_batches_tracked_actual,
                             atol=precision,
                             rtol=0)
            self.assertEqual(running_mean_ref,
                             running_mean_actual,
                             atol=precision,
                             rtol=0)
            self.assertEqual(running_var_ref,
                             running_var_actual,
                             atol=precision,
                             rtol=0)
Example #27
    def _build_model(self):
        self.conv_kernel = (3, 3)
        self.max_pool_kernel = (2, 2)

        self.model = Sequential(
            Conv2d(4,
                   64,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            Conv2d(64,
                   64,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            MaxPool2d(kernel_size=self.max_pool_kernel, stride=1),  #stride=2
            ReLU(),
            Conv2d(64,
                   128,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            Conv2d(128,
                   128,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            MaxPool2d(kernel_size=self.max_pool_kernel, stride=1),  #stride=2
            ReLU(),
            Conv2d(128,
                   256,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            Conv2d(256,
                   256,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            MaxPool2d(kernel_size=self.max_pool_kernel, stride=1),
            ReLU(),
            Conv2d(
                256,
                512,
                kernel_size=self.conv_kernel,
                stride=1,  #stride=2
                padding=0,
                bias=True),
            Conv2d(512,
                   512,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            MaxPool2d(kernel_size=self.max_pool_kernel, stride=1),
            ReLU(),
            Conv2d(512,
                   1024,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            Conv2d(1024,
                   1024,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            ReLU(),
            #MaxUnpool2d(kernel_size=(2, 2), stride=2),
            Upsample(scale_factor=2, mode='bilinear'),
            Conv2d(1024,
                   512,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            Conv2d(512,
                   512,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            ReLU(),
            #MaxUnpool2d(kernel_size=(2, 2), stride=2),
            Upsample(scale_factor=2, mode='bilinear'),
            Conv2d(512,
                   256,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            Conv2d(256,
                   256,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            ReLU(),
            #MaxUnpool2d(kernel_size=(2, 2), stride=2),
            Upsample(scale_factor=2, mode='bilinear'),
            Conv2d(256,
                   128,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            Conv2d(128,
                   128,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            ReLU(),
            Upsample(scale_factor=2, mode='bilinear'),
            Conv2d(128,
                   64,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            Conv2d(64,
                   64,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            Conv2d(64,
                   1,
                   kernel_size=self.conv_kernel,
                   stride=1,
                   padding=0,
                   bias=True),
            ReLU(),
        )
Example #28
 def __init__(self):
     super().__init__()
     self.mlp1 = Sequential(Linear(16, 16), ReLU(), Linear(16, 16))
     self.conv1 = SAGEConv(16, 32)
def MLP(channels, batch_norm=True):
    return Seq(*[
        Seq(Lin(channels[i - 1], channels[i]), ReLU(), BN(channels[i]))
        for i in range(1, len(channels))
    ])
Example #30
    def __init__(self):
        super(Net, self).__init__()

        #name
        self.name = "DirCNNfps"
        #optimizer
        self.lr = 0.001
        self.optimizer_name = 'Adam-Exp'

        #data
        self.data_name = "ModelNet10"
        #self.data_name = "Geometry"
        self.batch_size = 10
        self.nr_points = 1024
        self.nr_classes = 10 if self.data_name == 'ModelNet10' else 40

        #train_info
        self.max_epochs = 60
        self.save_every = 6
        

        #model
        self.k = 20
        self.l = 7
        
        # DD1
        self.in_size = 3
        self.out_size = 64
        layers = []
        layers.append(Linear(self.in_size, 64))
        layers.append(ReLU())
        layers.append(torch.nn.BatchNorm1d(64))
        layers.append(Linear(64 , 64))
        layers.append(ReLU())
        layers.append(torch.nn.BatchNorm1d(64))
        layers.append(Linear(64, self.out_size))
        layers.append(ReLU())
        layers.append(torch.nn.BatchNorm1d(self.out_size))
        dense3dnet = Sequential(*layers)
        self.dd = DD(l = self.l,
                        k = self.k,
                        mlp = dense3dnet,
                        conv_p  = True,
                        conv_fc = False,
                        conv_fn = False,
                        out_3d  = True)
        ## POOLING:
        self.ratio = 0.25
        self.nr_points_fps = self.nr_points * self.ratio
        if self.nr_points * self.ratio % 1 != 0:
            print("Not a good ratio!")
        self.nr_points_fps = int(self.nr_points_fps)

        # DD2
        self.in_size_2 = 64 * 3 
        self.out_size_2 = 128
        layers2 = []
        layers2.append(Linear(self.in_size_2, self.out_size_2))
        layers2.append(ReLU())
        layers2.append(torch.nn.BatchNorm1d(self.out_size_2))
        dense3dnet2 = Sequential(*layers2)
        self.dd2 = DD(l = self.l,
                        k = self.k,
                        mlp = dense3dnet2,
                        conv_p  = False,
                        conv_fc = False,
                        conv_fn = True,
                        out_3d  = False)


        self.nn1 = torch.nn.Linear(self.out_size_2, 1024)
        self.bn1 = torch.nn.BatchNorm1d(1024)
        self.nn2 = torch.nn.Linear(1024, 512)
        self.bn2 = torch.nn.BatchNorm1d(512)
        self.nn3 = torch.nn.Linear(512, 265)
        self.bn3 = torch.nn.BatchNorm1d(265)
        self.nn4 = torch.nn.Linear(265, self.nr_classes)

        self.sm = torch.nn.LogSoftmax(dim=1)