Example No. 1
    def __init__(self, cfg, in_channels):
        """
        Arguments:
            in_channels (int): number of channels of the input feature
            num_anchors (int): number of anchors to be predicted
        """
        super(RetinaNetHead, self).__init__()
        # TODO: Implement the sigmoid version first.
        num_classes = cfg.MODEL.RETINANET.NUM_CLASSES - 1
        num_anchors = len(cfg.MODEL.RETINANET.ASPECT_RATIOS) \
                        * cfg.MODEL.RETINANET.SCALES_PER_OCTAVE

        cls_tower = []
        bbox_tower = []
        for i in range(cfg.MODEL.RETINANET.NUM_CONVS):
            cls_tower.append(
                nn.Conv(
                    in_channels,
                    in_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1
                )
            )
            cls_tower.append(nn.ReLU())
            bbox_tower.append(
                nn.Conv(
                    in_channels,
                    in_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1
                )
            )
            bbox_tower.append(nn.ReLU())

        self.cls_tower = nn.Sequential(*cls_tower)
        self.bbox_tower = nn.Sequential(*bbox_tower)
        self.cls_logits = nn.Conv(
            in_channels, num_anchors * num_classes, kernel_size=3, stride=1,
            padding=1
        )
        self.bbox_pred = nn.Conv(
            in_channels, num_anchors * 4, kernel_size=3, stride=1,
            padding=1
        )

        # Initialization
        for modules in [self.cls_tower, self.bbox_tower, self.cls_logits,
                        self.bbox_pred]:
            for l in modules.modules():
                if isinstance(l, nn.Conv):
                    init.gauss_(l.weight, std=0.01)
                    init.constant_(l.bias, 0)

        # retinanet_bias_init
        prior_prob = cfg.MODEL.RETINANET.PRIOR_PROB
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        init.constant_(self.cls_logits.bias, bias_value)
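
Note: the last two lines implement the focal-loss prior initialization from the RetinaNet paper. Setting the classification bias to -log((1 - p)/p) makes the initial sigmoid output equal p (typically 0.01), so training does not start swamped by easy negatives. A quick framework-free check:

import math

prior_prob = 0.01  # a typical value for cfg.MODEL.RETINANET.PRIOR_PROB
bias_value = -math.log((1 - prior_prob) / prior_prob)

# sigmoid(bias_value) recovers the prior probability
sigmoid = 1.0 / (1.0 + math.exp(-bias_value))
assert abs(sigmoid - prior_prob) < 1e-9
print(bias_value)  # approx. -4.595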
Example No. 2
    def __init__(self, block, layers, output_stride, baseWidth=26, scale=4):
        super(Res2Net, self).__init__()
        self.baseWidth = baseWidth
        self.scale = scale
        self.inplanes = 64
        blocks = [1, 2, 4]
        if output_stride == 16:
            strides = [1, 2, 2, 1]
            dilations = [1, 1, 1, 2]
        elif output_stride == 8:
            strides = [1, 2, 1, 1]
            dilations = [1, 1, 2, 4]
        else:
            raise NotImplementedError

        # Modules
        self.conv1 = nn.Sequential(
            nn.Conv(3, 32, 3, 2, 1, bias=False),
            nn.BatchNorm(32),
            nn.ReLU(),
            nn.Conv(32, 32, 3, 1, 1, bias=False),
            nn.BatchNorm(32),
            nn.ReLU(),
            nn.Conv(32, 64, 3, 1, 1, bias=False)
        )
        self.bn1 = nn.BatchNorm(64)
        self.relu = nn.ReLU()
        # self.maxpool = nn.Pool(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2])
        self.layer4 = self._make_MG_unit(block, 512, blocks=blocks, stride=strides[3], dilation=dilations[3])
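
A note on the strides/dilations tables above: wherever a stage keeps stride 1 (layer3/layer4 at output_stride=8), its dilation is raised instead, so the receptive field still grows without further downsampling. The effective extent of a dilated kernel is k_eff = k + (k - 1)(d - 1); a quick check for the 3x3 convs used here:

# worked check of the dilated-conv receptive field, not part of the model
def effective_kernel(k, d):
    return k + (k - 1) * (d - 1)

assert effective_kernel(3, 1) == 3  # ordinary 3x3
assert effective_kernel(3, 2) == 5  # dilation 2 (layer3 at output_stride=8)
assert effective_kernel(3, 4) == 9  # dilation 4 (layer4 at output_stride=8)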
Example No. 3
 def __init__(self, block, layers, baseWidth=26, scale=4, num_classes=1000):
     self.inplanes = 64
     super(Res2Net, self).__init__()
     self.baseWidth = baseWidth
     self.scale = scale
     self.conv1 = nn.Sequential(
         nn.Conv(3, 32, 3, stride=2, padding=1, bias=False),
         nn.BatchNorm(32), nn.ReLU(),
         nn.Conv(32, 32, 3, stride=1, padding=1, bias=False),
         nn.BatchNorm(32), nn.ReLU(),
         nn.Conv(32, 64, 3, stride=1, padding=1, bias=False))
     self.bn1 = nn.BatchNorm(64)
     self.relu = nn.ReLU()
     self.maxpool = nn.Pool(3, stride=2, padding=1, op='maximum')
     self.layer1 = self._make_layer(block, 64, layers[0])
     self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
     self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
     self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
     self.avgpool = nn.AdaptiveAvgPool2d(1)
     self.fc = nn.Linear((512 * block.expansion), num_classes)
     for m in self.modules():
         if isinstance(m, nn.Conv):
             nn.init.kaiming_normal_(m.weight, mode='fan_out')
         elif isinstance(m, nn.BatchNorm):
             init.constant_(m.weight, value=1)
             init.constant_(m.bias, value=0)
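
The classifier width depends on block.expansion. Assuming the standard Res2Net Bottle2neck block, whose expansion factor is 4 (the block class is not shown here), the final fully connected layer sees 512 * 4 = 2048 features:

# assumption: block.expansion == 4, as in the usual Res2Net bottleneck
block_expansion = 4
assert 512 * block_expansion == 2048  # input width of self.fc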
Example No. 4
    def __init__(self, in_channels, out_channels):
        super(DANetHead, self).__init__()
        inter_channels = in_channels // 4
        self.conv5a = nn.Sequential(
            nn.Conv(in_channels, inter_channels, 3, padding=1, bias=False),
            nn.BatchNorm(inter_channels), nn.ReLU())

        self.conv5c = nn.Sequential(
            nn.Conv(in_channels, inter_channels, 3, padding=1, bias=False),
            nn.BatchNorm(inter_channels), nn.ReLU())

        self.sa = PAM_Module(inter_channels)
        self.sc = CAM_Module(inter_channels)
        self.conv51 = nn.Sequential(
            nn.Conv(inter_channels, inter_channels, 3, padding=1, bias=False),
            nn.BatchNorm(inter_channels), nn.ReLU())
        self.conv52 = nn.Sequential(
            nn.Conv(inter_channels, inter_channels, 3, padding=1, bias=False),
            nn.BatchNorm(inter_channels), nn.ReLU())

        # self.conv6 = nn.Sequential(nn.Dropout(0.1, False), nn.Conv(inter_channels, out_channels, 1))
        # self.conv7 = nn.Sequential(nn.Dropout(0.1, False), nn.Conv(inter_channels, out_channels, 1))

        self.conv8 = nn.Sequential(nn.Dropout(0.1, False),
                                   nn.Conv(inter_channels, out_channels, 1))
Example No. 5
 def __init__(self, in_ch, out_ch, mid_ch=None):
     super(DoubleConv, self).__init__()
     if mid_ch is None:
         mid_ch = out_ch
     self.double_conv = nn.Sequential(nn.Conv(in_ch, mid_ch, 3, padding=1),
                                      nn.BatchNorm(mid_ch), nn.ReLU(),
                                      nn.Conv(mid_ch, out_ch, 3, padding=1),
                                      nn.BatchNorm(out_ch), nn.ReLU())
Example No. 6
 def __init__(self, in_channels, out_channels, mid_channels=None):
     super().__init__()
     if mid_channels is None:
         mid_channels = out_channels
     self.double_conv = nn.Sequential(
         nn.Conv(in_channels, mid_channels, 3, padding=1),
         nn.BatchNorm(mid_channels), nn.ReLU(),
         nn.Conv(mid_channels, out_channels, 3, padding=1),
         nn.BatchNorm(out_channels), nn.ReLU())
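
Examples No. 5 and No. 6 are the same conv-BN-ReLU x2 block from U-Net ports. A minimal self-contained sketch, assuming the Jittor framework (whose nn.Conv/nn.BatchNorm names match the snippets) and adding the execute method the snippets omit:

import jittor as jt
from jittor import nn

class DoubleConv(nn.Module):
    # (conv 3x3 -> BatchNorm -> ReLU) twice; mid width defaults to out width
    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        if mid_channels is None:
            mid_channels = out_channels
        self.double_conv = nn.Sequential(
            nn.Conv(in_channels, mid_channels, 3, padding=1),
            nn.BatchNorm(mid_channels), nn.ReLU(),
            nn.Conv(mid_channels, out_channels, 3, padding=1),
            nn.BatchNorm(out_channels), nn.ReLU())

    def execute(self, x):  # Jittor's equivalent of forward
        return self.double_conv(x)

block = DoubleConv(3, 64)
y = block(jt.randn(1, 3, 32, 32))
print(y.shape)  # [1, 64, 32, 32]: padding=1 preserves the spatial size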
Example No. 7
 def __init__(self, input_dim, dim, numAngle, numRho):
     super(DHT_Layer, self).__init__()
     self.first_conv = nn.Sequential(nn.Conv2d(input_dim, dim, 1),
                                     nn.BatchNorm2d(dim), nn.ReLU())
     self.dht = DHT(numAngle=numAngle, numRho=numRho)
     self.convs = nn.Sequential(nn.Conv2d(dim, dim, 3, 1, 1),
                                nn.BatchNorm2d(dim), nn.ReLU(),
                                nn.Conv2d(dim, dim, 3, 1, 1),
                                nn.BatchNorm2d(dim), nn.ReLU())
Example No. 8
 def __init__(self, cin, cout, num_points, num_neighbors):
     super(KNNEmbed, self).__init__()
     self.num_points = num_points
     self.num_neighbors = num_neighbors
     self.embeds = nn.Sequential(
         nn.Conv1d(cin, cout, kernel_size=1, bias=False),
         nn.BatchNorm1d(cout), nn.ReLU(),
         nn.Conv1d(cout, cout, kernel_size=1, bias=False),
         nn.BatchNorm1d(cout), nn.ReLU())
Example No. 9
    def dis_loss(self, real_samps, fake_samps, height, alpha):
        r_preds = self.dis(real_samps, height, alpha)
        f_preds = self.dis(fake_samps, height, alpha)

        # loss = (torch.mean(nn.ReLU()(1 - r_preds)) +
        #         torch.mean(nn.ReLU()(1 + f_preds)))
        loss = (jt.mean(nn.ReLU()(1 - r_preds)) +
                jt.mean(nn.ReLU()(1 + f_preds)))
        return loss
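
This is the hinge adversarial loss for the discriminator: real samples are pushed to score above +1, fakes below -1, and samples already past the margin contribute nothing. A small numeric illustration (plain Python):

relu = lambda v: max(0.0, v)  # stand-in for nn.ReLU on scalars

assert relu(1 - 2.0) == 0.0     # real scored 2.0: past the margin, no loss
assert relu(1 - 0.5) == 0.5     # real scored 0.5: still penalized
assert relu(1 + (-3.0)) == 0.0  # fake scored -3.0: no loss
assert relu(1 + 0.5) == 1.5     # fake scored 0.5: penalized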
Example No. 10
 def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
     super(_DenseLayer, self).__init__()
     self.add_module('norm1', nn.BatchNorm(num_input_features))
     self.add_module('relu1', nn.ReLU())
     self.add_module('conv1', nn.Conv(num_input_features, (bn_size * growth_rate), 1, stride=1, bias=False))
     self.add_module('norm2', nn.BatchNorm((bn_size * growth_rate)))
     self.add_module('relu2', nn.ReLU())
     self.add_module('conv2', nn.Conv((bn_size * growth_rate), growth_rate, 3, stride=1, padding=1, bias=False))
     self.drop_rate = drop_rate
     self.drop = nn.Dropout(self.drop_rate)
Example No. 11
 def __init__(self, features, num_classes=1000, init_weights=True):
     super(VGG, self).__init__()
     self.features = features
     self.classifier = nn.Sequential(
         nn.Linear(512 * 7 * 7, 4096),
         nn.ReLU(),
         nn.Dropout(),
         nn.Linear(4096, 4096),
         nn.ReLU(),
         nn.Dropout(),
         nn.Linear(4096, num_classes),
     )
Example No. 12
    def __init__(self, num_classes):
        super(Decoder, self).__init__()
        low_level_inplanes = 256

        self.conv1 = nn.Conv(low_level_inplanes, 48, 1, bias=False)
        self.bn1 = nn.BatchNorm(48)
        self.relu = nn.ReLU()
        self.last_conv = nn.Sequential(
            nn.Conv(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm(256), nn.ReLU(),
            nn.Conv(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm(256), nn.ReLU(),
            nn.Conv(256, num_classes, kernel_size=1, stride=1, bias=True))
Example No. 13
    def __init__(self, num_classes):
        super(Decoder, self).__init__()
        low_level_inplanes = 256  # mobilenet: 24, resnet/res2net: 256, xception: 128

        self.conv1 = nn.Conv(low_level_inplanes, 48, 1, bias=False)
        self.bn1 = nn.BatchNorm(48)
        self.relu = nn.ReLU()
        self.last_conv = nn.Sequential(
            nn.Conv(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm(256), nn.ReLU(), nn.Dropout(0.5),
            nn.Conv(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm(256), nn.ReLU(), nn.Dropout(0.1),
            nn.Conv(256, num_classes, kernel_size=1, stride=1))
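
In both decoder variants the 304 input channels of last_conv come from concatenating the 256-channel ASPP output with the 48-channel reduced low-level features (256 + 48 = 304); the concatenation itself happens in the forward/execute method, which is not shown. A shape sketch, assuming Jittor:

import jittor as jt

aspp_out = jt.randn(1, 256, 64, 64)   # upsampled ASPP features (assumed shape)
low_level = jt.randn(1, 48, 64, 64)   # output of conv1/bn1/relu above
fused = jt.concat([aspp_out, low_level], dim=1)
print(fused.shape)  # [1, 304, 64, 64], matching nn.Conv(304, 256, ...)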
Example No. 14
 def __init__(self):
     super(Discriminator, self).__init__()
     self.down = nn.Sequential(nn.Conv(opt.channels, 64, 3, stride=2, padding=1), nn.ReLU())
     self.down_size = (opt.img_size // 2)
     down_dim = (64 * ((opt.img_size // 2) ** 2))
     self.embedding = nn.Linear(down_dim, 32)
     self.fc = nn.Sequential(
         nn.BatchNorm1d(32, 0.8),
         nn.ReLU(),
         nn.Linear(32, down_dim),
         nn.BatchNorm1d(down_dim),
         nn.ReLU()
     )
     self.up = nn.Sequential(nn.Upsample(scale_factor=2), nn.Conv(64, opt.channels, 3, stride=1, padding=1))
Example No. 15
def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.Pool(kernel_size=2, stride=2, op="maximum")]
        else:
            conv2d = nn.Conv(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm(v), nn.ReLU()]
            else:
                layers += [conv2d, nn.ReLU()]
            in_channels = v
    return nn.Sequential(*layers)
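
make_layers consumes a VGG-style cfg list: integers are 3x3 conv output widths and 'M' inserts a 2x2 max pool. A short usage sketch (this cfg is illustrative, not one of the official VGG configurations):

features = make_layers([64, 'M', 128, 'M'], batch_norm=True)
print(features)  # Sequential: Conv/BatchNorm/ReLU/Pool, Conv/BatchNorm/ReLU/Pool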
Example No. 16
 def __init__(self, args, margs):
     super(PointCloudTransformer, self).__init__(args, margs)
     self.input_embeds = nn.Sequential(
         Permute(0, 2, 1), nn.Conv1d(3, 64, kernel_size=1, bias=False),
         nn.BatchNorm1d(64), nn.ReLU(),
         nn.Conv1d(64, 64, kernel_size=1, bias=False), nn.BatchNorm1d(64),
         nn.ReLU(), Permute(0, 2, 1))
     self.knn_embeds = nn.Sequential(KNNEmbed(128, 128, 512, 32),
                                     KNNEmbed(256, 256, 256, 32))
     self.transformer = PointTransformer()
     self.classifier = nn.Sequential(nn.Linear(1024, 512),
                                     nn.BatchNorm1d(512), nn.ReLU(),
                                     nn.Dropout(p=0.5), nn.Linear(512, 256),
                                     nn.BatchNorm1d(256), nn.Dropout(p=0.5),
                                     nn.Linear(256, 40))
Example No. 17
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              dilation=1,
              downsample=None,
              BatchNorm=None,
              groups=1,
              base_width=64):
     super(Bottleneck, self).__init__()
     width = int(planes * (base_width / 64.)) * groups
     self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False)
     self.bn1 = BatchNorm(width)
     self.conv2 = nn.Conv2d(width,
                            width,
                            kernel_size=3,
                            stride=stride,
                            dilation=dilation,
                            padding=dilation,
                            bias=False,
                            groups=groups)
     self.bn2 = BatchNorm(width)
     self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
     self.bn3 = BatchNorm(planes * 4)
     self.relu = nn.ReLU()
     self.downsample = downsample
     self.stride = stride
     self.dilation = dilation
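
The width computation generalizes the bottleneck to ResNeXt-style grouped convolutions; with the defaults (groups=1, base_width=64) it reduces to width == planes. Worked values of the formula:

def bottleneck_width(planes, base_width=64, groups=1):
    return int(planes * (base_width / 64.)) * groups

assert bottleneck_width(64) == 64                            # plain ResNet
assert bottleneck_width(64, base_width=4, groups=32) == 128  # ResNeXt-50 32x4d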
Example No. 18
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              dilation=1,
              downsample=None,
              BatchNorm=None,
              groups=1,
              base_width=64):
     super(BasicBlock, self).__init__()
     if BatchNorm is None:
         BatchNorm = nn.BatchNorm2d
     if groups != 1 or base_width != 64:
         raise ValueError(
             'BasicBlock only supports groups=1 and base_width=64')
     if dilation > 1:
         raise NotImplementedError(
             "Dilation > 1 not supported in BasicBlock")
     # Both self.conv1 and self.downsample layers downsample the input when stride != 1
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = BatchNorm(planes)
     self.relu = nn.ReLU()
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = BatchNorm(planes)
     self.downsample = downsample
     self.stride = stride
Example No. 19
 def __init__(self, c_in, c_out, kernel_size, stride, padding, dilation):
     super(ConvBNReLU, self).__init__()
     self.conv = nn.Conv(
             c_in, c_out, kernel_size=kernel_size, stride=stride, 
             padding=padding, dilation=dilation, bias=False)
     self.bn = nn.BatchNorm(c_out)
     self.relu = nn.ReLU()
Example No. 20
    def __init__(self, part_num=50):
        super(PointConvDensity_partseg, self).__init__()
        self.part_num = part_num 

        self.sa0 = PointConvDensitySetAbstraction(npoint=1024, nsample=32, in_channel=3, mlp=[32, 32, 64], bandwidth=0.1, group_all=False)
        self.sa1 = PointConvDensitySetAbstraction(npoint=256, nsample=32, in_channel=64 + 3, mlp=[64, 64, 128], bandwidth=0.2, group_all=False)
        self.sa2 = PointConvDensitySetAbstraction(npoint=64, nsample=32, in_channel=128 + 3, mlp=[128, 128, 256], bandwidth=0.4, group_all=False)
        self.sa3 = PointConvDensitySetAbstraction(npoint=36, nsample=32, in_channel=256 + 3, mlp=[256, 256, 512], bandwidth=0.8, group_all=False)
        

        # upsampling / interpolation stages
        # (PointConvDensitySetInterpolation signature: nsample, in_channel, mlp, bandwidth)

        self.in0 = PointConvDensitySetInterpolation(nsample=16, in_channel=512 + 3, mlp=[512, 512], bandwidth=0.8)
        self.in1 = PointConvDensitySetInterpolation(nsample=16, in_channel=512 + 3, mlp=[256, 256], bandwidth=0.4)
        self.in2 = PointConvDensitySetInterpolation(nsample=16, in_channel=256 + 3, mlp=[128, 128], bandwidth=0.2)
        self.in3 = PointConvDensitySetInterpolation(nsample=16, in_channel=128 + 3, mlp=[128, 128, 128], bandwidth=0.1)
        
        # self.fp0 = PointConvDensitySetAbstraction(npoint=1024, nsample=32, in_channel=3, mlp=[32,32,64], bandwidth = 0.1, group_all=False)
        # self.fp1 = PointConvDensitySetAbstraction(npoint=256, nsample=32, in_channel=64 + 3, mlp=[64,64,128], bandwidth = 0.2, group_all=False)
        # self.fp2 = PointConvDensitySetAbstraction(npoint=64, nsample=32, in_channel=128 + 3, mlp=[128,128,256], bandwidth = 0.4, group_all=False)
        # self.fp3 = PointConvDensitySetAbstraction(npoint=36, nsample=32, in_channel=256 + 3, mlp=[256,256,512], bandwidth = 0.8, group_all=False)
        
        self.fc1 = nn.Conv1d(128, 128, 1)
        self.bn1 = nn.BatchNorm1d(128)
        self.drop1 = nn.Dropout(0.4)
        self.fc3 = nn.Conv1d(128, self.part_num, 1)
        self.relu = nn.ReLU() 
Example No. 21
    def __init__(self, output_channels=40):
        super(Point_Transformer, self).__init__()

        self.conv1 = nn.Conv1d(3, 128, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(128, 128, kernel_size=1, bias=False)

        self.bn1 = nn.BatchNorm1d(128)
        self.bn2 = nn.BatchNorm1d(128)

        self.sa1 = SA_Layer(128)
        self.sa2 = SA_Layer(128)
        self.sa3 = SA_Layer(128)
        self.sa4 = SA_Layer(128)

        self.conv_fuse = nn.Sequential(
            nn.Conv1d(512, 1024, kernel_size=1, bias=False),
            nn.BatchNorm1d(1024), nn.LeakyReLU(scale=0.2))

        self.linear1 = nn.Linear(1024, 512, bias=False)
        self.bn6 = nn.BatchNorm1d(512)
        self.dp1 = nn.Dropout(p=0.5)
        self.linear2 = nn.Linear(512, 256)
        self.bn7 = nn.BatchNorm1d(256)
        self.dp2 = nn.Dropout(p=0.5)
        self.linear3 = nn.Linear(256, output_channels)

        self.relu = nn.ReLU()
Example No. 22
File: pct.py Project: wddwzc/PCT
 def __init__(self, in_channels, out_channels):
     super(Local_op, self).__init__()
     self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=1, bias=False)
     self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size=1, bias=False)
     self.bn1 = nn.BatchNorm1d(out_channels)
     self.bn2 = nn.BatchNorm1d(out_channels)
     self.relu = nn.ReLU()
Example No. 23
    def __init__(self, in_channels, out_channels, pool_size):
        super(PyramidPool, self).__init__()

        self.conv = nn.Sequential(
            nn.AdaptiveAvgPool2d(pool_size),
            nn.Conv(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm(out_channels), nn.ReLU())
Example No. 24
def make_conv3x3(
    in_channels,
    out_channels,
    dilation=1,
    stride=1,
    use_gn=False,
    use_relu=False,
    kaiming_init=True
):
    conv = Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        bias=not use_gn
    )
    if kaiming_init:
        init.kaiming_normal_(
            conv.weight, mode="fan_out", nonlinearity="relu"
        )
    else:
        init.gauss_(conv.weight, std=0.01)
    if not use_gn:
        init.constant_(conv.bias, 0)
    module = [conv,]
    if use_gn:
        module.append(group_norm(out_channels))
    if use_relu:
        module.append(nn.ReLU())
    if len(module) > 1:
        return nn.Sequential(*module)
    return conv
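
A usage sketch for make_conv3x3 (Conv2d, group_norm and init come from the surrounding repository, so this assumes they are importable in scope):

# with GroupNorm + ReLU the helper returns an nn.Sequential; the conv
# bias is dropped because the norm layer provides its own shift
head = make_conv3x3(256, 256, use_gn=True, use_relu=True)

# with neither flag set it returns the bare Kaiming-initialized conv
plain = make_conv3x3(256, 256)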
Example No. 25
 def __init__(self, n_classes=40):
     super(PointConvDensityClsSsg, self).__init__()
     self.sa1 = PointConvDensitySetAbstraction(npoint=512,
                                               nsample=32,
                                               in_channel=3,
                                               mlp=[64, 64, 128],
                                               bandwidth=0.1,
                                               group_all=False)
     self.sa2 = PointConvDensitySetAbstraction(npoint=128,
                                               nsample=64,
                                               in_channel=128 + 3,
                                               mlp=[128, 128, 256],
                                               bandwidth=0.2,
                                               group_all=False)
     self.sa3 = PointConvDensitySetAbstraction(npoint=1,
                                               nsample=None,
                                               in_channel=256 + 3,
                                               mlp=[256, 512, 1024],
                                               bandwidth=0.4,
                                               group_all=True)
     self.fc1 = nn.Linear(1024, 512)
     self.bn1 = nn.BatchNorm1d(512)
     self.drop1 = nn.Dropout(0.4)
     self.fc2 = nn.Linear(512, 256)
     self.bn2 = nn.BatchNorm1d(256)
     self.drop2 = nn.Dropout(0.4)
     self.fc3 = nn.Linear(256, n_classes)
     self.relu = nn.ReLU()
Example No. 26
    def test_relu(self):
        # ***************************************************************
        # Test ReLU Layer
        # ***************************************************************
        arr = np.random.randn(16,10,224,224)
        check_equal(arr, jnn.ReLU(), tnn.ReLU())

        # ***************************************************************
        # Test PReLU Layer
        # ***************************************************************
        arr = np.random.randn(16,10,224,224)
        check_equal(arr, jnn.PReLU(), tnn.PReLU())
        check_equal(arr, jnn.PReLU(10, 99.9), tnn.PReLU(10, 99.9))
        check_equal(arr, jnn.PReLU(10, 2), tnn.PReLU(10, 2))
        check_equal(arr, jnn.PReLU(10, -0.2), tnn.PReLU(10, -0.2))
        
        # ***************************************************************
        # Test ReLU6 Layer
        # ***************************************************************
        arr = np.random.randn(16,10,224,224)
        check_equal(arr, jnn.ReLU6(), tnn.ReLU6())

        # ***************************************************************
        # Test LeakyReLU Layer
        # ***************************************************************
        arr = np.random.randn(16,10,224,224)
        check_equal(arr, jnn.LeakyReLU(), tnn.LeakyReLU())
        check_equal(arr, jnn.LeakyReLU(2), tnn.LeakyReLU(2))
        check_equal(arr, jnn.LeakyReLU(99.9), tnn.LeakyReLU(99.9))
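
check_equal is defined elsewhere in this test suite; a minimal sketch of what it presumably does (jnn/tnn are the Jittor and PyTorch nn modules; this helper body is an assumption, not the repository's actual code):

import numpy as np
import jittor as jt
import torch

def check_equal(arr, j_layer, t_layer, atol=1e-5):
    # hypothetical reimplementation: run the same array through the
    # Jittor layer and the PyTorch layer, then compare element-wise
    x = arr.astype(np.float32)
    j_out = j_layer(jt.array(x)).numpy()
    t_out = t_layer(torch.from_numpy(x)).detach().numpy()
    assert np.allclose(j_out, t_out, atol=atol)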
Example No. 27
    def __init__(self, in_size, out_size, inner_nc, dropout=0.0, innermost=False, outermost=False, submodule=None):
        super(UnetBlock, self).__init__()
        self.outermost = outermost

        downconv = nn.Conv(in_size, inner_nc, 4, stride=2, padding=1, bias=False)
        downnorm = nn.BatchNorm2d(inner_nc)
        downrelu = nn.LeakyReLU(0.2)
        upnorm = nn.BatchNorm2d(out_size)
        uprelu = nn.ReLU()

        if outermost:
            upconv = nn.ConvTranspose(2*inner_nc, out_size, 4, stride=2, padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose(inner_nc, out_size, 4, stride=2, padding=1, bias=False)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose(2*inner_nc, out_size, 4, stride=2, padding=1, bias=False)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if dropout:
                model = down + [submodule] + up + [nn.Dropout(dropout)]
            else:
                model = down + [submodule] + up
        
        self.model = nn.Sequential(*model)

        for m in self.modules():
            weights_init_normal(m)
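
UnetBlock is meant to be nested recursively, pix2pix-style: build the innermost block first, wrap it repeatedly, and mark the last wrapper as outermost. A construction sketch with illustrative channel counts (not taken from the original project; assumes weights_init_normal is in scope):

block = UnetBlock(256, 256, 512, innermost=True)    # deepest level
block = UnetBlock(128, 128, 256, submodule=block)   # middle levels: each
block = UnetBlock(64, 64, 128, submodule=block)     # wrapper doubles the width
net = UnetBlock(3, 3, 64, submodule=block, outermost=True)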
Example No. 28
    def __init__(self, block, layers, baseWidth=26, scale=4, num_classes=1000):
        self.inplanes = 64
        super(Res2Net, self).__init__()
        self.baseWidth = baseWidth
        self.scale = scale
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
Example No. 29
    def __init__(self, part_num=50):
        super(Point_Transformer_partseg, self).__init__()
        self.part_num = part_num
        self.conv1 = nn.Conv1d(3, 128, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(128, 128, kernel_size=1, bias=False)

        self.bn1 = nn.BatchNorm1d(128)
        self.bn2 = nn.BatchNorm1d(128)

        self.sa1 = SA_Layer(128)
        self.sa2 = SA_Layer(128)
        self.sa3 = SA_Layer(128)
        self.sa4 = SA_Layer(128)

        self.conv_fuse = nn.Sequential(
            nn.Conv1d(512, 1024, kernel_size=1, bias=False),
            nn.BatchNorm1d(1024), nn.LeakyReLU(scale=0.2))

        self.label_conv = nn.Sequential(
            nn.Conv1d(16, 64, kernel_size=1, bias=False), nn.BatchNorm1d(64),
            nn.LeakyReLU(scale=0.2))

        self.convs1 = nn.Conv1d(1024 * 3 + 64, 512, 1)
        self.dp1 = nn.Dropout(0.5)
        self.convs2 = nn.Conv1d(512, 256, 1)
        self.convs3 = nn.Conv1d(256, self.part_num, 1)
        self.bns1 = nn.BatchNorm1d(512)
        self.bns2 = nn.BatchNorm1d(256)

        self.relu = nn.ReLU()
Example No. 30
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 n_downsampling=3,
                 n_blocks=1,
                 norm_layer=nn.InstanceNorm2d,
                 padding_type='reflect'):
        assert (n_blocks >= 0)
        super(GeometryEncoder, self).__init__()
        activation = nn.ReLU()

        model = [
            nn.ReflectionPad2d(3),
            nn.Conv(input_nc, ngf, 7, padding=0),
            norm_layer(ngf), activation
        ]
        ### downsample
        for i in range(n_downsampling):
            mult = 2**i
            model += [
                nn.Conv(ngf * mult, ngf * mult * 2, 3, stride=2, padding=1),
                norm_layer(ngf * mult * 2), activation
            ]

        mult = 2**n_downsampling
        for i in range(n_blocks):
            model += [
                ResnetBlock(ngf * mult,
                            norm_type='in',
                            padding_type=padding_type)
            ]
        self.model = nn.Sequential(*model)
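
With the defaults (ngf=64, n_downsampling=3), the downsampling loop doubles the width at each step, so the encoder's channel progression is 64 -> 128 -> 256 -> 512 while the spatial size is halved three times. The arithmetic:

ngf, n_downsampling = 64, 3
widths = [ngf * 2 ** i for i in range(n_downsampling + 1)]
assert widths == [64, 128, 256, 512]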