    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = SynchronizedBatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = SynchronizedBatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = SynchronizedBatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = SynchronizedBatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = SynchronizedBatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
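
These two constructors only register the layers; the residual wiring lives in forward, which the excerpts omit. Below is a minimal self-contained sketch of the standard ResNet forward for the BasicBlock variant (an assumption based on the usual torchvision pattern; nn.BatchNorm2d stands in for SynchronizedBatchNorm2d so the sketch runs without the sync-BN package):

import torch
import torch.nn as nn

def conv3x3(in_planes, out_planes, stride=1):
    # 3x3 convolution with padding, as in torchvision's ResNet
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)

class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)   # stand-in for SynchronizedBatchNorm2d
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample

    def forward(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.downsample is not None:
            # project the identity when resolution or width changes
            identity = self.downsample(x)
        return self.relu(out + identity)

block = BasicBlock(64, 64)
out = block(torch.randn(1, 64, 32, 32))  # shape: (1, 64, 32, 32)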
Example #3
    def __init__(self, in_channels, out_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), dilation=1):
        super(ConvBn2d, self).__init__()

        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding,
                              bias=False, dilation=dilation)
        #self.bn = nn.BatchNorm2d(out_channels)
        self.bn = SynchronizedBatchNorm2d(out_channels)
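
The forward is omitted in the excerpt; for a Conv+BN wrapper like ConvBn2d it is conventionally just the two layers in sequence (an assumed sketch, not the source's code):

    def forward(self, x):
        # Assumed: convolution followed by synchronized batch norm.
        return self.bn(self.conv(x))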
    def __init__(self, in_channel):
        super(RefineBlock, self).__init__()
        self.c1 = nn.Conv2d(in_channel, 512, kernel_size=1, stride=1, padding=0, bias=False)
        self.c3_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn = SynchronizedBatchNorm2d(512)
        self.relu = nn.ReLU(inplace=True)
        self.c3_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False)

    def __init__(self, in_size, out_size):
        super(GAU, self).__init__()
        self.in_size = in_size
        self.out_size = out_size
        self.conv = nn.Conv2d(in_size * 2, out_size, kernel_size=1, stride=1, bias=False)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.bn = SynchronizedBatchNorm2d(in_size)
        self.relu = nn.ReLU(inplace=True)
Example #6
def basic_bn_shortcut(inplanes, outplanes, stride):
    return nn.Sequential(
        nn.Conv2d(inplanes,
                  outplanes,
                  kernel_size=1,
                  stride=stride,
                  bias=False),
        SynchronizedBatchNorm2d(outplanes),
    )
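
basic_bn_shortcut builds the standard 1x1 projection used on the identity branch when a residual stage changes resolution or channel width. A hedged usage sketch (the call site is assumed, not shown in the source):

# Hypothetical call site: project 256 channels to 512 at half resolution so
# the identity can be added to the output of a stride-2 residual stage.
downsample = basic_bn_shortcut(256, 512, stride=2)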
Example #7
    def __init__(self,
                 inplanes,
                 outplanes,
                 innerplanes,
                 stride=1,
                 dilation=1,
                 group=1,
                 downsample=None):
        super().__init__()
        # In original resnet, stride=2 is on 1x1.
        # In fb.torch resnet, stride=2 is on 3x3.
        (str1x1, str3x3) = (stride, 1) if cfg.RESNETS.STRIDE_1X1 else (1, stride)
        self.stride = stride

        self.conv1 = nn.Conv2d(inplanes,
                               innerplanes,
                               kernel_size=1,
                               stride=str1x1,
                               bias=False)
        self.bn1 = SynchronizedBatchNorm2d(innerplanes)

        self.conv2 = nn.Conv2d(innerplanes,
                               innerplanes,
                               kernel_size=3,
                               stride=str3x3,
                               bias=False,
                               padding=1 * dilation,
                               dilation=dilation,
                               groups=group)
        self.bn2 = SynchronizedBatchNorm2d(innerplanes)

        self.conv3 = nn.Conv2d(innerplanes,
                               outplanes,
                               kernel_size=1,
                               stride=1,
                               bias=False)
        self.bn3 = SynchronizedBatchNorm2d(outplanes)

        self.downsample = downsample
        self.relu = nn.ReLU(inplace=True)
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                SynchronizedBatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)
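
_make_layer follows the torchvision stage-builder pattern: only the first block receives the stride and the projection shortcut; the remaining blocks run at stride 1 on the expanded width. A hedged sketch of how a ResNet-50-style constructor would call it (block type and counts are the standard ones, assumed here):

        # Assuming Bottleneck.expansion == 4, these calls yield the usual
        # 256/512/1024/2048-channel stages of ResNet-50.
        self.layer1 = self._make_layer(Bottleneck, 64, 3)
        self.layer2 = self._make_layer(Bottleneck, 128, 4, stride=2)
        self.layer3 = self._make_layer(Bottleneck, 256, 6, stride=2)
        self.layer4 = self._make_layer(Bottleneck, 512, 3, stride=2)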
    def add_norm_layer(layer):
        if norm_type == 'batch':
            norm_layer = nn.BatchNorm2d(get_out_channel(layer),
                                        affine=True,
                                        track_running_stats=True)
        elif norm_type == 'sync_batch':
            norm_layer = SynchronizedBatchNorm2d(get_out_channel(layer),
                                                 affine=True)
        elif norm_type == 'instance':
            norm_layer = nn.InstanceNorm2d(get_out_channel(layer),
                                           affine=False)
        else:
            raise ValueError('normalization layer %s is not recognized' %
                             norm_type)

        return norm_layer
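
add_norm_layer closes over a norm_type string and sizes the norm from the preceding layer. A minimal usage sketch, assuming get_out_channel simply reads the layer's out_channels attribute (the helper is not shown in the excerpt):

import torch.nn as nn

norm_type = 'instance'  # closure variable read by add_norm_layer

def get_out_channel(layer):
    # Assumed helper: report how many channels the norm layer must match.
    return layer.out_channels

conv = nn.Conv2d(3, 64, kernel_size=3, padding=1)
block = nn.Sequential(conv, add_norm_layer(conv), nn.ReLU(inplace=True))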
Example #10
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel,
                 stride=1,
                 activation=None,
                 use_bn=False):
        super(BaseConv, self).__init__()
        self.use_bn = use_bn
        self.activation = activation
        self.conv = nn.Conv2d(in_channels, out_channels, kernel, stride,
                              kernel // 2)
        self.conv.weight.data.normal_(0, 0.01)
        self.conv.bias.data.zero_()
        self.bn = SynchronizedBatchNorm2d(out_channels)
        self.bn.weight.data.fill_(1)
        self.bn.bias.data.zero_()
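
The excerpt initializes the weights but omits forward; for a conv/BN/activation wrapper like this the conventional pattern is (an assumed sketch, not the source's code):

    def forward(self, x):
        # Assumed order: convolution, optional sync-BN, optional activation.
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activation is not None:
            x = self.activation(x)
        return x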
    def __init__(self, in_channel, out_channel):
        super(FPA, self).__init__()

        self.c15_1 = nn.Conv2d(in_channel, out_channel, kernel_size=15, stride=1, padding=7, bias=False)
        self.c11_1 = nn.Conv2d(in_channel, out_channel, kernel_size=11, stride=1, padding=5, bias=False)
        self.c7_1 = nn.Conv2d(in_channel, out_channel, kernel_size=7, stride=1, padding=3, bias=False)
        self.c3_1 = nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False)

        self.c15_2 = nn.Conv2d(in_channel, out_channel, kernel_size=15, stride=1, padding=7, bias=False)
        self.c11_2 = nn.Conv2d(in_channel, out_channel, kernel_size=11, stride=1, padding=5, bias=False)
        self.c7_2 = nn.Conv2d(in_channel, out_channel, kernel_size=7, stride=1, padding=3, bias=False)
        self.c3_2 = nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False)

        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.c1_gpb = nn.Conv2d(in_channel, out_channel, kernel_size=1, bias=False)

        self.bn = SynchronizedBatchNorm2d(out_channel)
        self.relu = nn.ReLU(inplace=True)
Example #12
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 padding=1,
                 dilation=1,
                 stride=1,
                 groups=1,
                 is_bn=True):
        super(ConvBn2d, self).__init__()
        self.conv = nn.Conv2d(in_channels,
                              out_channels,
                              kernel_size=kernel_size,
                              padding=padding,
                              stride=stride,
                              dilation=dilation,
                              groups=groups,
                              bias=False)
        self.bn = SynchronizedBatchNorm2d(out_channels)
    def __init__(self, block, layers, num_classes=1000):
        super(MV3_1_true_2_ResNet, self).__init__()
        # self.do = nn.Dropout(p=0.5)

        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = SynchronizedBatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.rb1_1 = RefineBlock(256)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.rb2_1 = RefineBlock(512)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.rb3_1 = RefineBlock(1024)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.rb4_1 = RefineBlock(2048)
        # self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # self.fc = nn.Linear(512 * block.expansion, num_classes)
        # only for >=res50

        # self.fpa=FPA(2048,512)
        self.fpa = FPA(512, 512)
        self.rb4_2 = RefineBlock(512 * 5)

        self.fuse43 = GAU(512, 512)
        # self.post_proc43 = conv3x3_bn(512*2,512)
        self.rb3_2 = RefineBlock(512)
        self.fuse32 = GAU(512, 512)
        self.rb2_2 = RefineBlock(512)
        # self.post_proc32 = conv3x3_bn(512)
        self.fuse21 = GAU(512, 512)
        self.rb1_2 = RefineBlock(512)
        # self.post_proc21 = conv3x3_bn(512)

        self.class_conv = nn.Conv2d(512, num_classes, kernel_size=3, stride=1,
                                    padding=1, bias=True)
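
A hedged instantiation sketch: the RefineBlock widths above (256/512/1024/2048) match Bottleneck.expansion = 4, consistent with the "only for >=res50" comment, so a ResNet-50-style configuration would look like this (layer counts and class count are assumptions):

# Hypothetical call; [3, 4, 6, 3] is the standard ResNet-50 layout and
# num_classes depends on the segmentation task.
model = MV3_1_true_2_ResNet(Bottleneck, [3, 4, 6, 3], num_classes=21)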
Example #14
def sync_batchnorm_2d(in_features, eps=1e-5, momentum=0.0001, affine=True):
    return SynchronizedBatchNorm2d(in_features,
                                   eps=eps,
                                   momentum=momentum,
                                   affine=affine)
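
Note the unusually small default momentum (0.0001 versus PyTorch's batch-norm default of 0.1), which makes the running statistics update far more slowly. Usage is a drop-in replacement for nn.BatchNorm2d:

bn = sync_batchnorm_2d(64)  # replaces nn.BatchNorm2d(64) in a multi-GPU model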