    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 baseWidth=26,
                 scale=4,
                 stype='normal'):
        """ Constructor
        Args:
            inplanes: input channel dimensionality
            planes: output channel dimensionality
            stride: conv stride. Replaces pooling layer.
            downsample: None when stride = 1
            baseWidth: basic width of conv3x3
            scale: number of scale branches.
            stype: 'normal': normal block. 'stage': first block of a new stage.
        """
        super(Bottle2neck, self).__init__()

        width = int(math.floor(planes * (baseWidth / 64.0)))
        self.conv1 = nn.Conv2d(inplanes,
                               width * scale,
                               kernel_size=1,
                               bias=False)
        self.bn1 = BatchNorm2d(width * scale)

        if scale == 1:
            self.nums = 1
        else:
            self.nums = scale - 1
        if stype == 'stage':
            self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)
        convs = []
        bns = []
        for i in range(self.nums):
            convs.append(
                nn.Conv2d(width,
                          width,
                          kernel_size=3,
                          stride=stride,
                          padding=1,
                          bias=False))
            bns.append(BatchNorm2d(width))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)

        self.conv3 = nn.Conv2d(width * scale,
                               planes * self.expansion,
                               kernel_size=1,
                               bias=False)
        self.bn3 = BatchNorm2d(planes * self.expansion)

        self.relu = relu_fn
        self.downsample = downsample
        self.stype = stype
        self.scale = scale
        self.width = width
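
For reference, a sketch of the forward pass that typically accompanies this constructor in the official Res2Net code (assumes torch is imported in this module; variable names such as spx and sp are illustrative, and the repo's actual forward may differ):

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        # split the width*scale channels into `scale` groups of `width` channels
        spx = torch.split(out, self.width, 1)
        for i in range(self.nums):
            if i == 0 or self.stype == 'stage':
                sp = spx[i]
            else:
                sp = sp + spx[i]  # hierarchical residual-like connection across scales
            sp = self.convs[i](sp)
            sp = self.relu(self.bns[i](sp))
            out = sp if i == 0 else torch.cat((out, sp), 1)
        # the last split passes through unchanged ('normal') or is pooled to match the stride ('stage')
        if self.scale != 1 and self.stype == 'normal':
            out = torch.cat((out, spx[self.nums]), 1)
        elif self.scale != 1 and self.stype == 'stage':
            out = torch.cat((out, self.pool(spx[self.nums])), 1)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out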
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
     self.relu = relu_fn
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
     self.downsample = downsample
     self.stride = stride
    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'

        layers = []
        in_channels = self.inplanes
        for i in range(num_layers):  # only 3 layers?
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)  # returns 4, 1, 0

            planes = num_filters[i]  # 256 256 256
            layers.append(
                nn.Sequential(
                    DwTrans(in_channels,
                            out_channels=planes,
                            kernel_size=kernel,
                            stride=2,
                            padding=padding,
                            output_padding=output_padding,
                            bias=self.deconv_with_bias),
                    BatchNorm2d(planes, momentum=BN_MOMENTUM),
                    # nn.ReLU(inplace=True),
                    # extra convolutions appended afterwards
                    # nn.Conv2d(256,256,3,1,1),
                    # BatchNorm2d(256, momentum=BN_MOMENTUM),
                    # nn.ReLU(inplace=True),
                    # nn.Conv2d(256,256,3,1,1),
                    # BatchNorm2d(256, momentum=BN_MOMENTUM),
                    # nn.ReLU(inplace=True),
                ))
            in_channels = planes

        return nn.ModuleList(layers)  # [see] modified
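
The helper _get_deconv_cfg is not shown in these snippets. Consistent with the "returns 4, 1, 0" comment above, the Simple Baselines pose code defines it roughly as follows (a sketch; this repo's version may differ):

    def _get_deconv_cfg(self, deconv_kernel, index):
        # map the deconv kernel size to the padding/output_padding that exactly doubles resolution
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        return deconv_kernel, padding, output_padding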
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(
            block(self.inplanes,
                  planes,
                  stride,
                  downsample=downsample,
                  stype='stage',
                  baseWidth=self.baseWidth,
                  scale=self.scale))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      baseWidth=self.baseWidth,
                      scale=self.scale))

        return nn.Sequential(*layers)
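
As a quick check of the channel bookkeeping, assuming the common Res2Net-50 settings (baseWidth=26, scale=4) and Bottle2neck.expansion = 4, none of which are shown in these snippets:

import math

baseWidth, scale, planes, expansion = 26, 4, 64, 4  # assumed Res2Net-50-style values
width = int(math.floor(planes * (baseWidth / 64.0)))
print(width)               # 26 channels per scale branch
print(width * scale)       # 104 channels after conv1, split into 4 groups of 26
print(planes * expansion)  # 256 channels after conv3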
 def __init__(self, block, layers, cfg, **kwargs):
     self.inplanes = 64
     self.baseWidth = cfg.MODEL.BASEWIDTH
     self.scale = cfg.MODEL.SCALE
     extra = cfg.MODEL.EXTRA
     self.deconv_with_bias = extra.DECONV_WITH_BIAS
     self.method = extra.LOSS_TYPE
     super(PoseRes2Net, self).__init__()
     self.conv1 = nn.Conv2d(3,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False)
     self.bn1 = BatchNorm2d(64, momentum=BN_MOMENTUM)
     self.relu = relu_fn
     self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
     self.layer1 = self._make_layer(block, 64, layers[0])
     self.layer2 = self._make_layer(block, 128, layers[1], stride=2)  # 4
     self.deconv_layers2 = self._make_deconv_layer(
         extra.STAGE2.NUM_DECONV_LAYERS,  # 3
         extra.STAGE2.NUM_DECONV_FILTERS,  # 256 256 256
         extra.STAGE2.NUM_DECONV_KERNELS,  # 4 4 4
     )
     # self.change_channel_2 =nn.Conv2d(
     #     in_channels=512,  # [] change deconv to the channel count matching the resnet stage
     #     out_channels=256,
     #     kernel_size=1,  # 1
     #     stride=1,
     #     padding=0
     # )
     self.layer3 = self._make_layer(block, 256, layers[2], stride=2)  # 6
     self.deconv_layers3 = self._make_deconv_layer(
         extra.STAGE3.NUM_DECONV_LAYERS,  # 3
         extra.STAGE3.NUM_DECONV_FILTERS,  # 256 256 256
         extra.STAGE3.NUM_DECONV_KERNELS,  # 4 4 4
     )
     # self.change_channel_3 = nn.Conv2d(  # too many parameters? it's only 1M
     #     in_channels=1024,  # [] change deconv to the channel count matching the resnet stage
     #     out_channels=256,
     #     kernel_size=1,  # 1
     #     stride=1,
     #     padding=0
     # )
     self.layer4 = self._make_layer(block, 512, layers[3], stride=2)  # 3
     self.deconv_layers4 = self._make_deconv_layer(
         extra.STAGE4.NUM_DECONV_LAYERS,  # 3
         extra.STAGE4.NUM_DECONV_FILTERS,  # 256 256 256
         extra.STAGE4.NUM_DECONV_KERNELS,  # 4 4 4
     )
     # used for deconv layers
     self.channel_att2 = SEBlock(16, 1)
     self.channel_att3 = SEBlock(16, 1)
     self.channel_att4 = SEBlock(16, 1)
     self.final_layer = nn.Conv2d(
         in_channels=extra.STAGE4.NUM_DECONV_FILTERS[-1],
         out_channels=cfg.MODEL.NUM_JOINTS,
         kernel_size=extra.FINAL_CONV_KERNEL,
         stride=1,
         padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0)
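
Rough spatial bookkeeping for the head above, assuming a 256x192 input (a common pose-estimation size; the real input size comes from cfg): the stem conv and maxpool each halve the resolution, layer2 through layer4 halve it three more times (1/32 overall), and each stride-2 deconv doubles it back, so three deconvs yield 1/4-resolution heatmaps.

h, w = 256, 192         # assumed input size
backbone_down = 2 ** 5  # conv1, maxpool, layer2, layer3, layer4
deconv_up = 2 ** 3      # three stride-2 deconv layers
print(h // backbone_down * deconv_up, w // backbone_down * deconv_up)  # 64 48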
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
     self.conv2 = nn.Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias=False)
     self.bn2 = BatchNorm2d(planes, momentum=BN_MOMENTUM)
     self.conv3 = nn.Conv2d(planes,
                            planes * self.expansion,
                            kernel_size=1,
                            bias=False)
     self.bn3 = BatchNorm2d(planes * self.expansion, momentum=BN_MOMENTUM)
     self.relu = relu_fn
     self.downsample = downsample
     self.stride = stride
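
For reference, the standard ResNet bottleneck forward that pairs with this constructor (as in torchvision / Simple Baselines; only __init__ appears in this snippet):

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out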
Example #7
 def _change_channel(self, inplane, outplane):
     change_channel = nn.Sequential(
         nn.Conv2d(
             in_channels=inplane,  # [] change deconv to the channel count matching the resnet stage
             out_channels=outplane,
             kernel_size=1,
             stride=1,
             padding=0
         ),
         BatchNorm2d(outplane, momentum=BN_MOMENTUM)
     )
     return change_channel
Example #8
    def __init__(self, block_args, global_params):
        super().__init__()
        self._block_args = block_args
        self._bn_mom = 1 - global_params.batch_norm_momentum
        self._bn_eps = global_params.batch_norm_epsilon
        self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip  # skip connection and drop connect

        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)

        # Expansion phase
        inp = self._block_args.input_filters  # number of input channels
        oup = self._block_args.input_filters * self._block_args.expand_ratio  # number of output channels
        if self._block_args.expand_ratio != 1:
            self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn0 = BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)

        # Depthwise convolution phase
        k = self._block_args.kernel_size
        s = self._block_args.stride
        self._depthwise_conv = Conv2d(
            in_channels=oup, out_channels=oup, groups=oup,  # groups makes it depthwise
            kernel_size=k, stride=s, bias=False)
        self._bn1 = BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)

        # Squeeze and Excitation layer, if desired
        if self.has_se:
            num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
            self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)

        # Output phase
        final_oup = self._block_args.output_filters
        self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
        self._bn2 = BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
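
For context, the upstream EfficientNet-PyTorch implementation wires these layers up roughly as follows (assumes import torch.nn.functional as F, and that relu_fn and drop_connect come from the same utils module; this repo's version may differ slightly):

    def forward(self, inputs, drop_connect_rate=None):
        # Expansion and depthwise convolution
        x = inputs
        if self._block_args.expand_ratio != 1:
            x = relu_fn(self._bn0(self._expand_conv(inputs)))
        x = relu_fn(self._bn1(self._depthwise_conv(x)))

        # Squeeze and Excitation: global pool -> reduce -> expand -> sigmoid gate
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
            x = torch.sigmoid(x_squeezed) * x

        # Pointwise projection (no activation: linear bottleneck)
        x = self._bn2(self._project_conv(x))

        # Skip connection with optional drop connect
        input_filters = self._block_args.input_filters
        output_filters = self._block_args.output_filters
        if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = x + inputs
        return x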
Example #9
    def __init__(self, cfg, blocks_args=None, global_params=None, **kwargs):  # shouldn't be affected by the two preceding parameters, right?
        super().__init__()
        assert isinstance(blocks_args, list), 'blocks_args should be a list'
        assert len(blocks_args) > 0, 'block args must be greater than 0'
        self._global_params = global_params
        self._blocks_args = blocks_args
        # [see]
        # self.inplanes = 320  # channel count before the deconv layers
        extra = cfg.MODEL.EXTRA
        self.deconv_with_bias = extra.DECONV_WITH_BIAS  # false
        self.relu = nn.ReLU(inplace=True)
        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)

        # Batch norm parameters
        bn_mom = 1 - self._global_params.batch_norm_momentum
        bn_eps = self._global_params.batch_norm_epsilon

        # Stem
        in_channels = 3  # rgb
        out_channels = round_filters(32, self._global_params)  # number of output channels
        self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
        self._bn0 = BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)

        # Build blocks
        self._blocks = nn.ModuleList([])
        for block_args in self._blocks_args:

            # Update block input and output filters based on depth multiplier.
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters, self._global_params),
                output_filters=round_filters(block_args.output_filters, self._global_params),
                num_repeat=round_repeats(block_args.num_repeat, self._global_params)
            )

            # The first block needs to take care of stride and filter size increase.
            self._blocks.append(MBConvBlock(block_args, self._global_params))
            if block_args.num_repeat > 1:
                block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
            for _ in range(block_args.num_repeat - 1):
                self._blocks.append(MBConvBlock(block_args, self._global_params))


        # [see]
        self.method = extra.LOSS_TYPE
        self.outplane_idx = extra.OUTPLANE_IDX
        # print(self.outplane_idx)
        # self.deconv_layers2 = self._make_deconv_layer(
        #     extra.STAGE2.NUM_DECONV_LAYERS,  # 3
        #     extra.STAGE2.NUM_DECONV_FILTERS,  # 256 256 256
        #     extra.STAGE2.NUM_DECONV_KERNELS,  # 4 4 4
        #     extra.DECONV_INCHANNELS[0]
        # )
        self.deconv_layers3 = self._make_deconv_layer(
            extra.STAGE3.NUM_DECONV_LAYERS,  # 3
            extra.STAGE3.NUM_DECONV_FILTERS,  # 256 256 256
            extra.STAGE3.NUM_DECONV_KERNELS,  # 4 4 4
            extra.DECONV_INCHANNELS[1]
        )

        # self. change_channel_2 = self._change_channel(extra.DECONV_INCHANNELS[0],256)
        # self. change_channel_3 = self._change_channel(extra.DECONV_INCHANNELS[1],256)
        self.deconv_layers4 = self._make_deconv_layer(
            extra.STAGE4.NUM_DECONV_LAYERS,  # 3
            extra.STAGE4.NUM_DECONV_FILTERS,  # 256 256 256
            extra.STAGE4.NUM_DECONV_KERNELS,  # 4 4 4
            extra.DECONV_INCHANNELS[2]
        )
        self.deconv_layers5 = self._make_deconv_layer(
            extra.STAGE5.NUM_DECONV_LAYERS,  # 5
            extra.STAGE5.NUM_DECONV_FILTERS,  # 256 256 256
            extra.STAGE5.NUM_DECONV_KERNELS,  # 4 4 4
            extra.DECONV_INCHANNELS[3]
        )
        self.final_layer = nn.Conv2d(
            in_channels=extra.STAGE4.NUM_DECONV_FILTERS[-1],  # 256
            out_channels=cfg.MODEL.NUM_JOINTS,
            kernel_size=extra.FINAL_CONV_KERNEL,  # 1
            stride=1,
            padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0  # 0
        )
        # self.channel_att = SEBlock(cfg.MODEL.NUM_JOINTS, 16)
        self.channel_att2 = SEBlock(64, 2)
        self.channel_att3 = SEBlock(64, 2)
        self.channel_att4 = SEBlock(64, 2)
        self.channel_att5 = SEBlock(64, 2)
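
SEBlock is not defined in these snippets. A minimal squeeze-and-excitation block with the assumed (channels, reduction) signature would look like the sketch below; the actual SEBlock in this repo may differ.

import torch.nn as nn

class SEBlock(nn.Module):
    # Assumed signature: SEBlock(channels, reduction), gating channels by a learned per-channel weight.
    def __init__(self, channels, reduction):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        w = self.fc(self.pool(x).view(b, c)).view(b, c, 1, 1)
        return x * w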