Example #1
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64,
                 norm_layer=None,
                 reduction_ratio=16):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups)
        self.bn2 = norm_layer(width)
        # self.expansion is a class attribute (4 in standard Bottleneck blocks).
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU()
        self.se = SELayer(planes * self.expansion,
                          reduction_ratio)  # Squeeze-and-Excitation layer.
        self.downsample = downsample
        self.stride = stride
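
All six examples call conv1x1 and conv3x3 helpers that are not shown. A minimal sketch, assuming they follow the torchvision ResNet convention (which matches the call signatures used in these snippets):

import torch.nn as nn

def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding; bias is omitted because a norm layer follows."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False,
                     dilation=dilation)

def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution, no bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                     bias=False)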
Example #2
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None,
                 reduction_ratio=16, dilation_value=2, pool_stride=2, use_ca=True, use_sa=True):

        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU()
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.att = MyLayer(num_channels=planes, reduction_ratio=reduction_ratio,
                           dilation_value=dilation_value, pool_stride=pool_stride, use_ca=use_ca, use_sa=use_sa)
        self.downsample = downsample
        self.stride = stride
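
Only __init__ is shown here; a plausible forward for this block, assuming the attention layer (self.att) is applied to the residual branch before the skip connection, which is the usual placement for SE/CBAM-style attention:

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        # Attention rescales the residual branch only.
        out = self.att(out)

        # Downsample the identity when stride != 1 or channels change.
        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)
        return out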
Example #3
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1,
                 norm_layer=None, reduction_ratio=16, kernel_size=7, use_ca=True, use_sa=True):

        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU()
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

        self.use_cbam = (use_ca or use_sa)
        self.cbam = CBAMLayer(planes * self.expansion, reduction_ratio, kernel_size, use_ca, use_sa)
Example #4
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64,
                 norm_layer=None,
                 reduction_ratio=16):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError(
                'SEBasicBlock only supports groups=1 and base_width=64')
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU()
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.se = SELayer(planes, reduction_ratio)  # Squeeze-and-Excitation layer.
        self.downsample = downsample
        self.stride = stride
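
SELayer itself is not included in these snippets. A minimal sketch of a standard Squeeze-and-Excitation layer matching the (num_channels, reduction_ratio) signature used above; this is an assumption, not necessarily the original implementation:

class SELayer(nn.Module):
    def __init__(self, num_channels, reduction_ratio=16):
        super().__init__()
        # Squeeze: global average pooling to one descriptor per channel.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Excitation: bottleneck MLP producing per-channel gates in (0, 1).
        self.fc = nn.Sequential(
            nn.Linear(num_channels, num_channels // reduction_ratio),
            nn.ReLU(),
            nn.Linear(num_channels // reduction_ratio, num_channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)      # (B, C)
        y = self.fc(y).view(b, c, 1, 1)      # (B, C, 1, 1)
        return x * y                         # rescale channels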
Example #5
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None,
                 reduction_ratio=16, dilation_value=2, pool_stride=2, use_ca=True, use_sa=True):

        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU()
        self.att = MyLayer(num_channels=planes * self.expansion, reduction_ratio=reduction_ratio,
                           dilation_value=dilation_value, pool_stride=pool_stride, use_ca=use_ca, use_sa=use_sa)
        self.downsample = downsample
        self.stride = stride
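
For reference, the width computation here implements ResNeXt-style grouped widths: with planes=64, base_width=4, and groups=32 (the ResNeXt-50 32x4d setting), width = int(64 * 4 / 64.) * 32 = 128, so the grouped 3x3 convolution runs 128 channels wide while the block still outputs planes * expansion channels.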
Example #6
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1,
                 norm_layer=None, reduction_ratio=16, kernel_size=7, use_ca=True, use_sa=True):

        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU()
        self.downsample = downsample
        self.stride = stride

        self.use_cbam = (use_ca or use_sa)
        self.cbam = CBAMLayer(planes * self.expansion, reduction_ratio, kernel_size, use_ca, use_sa)
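
As in the earlier examples, forward is omitted; a sketch of how the stored use_cbam flag would plausibly gate the attention in this bottleneck's residual branch (an assumption based on the attributes set above):

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        # Skip the attention module entirely when both branches are disabled.
        if self.use_cbam:
            out = self.cbam(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)
        return out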