Example #1
    def __init__(self, in_chs, out_chs, exp_kernel_size=3, exp_ratio=1.0, fake_in_chs=0,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, pw_kernel_size=1,
                 se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
                 drop_path_rate=0.):
        super(EdgeResidual, self).__init__()
        norm_kwargs = norm_kwargs or {}
        if fake_in_chs > 0:
            mid_chs = make_divisible(fake_in_chs * exp_ratio)
        else:
            mid_chs = make_divisible(in_chs * exp_ratio)
        has_se = se_ratio is not None and se_ratio > 0.
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
        self.drop_path_rate = drop_path_rate

        # Expansion convolution
        self.conv_exp = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type)
        self.bn1 = norm_layer(mid_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

        # Squeeze-and-excitation
        if has_se:
            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs)
        else:
            self.se = None

        # Point-wise linear projection
        self.conv_pwl = create_conv2d(
            mid_chs, out_chs, pw_kernel_size, stride=stride, dilation=dilation, padding=pad_type)
        self.bn2 = norm_layer(out_chs, **norm_kwargs)
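
The listing stops at the constructor. For context, the matching forward in timm's efficientnet_blocks.py of this generation looks roughly like the sketch below; drop_path is timm's stochastic-depth helper, and this is a reconstruction, not guaranteed verbatim:

    def forward(self, x):
        residual = x
        # expansion conv -> BN -> activation
        x = self.conv_exp(x)
        x = self.bn1(x)
        x = self.act1(x)
        # optional squeeze-and-excitation
        if self.se is not None:
            x = self.se(x)
        # point-wise linear projection (no activation afterwards)
        x = self.conv_pwl(x)
        x = self.bn2(x)
        if self.has_residual:
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x += residual
        return x
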
Example #2
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''):
        super(SeparableConv2d, self).__init__()
        # depthwise: per-channel spatial conv (groups == in_channels)
        self.depthwise_conv2d = create_conv2d(
            in_channels, in_channels, kernel_size=kernel_size,
            stride=stride, padding=padding, groups=in_channels)
        # pointwise: 1x1 conv mixing channels
        self.pointwise_conv2d = create_conv2d(
            in_channels, out_channels, kernel_size=1, padding=padding)
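
This NASNet-style SeparableConv2d has no norm or activation between the two convolutions; its forward (a sketch consistent with the constructor) simply chains them:

    def forward(self, x):
        x = self.depthwise_conv2d(x)   # per-channel spatial filtering
        x = self.pointwise_conv2d(x)   # 1x1 cross-channel mixing
        return x
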
Example #3
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
                 pw_kernel_size=1, pw_act=False, se_ratio=0., se_kwargs=None,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0.):
        super(DepthwiseSeparableConv, self).__init__()
        norm_kwargs = norm_kwargs or {}
        has_se = se_ratio is not None and se_ratio > 0.
        self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
        self.has_pw_act = pw_act  # activation after point-wise conv
        self.drop_path_rate = drop_path_rate

        self.conv_dw = create_conv2d(
            in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True)
        self.bn1 = norm_layer(in_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

        # Squeeze-and-excitation
        if has_se:
            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
            self.se = SqueezeExcite(in_chs, se_ratio=se_ratio, **se_kwargs)
        else:
            self.se = None

        self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
        self.bn2 = norm_layer(out_chs, **norm_kwargs)
        self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity()
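
The companion forward follows the same pattern as EdgeResidual's: depthwise conv, optional SE, point-wise projection, and a guarded skip connection. Again a reconstruction (drop_path is timm's stochastic-depth helper):

    def forward(self, x):
        residual = x
        x = self.conv_dw(x)
        x = self.bn1(x)
        x = self.act1(x)
        if self.se is not None:
            x = self.se(x)
        x = self.conv_pw(x)
        x = self.bn2(x)
        x = self.act2(x)  # nn.Identity() unless pw_act=True
        if self.has_residual:
            if self.drop_path_rate > 0.:
                x = drop_path(x, self.drop_path_rate, self.training)
            x += residual
        return x
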
Example #4
    def __init__(self,
                 in_chs,
                 num_1x1_a,
                 num_3x3_b,
                 num_1x1_c,
                 inc,
                 groups,
                 block_type='normal',
                 b=False):
        super(DualPathBlock, self).__init__()
        self.num_1x1_c = num_1x1_c
        self.inc = inc
        self.b = b
        if block_type == 'proj':
            self.key_stride = 1
            self.has_proj = True
        elif block_type == 'down':
            self.key_stride = 2
            self.has_proj = True
        else:
            assert block_type == 'normal'
            self.key_stride = 1
            self.has_proj = False

        self.c1x1_w_s1 = None
        self.c1x1_w_s2 = None
        if self.has_proj:
            # Using different member names here to allow easier parameter key matching for conversion
            if self.key_stride == 2:
                self.c1x1_w_s2 = BnActConv2d(in_chs=in_chs,
                                             out_chs=num_1x1_c + 2 * inc,
                                             kernel_size=1,
                                             stride=2)
            else:
                self.c1x1_w_s1 = BnActConv2d(in_chs=in_chs,
                                             out_chs=num_1x1_c + 2 * inc,
                                             kernel_size=1,
                                             stride=1)

        self.c1x1_a = BnActConv2d(in_chs=in_chs,
                                  out_chs=num_1x1_a,
                                  kernel_size=1,
                                  stride=1)
        self.c3x3_b = BnActConv2d(in_chs=num_1x1_a,
                                  out_chs=num_3x3_b,
                                  kernel_size=3,
                                  stride=self.key_stride,
                                  groups=groups)
        if b:
            self.c1x1_c = CatBnAct(in_chs=num_3x3_b)
            self.c1x1_c1 = create_conv2d(num_3x3_b, num_1x1_c, kernel_size=1)
            self.c1x1_c2 = create_conv2d(num_3x3_b, inc, kernel_size=1)
        else:
            self.c1x1_c = BnActConv2d(in_chs=num_3x3_b,
                                      out_chs=num_1x1_c + inc,
                                      kernel_size=1,
                                      stride=1)
            self.c1x1_c1 = None
            self.c1x1_c2 = None
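
What makes the block "dual path" is its output: a residual tensor of fixed width num_1x1_c that is added, plus a dense tensor that grows by inc channels at every block and is concatenated, DenseNet-style. A self-contained illustration of that combine step with made-up channel sizes (not the timm forward):

import torch

num_1x1_c, inc = 256, 16
x_s1 = torch.randn(1, num_1x1_c, 14, 14)   # incoming residual path
x_s2 = torch.randn(1, 3 * inc, 14, 14)     # incoming dense path (3 blocks deep)
out1 = torch.randn(1, num_1x1_c, 14, 14)   # this block's residual output
out2 = torch.randn(1, inc, 14, 14)         # this block's dense increment
resid = x_s1 + out1                        # residual path: element-wise add
dense = torch.cat([x_s2, out2], dim=1)     # dense path: widens by inc
print(resid.shape, dense.shape)            # [1, 256, 14, 14] and [1, 64, 14, 14]
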
Example #5
    def __init__(self, in_channels, out_channels, padding=''):
        super(FactorizedReduction, self).__init__()
        self.act = nn.ReLU()
        self.path_1 = nn.Sequential(OrderedDict([
            ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)),
            ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)),
        ]))
        self.path_2 = nn.Sequential(OrderedDict([
            ('pad', nn.ZeroPad2d((-1, 1, -1, 1))),  # shift one pixel: crop top/left, pad bottom/right
            ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)),
            ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)),
        ]))
        self.final_path_bn = nn.BatchNorm2d(out_channels, eps=0.001)
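
Each path halves the spatial resolution and produces out_channels // 2 channels; the negative ZeroPad2d shifts path_2 by one pixel so the two strided poolings sample complementary positions. The matching forward (reconstructed from timm's pnasnet.py):

    def forward(self, x):
        x = self.act(x)
        x_path1 = self.path_1(x)
        x_path2 = self.path_2(x)
        return self.final_path_bn(torch.cat([x_path1, x_path2], 1))
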
Example #6
    def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''):
        super(CellStem0, self).__init__()
        self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type)

        self.comb_iter_0_left = BranchSeparables(
            in_chs_left, out_chs_left, kernel_size=5, stride=2, stem_cell=True, padding=pad_type)
        self.comb_iter_0_right = nn.Sequential(OrderedDict([
            ('max_pool', create_pool2d('max', 3, stride=2, padding=pad_type)),
            ('conv', create_conv2d(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)),
            ('bn', nn.BatchNorm2d(out_chs_left, eps=0.001)),
        ]))

        self.comb_iter_1_left = BranchSeparables(
            out_chs_right, out_chs_right, kernel_size=7, stride=2, padding=pad_type)
        self.comb_iter_1_right = create_pool2d('max', 3, stride=2, padding=pad_type)

        self.comb_iter_2_left = BranchSeparables(
            out_chs_right, out_chs_right, kernel_size=5, stride=2, padding=pad_type)
        self.comb_iter_2_right = BranchSeparables(
            out_chs_right, out_chs_right, kernel_size=3, stride=2, padding=pad_type)

        self.comb_iter_3_left = BranchSeparables(
            out_chs_right, out_chs_right, kernel_size=3, padding=pad_type)
        self.comb_iter_3_right = create_pool2d('max', 3, stride=2, padding=pad_type)

        self.comb_iter_4_left = BranchSeparables(
            in_chs_right, out_chs_right, kernel_size=3, stride=2, stem_cell=True, padding=pad_type)
        self.comb_iter_4_right = ActConvBn(
            out_chs_right, out_chs_right, kernel_size=1, stride=2, padding=pad_type)
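
Each comb_iter_N_left/right pair is applied to the cell inputs, the two branch outputs are summed, and the five sums are concatenated on the channel axis. A toy, self-contained version of one such combination (illustrative layers and shapes, not the timm forward):

import torch
import torch.nn as nn

left = nn.Conv2d(8, 8, 3, stride=2, padding=1)   # stand-in for a BranchSeparables branch
right = nn.MaxPool2d(3, stride=2, padding=1)     # stand-in for the max-pool branch
x = torch.randn(1, 8, 16, 16)
comb = left(x) + right(x)                        # branch outputs must agree in shape
out = torch.cat([comb, comb], dim=1)             # the real cell concatenates five comb results
print(out.shape)                                 # torch.Size([1, 16, 8, 8])
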
Example #7
    def __init__(self, in_chs, out_chs, kernel_size,
                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None):
        super(ConvBnAct, self).__init__()
        norm_kwargs = norm_kwargs or {}
        self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type)
        self.bn1 = norm_layer(out_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)
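
ConvBnAct is the degenerate building block: one conv, one norm, one activation, no residual. Its (unshown) forward plausibly reduces to:

    def forward(self, x):
        x = self.conv(x)
        x = self.bn1(x)
        return self.act1(x)
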
Example #8
    def __init__(self,
                 block_args,
                 out_indices=(0, 1, 2, 3, 4),
                 feature_location='bottleneck',
                 in_chans=3,
                 stem_size=16,
                 channel_multiplier=1.0,
                 output_stride=32,
                 pad_type='',
                 act_layer=nn.ReLU,
                 drop_rate=0.,
                 drop_path_rate=0.,
                 se_kwargs=None,
                 norm_layer=nn.BatchNorm2d,
                 norm_kwargs=None):
        super(MobileNetV3Features, self).__init__()
        norm_kwargs = norm_kwargs or {}
        self.drop_rate = drop_rate

        # Stem
        stem_size = round_channels(stem_size, channel_multiplier)
        self.conv_stem = create_conv2d(in_chans,
                                       stem_size,
                                       3,
                                       stride=2,
                                       padding=pad_type)
        self.bn1 = norm_layer(stem_size, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

        # Middle stages (IR/ER/DS Blocks)
        builder = EfficientNetBuilder(channel_multiplier,
                                      8,
                                      None,
                                      output_stride,
                                      pad_type,
                                      act_layer,
                                      se_kwargs,
                                      norm_layer,
                                      norm_kwargs,
                                      drop_path_rate,
                                      feature_location=feature_location,
                                      verbose=_DEBUG)
        self.blocks = nn.Sequential(*builder(stem_size, block_args))
        self.feature_info = FeatureInfo(builder.features, out_indices)
        self._stage_out_idx = {
            v['stage']: i
            for i, v in enumerate(self.feature_info) if i in out_indices
        }

        efficientnet_init_weights(self)

        # Register feature extraction hooks with FeatureHooks helper
        self.feature_hooks = None
        if feature_location != 'bottleneck':
            hooks = self.feature_info.get_dicts(keys=('module', 'hook_type'))
            self.feature_hooks = FeatureHooks(hooks, self.named_modules())
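
In practice this features-only variant is reached through the timm factory rather than instantiated by hand. A quick smoke test, assuming a timm release from this period ('mobilenetv3_large_100' is a registered model name):

import torch
import timm

m = timm.create_model('mobilenetv3_large_100', features_only=True,
                      out_indices=(0, 1, 2, 3, 4), pretrained=False)
for f in m(torch.randn(1, 3, 224, 224)):
    print(f.shape)   # one feature map per out_index, spatial stride increasing toward 32
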
Example #9
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=''):
        super(ActConvBn, self).__init__()
        self.act = nn.ReLU()
        self.conv = create_conv2d(in_channels,
                                  out_channels,
                                  kernel_size=kernel_size,
                                  stride=stride,
                                  padding=padding)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1)
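
Note the ordering here: activation first, then conv, then BN, the NASNet/PNASNet convention. The forward is presumably just:

    def forward(self, x):
        return self.bn(self.conv(self.act(x)))
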
Example #10
    def __init__(self,
                 in_chs,
                 out_chs,
                 kernel_size,
                 stride,
                 groups=1,
                 norm_layer=BatchNormAct2d):
        super(BnActConv2d, self).__init__()
        self.bn = norm_layer(in_chs, eps=0.001)
        self.conv = create_conv2d(in_chs,
                                  out_chs,
                                  kernel_size,
                                  stride=stride,
                                  groups=groups)
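
This is the pre-activation mirror image of ActConvBn: BatchNormAct2d fuses the norm and activation, so a forward consistent with timm's dpn.py is simply:

    def forward(self, x):
        return self.conv(self.bn(x))   # self.bn = BatchNormAct2d, i.e. BN + activation fused
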
Example #11
    def __init__(self,
                 inplanes,
                 planes,
                 kernel_size=3,
                 stride=1,
                 dilation=1,
                 padding='',
                 act_layer=nn.ReLU,
                 norm_layer=nn.BatchNorm2d,
                 norm_kwargs=None):
        super(SeparableConv2d, self).__init__()
        norm_kwargs = norm_kwargs if norm_kwargs is not None else {}
        self.kernel_size = kernel_size
        self.dilation = dilation

        # depthwise convolution
        self.conv_dw = create_conv2d(inplanes,
                                     inplanes,
                                     kernel_size,
                                     stride=stride,
                                     padding=padding,
                                     dilation=dilation,
                                     depthwise=True)
        self.bn_dw = norm_layer(inplanes, **norm_kwargs)
        if act_layer is not None:
            self.act_dw = act_layer(inplace=True)
        else:
            self.act_dw = None

        # pointwise convolution
        self.conv_pw = create_conv2d(inplanes, planes, kernel_size=1)
        self.bn_pw = norm_layer(planes, **norm_kwargs)
        if act_layer is not None:
            self.act_pw = act_layer(inplace=True)
        else:
            self.act_pw = None
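
Unlike the bare NASNet version earlier, this Xception-style block normalizes (and optionally activates) after each of the two convolutions. A forward reconstructed to match the constructor:

    def forward(self, x):
        x = self.conv_dw(x)
        x = self.bn_dw(x)
        if self.act_dw is not None:
            x = self.act_dw(x)
        x = self.conv_pw(x)
        x = self.bn_pw(x)
        if self.act_pw is not None:
            x = self.act_pw(x)
        return x
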
Example #12
    def __init__(self,
                 block_args,
                 num_classes=1000,
                 in_chans=3,
                 stem_size=16,
                 num_features=1280,
                 head_bias=True,
                 channel_multiplier=1.0,
                 pad_type='',
                 act_layer=nn.ReLU,
                 drop_rate=0.,
                 drop_path_rate=0.,
                 se_kwargs=None,
                 norm_layer=nn.BatchNorm2d,
                 norm_kwargs=None,
                 global_pool='avg'):
        super(MobileNetV3, self).__init__()
        norm_kwargs = norm_kwargs or {}  # guard added: **norm_kwargs below fails on the None default

        self.num_classes = num_classes
        self.num_features = num_features
        self.drop_rate = drop_rate

        # Stem
        stem_size = round_channels(stem_size, channel_multiplier)
        self.conv_stem = create_conv2d(in_chans,
                                       stem_size,
                                       3,
                                       stride=2,
                                       padding=pad_type)
        self.bn1 = norm_layer(stem_size, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

        # Middle stages (IR/ER/DS Blocks)
        builder = EfficientNetBuilder(channel_multiplier,
                                      8,
                                      None,
                                      32,
                                      pad_type,
                                      act_layer,
                                      se_kwargs,
                                      norm_layer,
                                      norm_kwargs,
                                      drop_path_rate,
                                      verbose=_DEBUG)
        self.blocks = nn.Sequential(*builder(stem_size, block_args))
        self.feature_info = builder.features
        head_chs = builder.in_chs

        # Head + Pooling
        self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
        num_pooled_chs = head_chs * self.global_pool.feat_mult()
        self.conv_head = create_conv2d(num_pooled_chs,
                                       self.num_features,
                                       1,
                                       padding=pad_type,
                                       bias=head_bias)
        self.act2 = act_layer(inplace=True)
        self.classifier = nn.Linear(
            self.num_features,
            num_classes) if num_classes > 0 else nn.Identity()

        efficientnet_init_weights(self)
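
End to end, the classifier is normally built through the timm factory. A minimal smoke test (registered model name, random weights):

import torch
import timm

model = timm.create_model('mobilenetv3_large_100', pretrained=False)
logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)   # torch.Size([1, 1000])
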