def __init__(self, in_chnls, out_chnls, kernel_size, expansion, stride, is_se=False,
             activation=nn.ReLU(inplace=True)):
    super(MBConv, self).__init__()
    self.is_se = is_se
    self.is_shortcut = (stride == 1) and (in_chnls == out_chnls)
    self.trans1 = BN_Conv2d(in_chnls, in_chnls * expansion, 1, 1, 0, activation=activation)
    self.DWConv = BN_Conv2d(in_chnls * expansion, in_chnls * expansion, kernel_size,
                            stride=stride, padding=kernel_size // 2,
                            groups=in_chnls * expansion, activation=activation)
    if self.is_se:
        self.se = SE(in_chnls * expansion, 4)  # SE ratio = 0.25
    self.trans2 = BN_Conv2d(in_chnls * expansion, out_chnls, 1, 1, 0, activation=None)  # linear activation
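# Only the constructor is shown above, so the following forward pass is a minimal sketch of the
# usual MBConv (inverted residual) data flow: 1x1 expansion -> depthwise conv -> optional SE
# reweighting -> 1x1 linear projection, with an identity shortcut when is_shortcut holds.
# Treating the SE module's output as per-channel multiplicative weights is an assumption.
def forward(self, x):
    out = self.trans1(x)                 # 1x1 expansion
    out = self.DWConv(out)               # depthwise convolution
    if self.is_se:
        coefficient = self.se(out)       # assumed: channel attention coefficients
        out = out * coefficient
    out = self.trans2(out)               # 1x1 linear projection (no activation)
    if self.is_shortcut:
        out = out + x                    # identity shortcut: stride == 1 and channels match
    return out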
def __init__(self, in_chnls, cardinality, group_depth, stride, is_se=False):
    super(ResNeXt_Block, self).__init__()
    self.is_se = is_se
    self.group_chnls = cardinality * group_depth
    self.conv1 = BN_Conv2d(in_chnls, self.group_chnls, 1, stride=1, padding=0)
    self.conv2 = BN_Conv2d(self.group_chnls, self.group_chnls, 3, stride=stride, padding=1,
                           groups=cardinality)
    self.conv3 = nn.Conv2d(self.group_chnls, self.group_chnls * 2, 1, stride=1, padding=0)
    self.bn = nn.BatchNorm2d(self.group_chnls * 2)
    if self.is_se:
        self.se = SE(self.group_chnls * 2, 16)
    self.short_cut = nn.Sequential(
        nn.Conv2d(in_chnls, self.group_chnls * 2, 1, stride, 0, bias=False),
        nn.BatchNorm2d(self.group_chnls * 2)
    )
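# A minimal forward sketch for this ResNeXt block, assuming torch.nn.functional is imported as F:
# 1x1 reduce -> grouped 3x3 (cardinality groups) -> 1x1 expand + BN -> optional SE -> add the
# projection shortcut -> ReLU. The SE multiplication pattern is an assumption.
def forward(self, x):
    out = self.conv1(x)
    out = self.conv2(out)                # grouped 3x3 convolution
    out = self.bn(self.conv3(out))       # 1x1 expansion to 2 * group_chnls
    if self.is_se:
        coefficient = self.se(out)       # assumed: channel attention coefficients
        out = out * coefficient
    out = out + self.short_cut(x)        # projection shortcut matches channels and stride
    return F.relu(out)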
def __init__(self, in_channels, out_channels, strides, is_se=False):
    super(BottleNeck, self).__init__()
    self.is_se = is_se
    self.conv1 = BN_Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False)  # same padding
    self.conv2 = BN_Conv2d(out_channels, out_channels, 3, stride=strides, padding=1, bias=False)
    self.conv3 = BN_Conv2d(out_channels, out_channels * 4, 1, stride=1, padding=0, bias=False,
                           activation=None)
    if self.is_se:
        self.se = SE(out_channels * 4, 16)
    # fit input with residual output
    self.shortcut = nn.Sequential(
        nn.Conv2d(in_channels, out_channels * 4, 1, stride=strides, padding=0, bias=False),
        nn.BatchNorm2d(out_channels * 4)
    )
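# A minimal forward sketch of the standard ResNet bottleneck flow, assuming torch.nn.functional
# is imported as F: 1x1 reduce -> 3x3 -> 1x1 expand (no activation) -> optional SE -> add the
# projection shortcut -> ReLU. The SE usage is an assumption about the SE helper.
def forward(self, x):
    out = self.conv1(x)
    out = self.conv2(out)
    out = self.conv3(out)                # 1x1 expansion to 4 * out_channels, linear
    if self.is_se:
        coefficient = self.se(out)       # assumed: channel attention coefficients
        out = out * coefficient
    out = out + self.shortcut(x)         # projection shortcut fits the 4x channel width
    return F.relu(out)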
def __init__(self, channels, is_se=False, inner_channels=None):
    super(Dark_block, self).__init__()
    self.is_se = is_se
    if inner_channels is None:
        inner_channels = channels // 2
    self.conv1 = BN_Conv2d_Leaky(channels, inner_channels, 1, 1, 0)
    self.conv2 = nn.Conv2d(inner_channels, channels, 3, 1, 1)
    self.bn = nn.BatchNorm2d(channels)
    if self.is_se:
        self.se = SE(channels, 16)
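# A minimal forward sketch of the Darknet-style residual block, assuming torch.nn.functional is
# imported as F: 1x1 reduction with leaky ReLU -> 3x3 conv + BN -> optional SE -> identity add
# -> leaky ReLU. The 0.1 negative slope and the SE multiplication are assumptions.
def forward(self, x):
    out = self.conv1(x)                  # 1x1 channel reduction (leaky ReLU inside BN_Conv2d_Leaky)
    out = self.bn(self.conv2(out))       # 3x3 conv back to `channels`
    if self.is_se:
        coefficient = self.se(out)       # assumed: channel attention coefficients
        out = out * coefficient
    out = out + x                        # identity shortcut; input/output shapes always match
    return F.leaky_relu(out, 0.1)        # assumed Darknet convention for the negative slope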
def __init__(self, in_chnls, out_chnls, is_se=False, is_residual=False, c_ratio=0.5, groups=2):
    super(BasicUnit, self).__init__()
    self.is_se, self.is_res = is_se, is_residual
    self.l_chnls = int(in_chnls * c_ratio)
    self.r_chnls = in_chnls - self.l_chnls
    self.ro_chnls = out_chnls - self.l_chnls
    self.groups = groups

    # layers
    self.conv1 = BN_Conv2d(self.r_chnls, self.ro_chnls, 1, 1, 0)
    self.dwconv2 = BN_Conv2d(self.ro_chnls, self.ro_chnls, 3, 1, 1,  # same padding, depthwise conv
                             groups=self.ro_chnls, activation=None)
    act = None if self.is_res else nn.ReLU(inplace=True)
    self.conv3 = BN_Conv2d(self.ro_chnls, self.ro_chnls, 1, 1, 0, activation=act)
    if self.is_se:
        self.se = SE(self.ro_chnls, 16)
    if self.is_res:
        self.shortcut = nn.Sequential()
        if self.r_chnls != self.ro_chnls:
            self.shortcut = BN_Conv2d(self.r_chnls, self.ro_chnls, 1, 1, 0, activation=None)
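# A minimal forward sketch of the ShuffleNet-v2 basic unit, assuming `torch` and
# `torch.nn.functional as F` are imported: split channels into a left (identity) and a right
# (transformed) branch, process the right branch with 1x1 -> depthwise 3x3 -> 1x1, optionally apply
# SE and the residual shortcut, then concatenate and channel-shuffle. `shuffle_chnls(x, groups)` is
# an assumed channel-shuffle helper, not shown in this section.
def forward(self, x):
    x_l = x[:, :self.l_chnls, :, :]      # left branch passes through unchanged
    x_r = x[:, self.l_chnls:, :, :]      # right branch is transformed

    out_r = self.conv1(x_r)
    out_r = self.dwconv2(out_r)          # depthwise 3x3, no activation
    out_r = self.conv3(out_r)
    if self.is_se:
        coefficient = self.se(out_r)     # assumed: channel attention coefficients
        out_r = out_r * coefficient
    if self.is_res:
        out_r = F.relu(out_r + self.shortcut(x_r))

    out = torch.cat((x_l, out_r), dim=1)
    return shuffle_chnls(out, self.groups)  # assumed helper: shuffle channels across groups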