def __init__(self, in_channels: int, out_channels: int, kernel_size: int,
             stride: int, expansion: int, activation: type = nn.Hardswish,
             use_se: bool = True):
    super(InvertedResidual, self).__init__()
    # TODO: check if this is indeed correct
    # this is the same as the implementation from
    # https://github.com/d-li14/mobilenetv3.pytorch/
    self.activation_after_se = use_se and expansion != 1
    width = round_channels(expansion * in_channels)
    # 1x1 pointwise expansion, skipped when there is nothing to expand
    self.conv1 = (ConvBnAct2d(in_channels, width, 1, activation=activation)
                  if expansion != 1 else nn.Identity())
    # depthwise convolution (groups == width)
    self.conv2 = ConvBn2d(width, width, kernel_size,
                          padding=kernel_size // 2, stride=stride, groups=width)
    self.act2 = activation()
    self.se = SqueezeExcitation(width, width) if use_se else nn.Identity()
    # 1x1 pointwise linear projection (no activation)
    self.conv3 = ConvBn2d(width, out_channels, 1)
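
# The forward pass is not shown above. A minimal sketch of what it likely
# looks like, assuming activation_after_se toggles whether act2 runs before
# or after the SE module (matching the d-li14 reference linked above), and
# adding the usual identity shortcut when shapes permit:
def forward(self, x):
    out = self.conv1(x)
    out = self.conv2(out)
    if self.activation_after_se:
        out = self.se(out)
        out = self.act2(out)
    else:
        out = self.act2(out)
        out = self.se(out)
    out = self.conv3(out)
    # residual connection applies when stride == 1 and channel counts match
    if out.shape == x.shape:
        out = out + x
    return out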
def __init__(self, in_channels, out_channels, reduction_ratio=4):
    super(SEBlock, self).__init__()
    # squeeze: bottleneck the channel dimension by reduction_ratio,
    # rounded to a multiple of 4
    reduced_channels = round_channels(in_channels / reduction_ratio, 4)
    self.conv1 = nn.Conv2d(in_channels, reduced_channels, 1)
    self.activation = nn.ReLU(inplace=True)
    # excite: project back up; the gating nonlinearity (sigmoid or
    # hard-sigmoid) is presumably applied in forward()
    self.conv2 = nn.Conv2d(reduced_channels, out_channels, 1)
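
# SEBlock.forward is not shown. A plausible sketch, assuming a global average
# pool for the squeeze step, a hard-sigmoid gate (as in MobileNetV3; the
# original may use a plain sigmoid), out_channels == in_channels as in the
# SqueezeExcitation(width, width) call above, and torch.nn.functional
# imported as F:
def forward(self, x):
    scale = F.adaptive_avg_pool2d(x, 1)  # squeeze to shape (N, C, 1, 1)
    scale = self.conv1(scale)
    scale = self.activation(scale)
    scale = self.conv2(scale)
    scale = F.hardsigmoid(scale)         # gate to [0, 1]
    return x * scale                     # excite: rescale each channel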
def c(channels):
    return round_channels(channels * width_multiplier)
def c(channels):
    return round_channels(width_multiplier * channels,
                          divisor=8 if width_multiplier >= 0.1 else 4)
def c(channels): "channel number mapper" return round_channels(width_multiplier * channels, 8 if width_multiplier > 0.1 else 4)
def __init__(self, in_channels, out_channels, reduction=4):
    super().__init__()
    mid_channels = round_channels(in_channels / reduction)
    self.conv1 = nn.Conv2d(in_channels, mid_channels, 1)
    self.relu = nn.ReLU(inplace=True)
    self.conv2 = nn.Conv2d(mid_channels, out_channels, 1)
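
# ConvBn2d and ConvBnAct2d are referenced in InvertedResidual above but never
# defined in this snippet. Minimal sketches consistent with the call sites;
# the names, argument order, and defaults are assumptions, not the original
# code:
import torch.nn as nn

class ConvBn2d(nn.Sequential):
    # Conv2d followed by BatchNorm2d; conv bias is dropped since BN absorbs it
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, groups=1):
        super().__init__(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,
                      padding=padding, groups=groups, bias=False),
            nn.BatchNorm2d(out_channels),
        )

class ConvBnAct2d(nn.Sequential):
    # Conv2d + BatchNorm2d + activation
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, groups=1, activation=nn.ReLU):
        super().__init__(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,
                      padding=padding, groups=groups, bias=False),
            nn.BatchNorm2d(out_channels),
            activation(),
        )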