def __init__(self, in_channels, out_channels, strides, data_format="channels_last", **kwargs):
    """
    Linear bottleneck unit: 1x1 expansion conv -> 3x3 depthwise conv ->
    1x1 projection conv without activation.  A residual shortcut is used
    only when input/output shapes match (MobileNetV2-style; the x6
    expansion factor is hard-coded here).

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple(int, int)
        Strides of the depthwise convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(LinearBottleneck, self).__init__(**kwargs)
    # Identity shortcut is only valid when both the channel count and
    # the spatial resolution stay unchanged.
    same_channels = (in_channels == out_channels)
    self.residual = same_channels and (strides == 1)
    expanded_channels = in_channels * 6  # fixed expansion factor of 6

    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=expanded_channels,
        data_format=data_format,
        name="conv1")
    self.conv2 = dwconv3x3_block(
        in_channels=expanded_channels,
        out_channels=expanded_channels,
        strides=strides,
        data_format=data_format,
        name="conv2")
    self.conv3 = conv1x1_block(
        in_channels=expanded_channels,
        out_channels=out_channels,
        activation=None,
        data_format=data_format,
        name="conv3")
def __init__(self, in_channels, out_channels, strides, bn_use_global_stats=False, expansion=True, remove_exp_conv=False, activation=(lambda: nn.Activation("relu")), **kwargs):
    """
    Linear bottleneck unit (Gluon style): optional 1x1 expansion conv ->
    3x3 depthwise conv -> 1x1 projection conv without activation, with an
    identity shortcut when input/output shapes match.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple(int, int)
        Strides of the depthwise convolution.
    bn_use_global_stats : bool, default False
        Whether global moving statistics are used in BatchNorm layers.
    expansion : bool, default True
        Whether to expand channels by a factor of 6 in the first conv.
    remove_exp_conv : bool, default False
        Whether to drop the expansion conv (only effective when
        ``expansion`` is False).
    activation : function, default lambda: nn.Activation('relu')
        Factory producing the activation layer for the first two convs.
    """
    super(LinearBottleneck, self).__init__(**kwargs)
    # Shortcut is valid only when shapes are preserved end to end.
    self.residual = (strides == 1) and (in_channels == out_channels)
    if expansion:
        mid_channels = 6 * in_channels
    else:
        mid_channels = in_channels
    # The expansion conv can only be removed when expansion is disabled;
    # otherwise it is always created.
    self.use_exp_conv = expansion or (not remove_exp_conv)

    with self.name_scope():
        if self.use_exp_conv:
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation)
        self.conv2 = dwconv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            bn_use_global_stats=bn_use_global_stats,
            activation=activation)
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            bn_use_global_stats=bn_use_global_stats,
            activation=None)
def __init__(self, highter_in_channels, lower_in_channels, out_channels, height, width, scale_factor=4, **kwargs):
    """
    Feature fusion module: the lower-resolution branch goes through a
    depthwise 3x3 conv and a pointwise 1x1 conv (no activation), the
    higher-resolution branch through a 1x1 conv (no activation); the two
    are presumably summed and passed through ReLU in the forward pass —
    confirm against the hybrid_forward/forward method.

    Parameters
    ----------
    highter_in_channels : int
        Number of input channels for the higher-resolution branch.
        (NOTE(review): 'highter' is a typo for 'higher', kept because
        callers may pass it by keyword.)
    lower_in_channels : int
        Number of input channels for the lower-resolution branch.
    out_channels : int
        Number of output channels.
    height : int
        Target height stored for upsampling (`self._up_kwargs`).
    width : int
        Target width stored for upsampling (`self._up_kwargs`).
    scale_factor : int, default 4
        Upsampling scale factor for the lower-resolution branch.
    """
    # Fix: forward **kwargs to the parent constructor (they were silently
    # dropped), so Gluon block options such as `prefix`/`params` take
    # effect — consistent with the other blocks in this file.
    super(FeatureFusionModule, self).__init__(**kwargs)
    self.scale_factor = scale_factor
    self._up_kwargs = {'height': height, 'width': width}
    with self.name_scope():
        self.dwconv = dwconv3x3_block(
            in_channels=lower_in_channels,
            out_channels=out_channels)
        self.conv_lower_res = conv1x1_block(
            in_channels=out_channels,
            out_channels=out_channels,
            use_bias=True,
            activation=None)
        self.conv_higher_res = conv1x1_block(
            in_channels=highter_in_channels,
            out_channels=out_channels,
            use_bias=True,
            activation=None)
        self.activ = nn.Activation("relu")
def __init__(self, x_in_channels, y_in_channels, out_channels, x_in_size, data_format="channels_last", **kwargs):
    """
    Feature fusion block (TF2/Keras style): the low-resolution input `y`
    is upsampled to `x_in_size`, refined by a depthwise 3x3 conv followed
    by a pointwise 1x1 conv (no activation), while the high-resolution
    input `x` passes through a 1x1 conv (no activation); a ReLU layer is
    created for use after fusion.

    Parameters
    ----------
    x_in_channels : int
        Number of channels of the high-resolution input.
    y_in_channels : int
        Number of channels of the low-resolution input.
    out_channels : int
        Number of output channels.
    x_in_size : tuple(int, int)
        Spatial size of the high-resolution input (upsampling target).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(FeatureFusion, self).__init__(**kwargs)
    self.x_in_size = x_in_size
    self.data_format = data_format

    # Interpolate by target size rather than by a scale factor.
    self.up = InterpolationBlock(
        scale_factor=None,
        out_size=x_in_size,
        data_format=data_format,
        name="up")
    self.low_dw_conv = dwconv3x3_block(
        in_channels=y_in_channels,
        out_channels=out_channels,
        data_format=data_format,
        name="low_dw_conv")
    self.low_pw_conv = conv1x1_block(
        in_channels=out_channels,
        out_channels=out_channels,
        use_bias=True,
        activation=None,
        data_format=data_format,
        name="low_pw_conv")
    self.high_conv = conv1x1_block(
        in_channels=x_in_channels,
        out_channels=out_channels,
        use_bias=True,
        activation=None,
        data_format=data_format,
        name="high_conv")
    self.activ = nn.ReLU()