def __init__(self, in_channels, out_channels, strides, bn_use_global_stats=False, expansion=True,
             remove_exp_conv=False, activation=(lambda: nn.Activation("relu")), **kwargs):
    """
    MobileNetV2-style linear (inverted-residual) bottleneck, Gluon version.

    The 1x1 expansion conv is built unless expansion is disabled and
    `remove_exp_conv` is set; the final 1x1 projection conv has no activation.
    """
    super(LinearBottleneck, self).__init__(**kwargs)
    # Identity shortcut only when spatial size and channel count are preserved.
    self.residual = (strides == 1) and (in_channels == out_channels)
    mid_channels = 6 * in_channels if expansion else in_channels
    self.use_exp_conv = expansion or not remove_exp_conv
    with self.name_scope():
        # NOTE: child creation order inside name_scope fixes parameter names.
        if self.use_exp_conv:
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation)
        self.conv2 = dwconv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            bn_use_global_stats=bn_use_global_stats,
            activation=activation)
        # Linear projection: no activation after the last conv.
        self.conv3 = conv1x1_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            bn_use_global_stats=bn_use_global_stats,
            activation=None)
def __init__(self, in_channels, out_channels, strides, data_format="channels_last", **kwargs):
    """
    MobileNetV2-style linear bottleneck, TF2 version (fixed 6x expansion).

    expand (1x1) -> depthwise 3x3 (stride) -> project (1x1, no activation).
    """
    super(LinearBottleneck, self).__init__(**kwargs)
    # Identity shortcut only when shape is unchanged by the block.
    self.residual = (strides == 1) and (in_channels == out_channels)
    expanded = 6 * in_channels
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=expanded,
        data_format=data_format,
        name="conv1")
    self.conv2 = dwconv3x3_block(
        in_channels=expanded,
        out_channels=expanded,
        strides=strides,
        data_format=data_format,
        name="conv2")
    # Linear projection stage.
    self.conv3 = conv1x1_block(
        in_channels=expanded,
        out_channels=out_channels,
        activation=None,
        data_format=data_format,
        name="conv3")
def __init__(self, in_channels, out_channels, stride, bias=False, bottleneck_factor=2,
             activation=(lambda: nn.ReLU(inplace=True))):
    """
    Bottleneck residual body for IBP-pose: 1x1 reduce, 3x3 (stride), 1x1 expand.

    The last conv is linear (no activation); `bottleneck_factor` sets the
    channel reduction of the middle stage.
    """
    super(IbpResBottleneck, self).__init__()
    squeezed = out_channels // bottleneck_factor
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=squeezed,
        bias=bias,
        activation=activation)
    self.conv2 = conv3x3_block(
        in_channels=squeezed,
        out_channels=squeezed,
        stride=stride,
        bias=bias,
        activation=activation)
    # Expansion conv stays linear so the unit can add the identity afterwards.
    self.conv3 = conv1x1_block(
        in_channels=squeezed,
        out_channels=out_channels,
        bias=bias,
        activation=None)
def __init__(self, highter_in_channels, lower_in_channels, out_channels, height, width,
             scale_factor=4, **kwargs):
    """
    Fuse a low-resolution branch (upsampled) with a high-resolution branch.

    NOTE(review): `highter_in_channels` is a misspelling of "higher" kept for
    API compatibility with existing callers.
    """
    super(FeatureFusionModule, self).__init__()
    self.scale_factor = scale_factor
    # Target spatial size used by the upsampling step.
    self._up_kwargs = {'height': height, 'width': width}
    with self.name_scope():
        # Child creation order inside name_scope fixes parameter names.
        self.dwconv = dwconv3x3_block(
            in_channels=lower_in_channels,
            out_channels=out_channels)
        self.conv_lower_res = conv1x1_block(
            in_channels=out_channels,
            out_channels=out_channels,
            use_bias=True,
            activation=None)
        self.conv_higher_res = conv1x1_block(
            in_channels=highter_in_channels,
            out_channels=out_channels,
            use_bias=True,
            activation=None)
        # Activation applied after the two branches are combined.
        self.activ = nn.Activation("relu")
def __init__(self, channels, strides=1, downsample=None, in_channels=None):
    """
    Bottleneck with split-attention 3x3 conv; 4x output expansion.

    When strides > 1, downsampling is done by an average pool (AvD) rather
    than a strided conv.
    """
    super(Bottleneck, self).__init__()
    # Average-pool downsampling is used whenever the block is strided.
    self.avd = strides > 1
    width = channels
    expanded = 4 * channels
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=width,
        strides=1)
    self.conv2 = saconv3x3_block(
        in_channels=width,
        out_channels=width)
    # Linear expansion conv.
    self.conv3 = conv1x1_block(
        in_channels=width,
        out_channels=expanded,
        activation=None)
    if self.avd:
        self.avd_layer = nn.AvgPool2D(3, strides, padding=1)
    self.relu3 = nn.Activation('relu')
    self.downsample = downsample
def __init__(self, in_channels, out_channels):
    """
    Two-layer 1x1 prediction head: biased convs without batch norm, the
    second one linear (no activation).
    """
    super(BranchBlock, self).__init__()
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=in_channels,
        bias=True,
        use_bn=False)
    # Output conv is linear so raw scores/regressions are produced.
    self.conv2 = conv1x1_block(
        in_channels=in_channels,
        out_channels=out_channels,
        bias=True,
        use_bn=False,
        activation=None)
def __init__(self, x_dim, y_dim, use_bn):
    """
    Linear 1x1 projection from `x_dim` to `y_dim` channels.

    Bias is enabled exactly when batch norm is disabled, so the affine
    transform is never duplicated.
    """
    super(Merge, self).__init__()
    self.conv = conv1x1_block(
        in_channels=x_dim,
        out_channels=y_dim,
        bias=(not use_bn),
        use_bn=use_bn,
        activation=None)
def __init__(self, in_channels, out_channels, stride=1, bias=False, bottleneck_factor=2,
             activation=(lambda: nn.ReLU(inplace=True))):
    """
    Residual unit wrapping IbpResBottleneck; adds a linear 1x1 projection on
    the identity path whenever shape changes, then applies `activation`.
    """
    super(IbpResUnit, self).__init__()
    # Identity path needs a projection if channels or resolution differ.
    self.resize_identity = (stride != 1) or (in_channels != out_channels)
    self.body = IbpResBottleneck(
        in_channels=in_channels,
        out_channels=out_channels,
        stride=stride,
        bias=bias,
        bottleneck_factor=bottleneck_factor,
        activation=activation)
    if self.resize_identity:
        self.identity_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            bias=bias,
            activation=None)
    self.activ = get_activation_layer(activation)
def __init__(self, in_channels, out_channels, in_size, data_format="channels_last", **kwargs):
    """
    Fast-SCNN pyramid pooling, TF2 version: identity branch plus four pooled
    branches (bins 1/2/3/6), concatenated and fused by a 1x1 conv.
    """
    super(FastPyramidPooling, self).__init__(**kwargs)
    bin_sizes = [1, 2, 3, 6]
    # Each pooled branch carries a quarter of the input channels.
    branch_channels = in_channels // 4
    self.branches = Concurrent(
        data_format=data_format,
        name="branches")
    self.branches.add(Identity(name="branch1"))
    # Pooled branches are numbered from 2 (branch1 is the identity).
    for idx, bin_size in enumerate(bin_sizes, start=2):
        self.branches.add(PoolingBranch(
            in_channels=in_channels,
            out_channels=branch_channels,
            in_size=in_size,
            down_size=bin_size,
            data_format=data_format,
            name="branch{}".format(idx)))
    # Identity (in_channels) + 4 * (in_channels // 4) == 2 * in_channels.
    self.conv = conv1x1_block(
        in_channels=(in_channels * 2),
        out_channels=out_channels,
        data_format=data_format,
        name="conv")
def __init__(self, in_channels, out_channels, stride, output_padding, bias):
    """
    Decoder stage: 1x1 reduce (4x), deconv 3x3 upsampling, 1x1 expand.
    """
    super(DecoderStage, self).__init__()
    reduced = in_channels // 4
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=reduced,
        bias=bias)
    # Transposed conv performs the spatial upsampling of the stage.
    self.conv2 = deconv3x3_block(
        in_channels=reduced,
        out_channels=reduced,
        stride=stride,
        out_padding=output_padding,
        bias=bias)
    self.conv3 = conv1x1_block(
        in_channels=reduced,
        out_channels=out_channels,
        bias=bias)
def __init__(self, in_channels, out_channels, bias):
    """
    Linear 1x1 channel projection followed by 2x max-unpooling (uses pooling
    indices supplied at forward time).
    """
    super().__init__()
    self.conv = conv1x1_block(
        in_channels=in_channels,
        out_channels=out_channels,
        bias=bias,
        activation=None)
    self.unpool = nn.MaxUnpool2d(kernel_size=2)
def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, bottleneck=True,
             conv1_stride=False):
    """
    ResNet unit: bottleneck or basic body, optional 1x1 identity projection,
    final ReLU.

    NOTE(review): the identity conv passes both `activation=None` and the
    legacy `activate=False`; kept as-is for compatibility with the conv
    builder's historical API.
    """
    super(ResUnit, self).__init__()
    # Identity path needs a projection if channels or resolution differ.
    self.resize_identity = (stride != 1) or (in_channels != out_channels)
    if bottleneck:
        self.body = ResBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            padding=padding,
            dilation=dilation,
            conv1_stride=conv1_stride)
    else:
        self.body = ResBlock(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride)
    if self.resize_identity:
        self.identity_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            activation=None,
            activate=False)
    self.activ = nn.ReLU(inplace=True)
def __init__(self, in_channels, bn_eps):
    """
    Inception stem: three 3x3 convs, max pool, 1x1 + 3x3 convs, and a final
    strided 3x3 conv (in_channels -> 256, overall stride 8).
    """
    super(InceptInitBlock, self).__init__()
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=32,
        stride=2,
        padding=0,
        bn_eps=bn_eps)
    self.conv2 = conv3x3_block(
        in_channels=32,
        out_channels=32,
        stride=1,
        padding=0,
        bn_eps=bn_eps)
    self.conv3 = conv3x3_block(
        in_channels=32,
        out_channels=64,
        stride=1,
        padding=1,
        bn_eps=bn_eps)
    self.pool = nn.MaxPool2d(
        kernel_size=3,
        stride=2,
        padding=0)
    self.conv4 = conv1x1_block(
        in_channels=64,
        out_channels=80,
        stride=1,
        padding=0,
        bn_eps=bn_eps)
    self.conv5 = conv3x3_block(
        in_channels=80,
        out_channels=192,
        stride=1,
        padding=0,
        bn_eps=bn_eps)
    self.conv6 = conv3x3_block(
        in_channels=192,
        out_channels=256,
        stride=2,
        padding=0,
        bn_eps=bn_eps)
def __init__(self, in_channels, mid_channels):
    """
    Detection head: shared 1x1 stem feeding a 2-channel score branch and a
    4-channel bbox-regression branch.
    """
    super(BranchUnit, self).__init__()
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        bias=True,
        use_bn=False)
    # Classification (2 channels) and box regression (4 channels) heads.
    self.conv_score = BranchBlock(in_channels=mid_channels, out_channels=2)
    self.conv_bbox = BranchBlock(in_channels=mid_channels, out_channels=4)
def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width):
    """
    SENet bottleneck: halved-width 1x1, grouped 3x3 (stride), linear 1x1.

    The grouped-conv width is derived from cardinality and bottleneck_width
    relative to a 64-channel baseline, as in ResNeXt.
    """
    super(SENetBottleneck, self).__init__()
    mid_channels = out_channels // 4
    # Per-group width scaled against the 64-wide reference.
    D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
    group_width = cardinality * D
    group_width2 = group_width // 2
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=group_width2)
    self.conv2 = conv3x3_block(
        in_channels=group_width2,
        out_channels=group_width,
        stride=stride,
        groups=cardinality)
    # Legacy `activate=False` keeps the final conv linear.
    self.conv3 = conv1x1_block(
        in_channels=group_width,
        out_channels=out_channels,
        activate=False)
def __init__(self, in_channels, out_channels, in_size, down_size):
    """
    Pyramid-pooling branch, Gluon version: remembers the target (`in_size`)
    and pooled (`down_size`) resolutions and projects channels with 1x1 conv.
    """
    super(PoolingBranch, self).__init__()
    self.in_size = in_size
    self.down_size = down_size
    with self.name_scope():
        self.conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels)
def __init__(self, channels, mid_channels, depth, growth_rate, merge, use_bn, activation):
    """
    One hourglass pass of the IBP-pose network.

    Builds matched down/up/skip sequences (channel count grows by
    `growth_rate` per level), wraps them in an Hourglass, and adds pre/post
    blocks plus optional merge blocks when `merge` is set.
    """
    super(IbpPass, self).__init__()
    self.merge = merge
    down_seq = nn.Sequential()
    up_seq = nn.Sequential()
    skip_seq = nn.Sequential()
    upper = channels
    lower = channels
    for level in range(depth):
        lower += growth_rate
        # Down path widens; the matching up path narrows back again.
        down_seq.add_module("down{}".format(level + 1), IbpDownBlock(
            in_channels=upper,
            out_channels=lower,
            use_res_extra=(level == 0),
            activation=activation))
        up_seq.add_module("up{}".format(level + 1), IbpUpBlock(
            in_channels=lower,
            out_channels=upper,
            use_bn=use_bn,
            activation=activation))
        skip_seq.add_module("skip{}".format(level + 1), Identity())
        upper = lower
    self.hg = Hourglass(
        down_seq=down_seq,
        up_seq=up_seq,
        skip_seq=skip_seq,
        return_first_skip=False)
    self.pre_block = IbpPreBlock(
        out_channels=channels,
        use_bn=use_bn,
        activation=activation)
    # Linear 1x1 head producing the pass output.
    self.post_block = conv1x1_block(
        in_channels=channels,
        out_channels=mid_channels,
        bias=True,
        use_bn=False,
        activation=None)
    if self.merge:
        self.pre_merge_block = MergeBlock(
            in_channels=channels,
            out_channels=channels,
            use_bn=use_bn)
        self.post_merge_block = MergeBlock(
            in_channels=mid_channels,
            out_channels=channels,
            use_bn=use_bn)
def __init__(self, in_channels, out_channels, use_bn):
    """
    Linear 1x1 merge conv; bias is used only when batch norm is off, so the
    affine term is never duplicated.
    """
    super(MergeBlock, self).__init__()
    self.conv = conv1x1_block(
        in_channels=in_channels,
        out_channels=out_channels,
        bias=(not use_bn),
        use_bn=use_bn,
        activation=None)
def __init__(self, in_channels, out_channels, stride, bottleneck_factor=4):
    """
    RegNet bottleneck: 1x1 reduce, strided 3x3, linear 1x1 expand.
    """
    super(RegNetBottleneck, self).__init__()
    # Records whether the block downsamples (consumers may check this flag).
    self.resize = stride > 1
    squeezed = out_channels // bottleneck_factor
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=squeezed)
    self.conv2 = conv3x3_block(
        in_channels=squeezed,
        out_channels=squeezed,
        stride=stride)
    # Linear expansion conv.
    self.conv3 = conv1x1_block(
        in_channels=squeezed,
        out_channels=out_channels,
        activation=None)
def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, conv1_stride=False,
             bottleneck_factor=4):
    """
    ResNet bottleneck body; `conv1_stride` moves the stride from the 3x3
    conv onto the first 1x1 conv.

    NOTE(review): the last conv passes both `activation=None` and the legacy
    `activate=False`; kept as-is for compatibility with the conv builder.
    """
    super(ResBottleneck, self).__init__()
    squeezed = out_channels // bottleneck_factor
    # Exactly one of conv1/conv2 carries the stride.
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=squeezed,
        stride=(stride if conv1_stride else 1))
    self.conv2 = conv3x3_block(
        in_channels=squeezed,
        out_channels=squeezed,
        stride=(1 if conv1_stride else stride),
        padding=padding,
        dilation=dilation)
    self.conv3 = conv1x1_block(
        in_channels=squeezed,
        out_channels=out_channels,
        activation=None,
        activate=False)
def __init__(self, in_channels, out_channels, dilation, dropout_rate, bn_eps):
    """
    EDANet unit: a 1x1 conv producing the growth channels
    (out_channels - in_channels) followed by a dilated EDA block; the unit's
    forward presumably concatenates with the input (dense connectivity).
    """
    super(EDAUnit, self).__init__()
    self.use_dropout = dropout_rate != 0.0
    # Only the newly grown channels pass through the convs.
    growth_channels = out_channels - in_channels
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=growth_channels,
        bias=True)
    self.conv2 = EDABlock(
        channels=growth_channels,
        dilation=dilation,
        dropout_rate=dropout_rate,
        bn_eps=bn_eps)
    self.activ = nn.ReLU(inplace=True)
def __init__(self, stacks, backbone_out_channels, outs_channels, growth_rate, use_bn,
             in_channels=3, in_size=(256, 256)):
    """
    IBP-pose network: backbone, `stacks` hourglasses with feature blocks,
    per-scale linear output heads, and merge blocks between stacks.
    """
    super(IbpPose, self).__init__()
    assert (in_size is not None)
    activation = (lambda: nn.LeakyReLU(inplace=True))
    self.scales = 5
    self.stacks = stacks
    self.backbone = IbpBackbone(
        in_channels=in_channels,
        out_channels=backbone_out_channels,
        activation=activation)
    # One hourglass + feature block per stack.
    self.hourglass = nn.ModuleList([Hourglass(
        in_channels=backbone_out_channels,
        depth=4,
        growth_rate=growth_rate,
        use_bn=use_bn,
        activation=activation) for _ in range(stacks)])
    self.features = nn.ModuleList([FeaturesBlock(
        out_channels=backbone_out_channels,
        use_bn=use_bn,
        increase=growth_rate,
        scales=self.scales) for _ in range(stacks)])
    # Linear 1x1 prediction head per stack and per scale.
    self.outs = nn.ModuleList([nn.ModuleList([conv1x1_block(
        in_channels=backbone_out_channels,
        out_channels=outs_channels,
        bias=True,
        use_bn=False,
        activation=None) for s in range(self.scales)]) for _ in range(stacks)])
    # Merge blocks feed each stack's features/predictions into the next
    # stack, so there are stacks - 1 of them.
    self.merge_features = nn.ModuleList([nn.ModuleList([MergeBlock(
        in_channels=backbone_out_channels,
        out_channels=backbone_out_channels + s * growth_rate,
        use_bn=use_bn) for s in range(self.scales)]) for _ in range(stacks - 1)])
    self.merge_preds = nn.ModuleList([nn.ModuleList([MergeBlock(
        in_channels=outs_channels,
        out_channels=backbone_out_channels + s * growth_rate,
        use_bn=use_bn) for s in range(self.scales)]) for _ in range(stacks - 1)])
    self._initialize_weights()
def __init__(self, x_in_channels, y_in_channels, out_channels, x_in_size,
             data_format="channels_last", **kwargs):
    """
    Fuse a high-resolution branch (x) with a low-resolution branch (y),
    TF2 version: y is upsampled to `x_in_size`, refined by depthwise +
    pointwise convs, x is projected by a 1x1 conv; ReLU follows the merge.
    """
    super(FeatureFusion, self).__init__(**kwargs)
    self.x_in_size = x_in_size
    self.data_format = data_format
    # Upsample the low-res branch to the high-res spatial size.
    self.up = InterpolationBlock(
        scale_factor=None,
        out_size=x_in_size,
        data_format=data_format,
        name="up")
    self.low_dw_conv = dwconv3x3_block(
        in_channels=y_in_channels,
        out_channels=out_channels,
        data_format=data_format,
        name="low_dw_conv")
    self.low_pw_conv = conv1x1_block(
        in_channels=out_channels,
        out_channels=out_channels,
        use_bias=True,
        activation=None,
        data_format=data_format,
        name="low_pw_conv")
    self.high_conv = conv1x1_block(
        in_channels=x_in_channels,
        out_channels=out_channels,
        use_bias=True,
        activation=None,
        data_format=data_format,
        name="high_conv")
    self.activ = nn.ReLU()
def __init__(self, in_channels, out_channels, in_size):
    """
    Fast-SCNN pyramid pooling, Gluon version: identity branch plus pooled
    branches with bins 1/2/3/6, fused by a 1x1 conv.
    """
    super(FastPyramidPooling, self).__init__()
    bin_sizes = [1, 2, 3, 6]
    # Each pooled branch carries a quarter of the input channels.
    branch_channels = in_channels // 4
    with self.name_scope():
        # Child creation order inside name_scope fixes parameter names.
        self.branches = Concurrent()
        self.branches.add(Identity())
        for bin_size in bin_sizes:
            self.branches.add(PoolingBranch(
                in_channels=in_channels,
                out_channels=branch_channels,
                in_size=in_size,
                down_size=bin_size))
        # Identity (in_channels) + 4 * (in_channels // 4) == 2 * in_channels.
        self.conv = conv1x1_block(
            in_channels=(in_channels * 2),
            out_channels=out_channels)
def __init__(self, dim2, num_classes, bn_eps):
    """
    SB segmentation decoder: two decode blocks, a PReLU-activated 1x1
    classifier on the dim2 feature map, a transposed-conv output layer and a
    2x interpolation block.
    """
    super(SBDecoder, self).__init__()
    self.decode1 = SBDecodeBlock(channels=num_classes, bn_eps=bn_eps)
    self.decode2 = SBDecodeBlock(channels=num_classes, bn_eps=bn_eps)
    # Per-class PReLU follows the classifier conv.
    self.conv3c = conv1x1_block(
        in_channels=dim2,
        out_channels=num_classes,
        bn_eps=bn_eps,
        activation=(lambda: nn.PReLU(num_classes)))
    # Final learned 2x upsampling to full resolution.
    self.output = nn.ConvTranspose2d(
        in_channels=num_classes,
        out_channels=num_classes,
        kernel_size=2,
        stride=2,
        padding=0,
        output_padding=0,
        bias=False)
    self.up = InterpolationBlock(scale_factor=2)
def __init__(self, in_channels, out_channels, stride):
    """
    RegNet residual unit: bottleneck body, optional linear 1x1 identity
    projection when shape changes, final ReLU.
    """
    super(RegNetUnit, self).__init__()
    # Identity path needs a projection if channels or resolution differ.
    self.resize_identity = (stride != 1) or (in_channels != out_channels)
    self.body = RegNetBottleneck(
        in_channels=in_channels,
        out_channels=out_channels,
        stride=stride)
    if self.resize_identity:
        self.identity_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            activation=None)
    self.activ = nn.ReLU(inplace=True)
def __init__(self, in_channels, out_channels, bias, bn_eps, in_size, down_size):
    """
    Pyramid-pooling branch, PyTorch version: adaptive average pool to
    `down_size`, 1x1 channel projection, then upsample back to `in_size`.
    """
    super(PoolingBranch, self).__init__()
    self.in_size = in_size
    self.pool = nn.AdaptiveAvgPool2d(output_size=down_size)
    self.conv = conv1x1_block(
        in_channels=in_channels,
        out_channels=out_channels,
        bias=bias,
        bn_eps=bn_eps)
    # Restores the original spatial resolution for concatenation.
    self.up = InterpolationBlock(
        scale_factor=None,
        out_size=in_size)
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1,
             bias=False, dw_use_bn=True, pw_use_bn=True, bn_eps=1e-5,
             dw_activation=(lambda: nn.ReLU(inplace=True)),
             pw_activation=(lambda: nn.ReLU(inplace=True)), se_reduction=0):
    """
    Depthwise-separable conv block: depthwise conv, optional SE attention
    (enabled when se_reduction > 0, with PReLU gates), then pointwise conv.
    """
    super(DwsConvBlock, self).__init__()
    self.use_se = se_reduction > 0
    # Depthwise stage keeps the channel count.
    self.dw_conv = dwconv_block(
        in_channels=in_channels,
        out_channels=in_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        use_bn=dw_use_bn,
        bn_eps=bn_eps,
        activation=dw_activation)
    if self.use_se:
        self.se = SEBlock(
            channels=in_channels,
            reduction=se_reduction,
            round_mid=False,
            mid_activation=(lambda: nn.PReLU(in_channels // se_reduction)),
            out_activation=(lambda: nn.PReLU(in_channels)))
    # Pointwise stage mixes channels to the requested output width.
    self.pw_conv = conv1x1_block(
        in_channels=in_channels,
        out_channels=out_channels,
        bias=bias,
        use_bn=pw_use_bn,
        bn_eps=bn_eps,
        activation=pw_activation)
def __init__(self, in_channels, out_channels, in_size, down_size, data_format="channels_last",
             **kwargs):
    """
    Pyramid-pooling branch, TF2 version: records pooled/target sizes, then
    1x1 channel projection and upsampling back to `in_size`.
    """
    super(PoolingBranch, self).__init__(**kwargs)
    self.in_size = in_size
    self.down_size = down_size
    self.data_format = data_format
    self.conv = conv1x1_block(
        in_channels=in_channels,
        out_channels=out_channels,
        data_format=data_format,
        name="conv")
    # Restores the original spatial resolution for concatenation.
    self.up = InterpolationBlock(
        scale_factor=None,
        out_size=in_size,
        data_format=data_format,
        name="up")
def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width,
             identity_conv3x3):
    """
    SENet residual unit: bottleneck body, SE attention, and an identity
    projection (3x3 or 1x1 per `identity_conv3x3`) when shape changes.

    The legacy `activate=False` keeps the identity conv linear.
    """
    super(SENetUnit, self).__init__()
    # Identity path needs a projection if channels or resolution differ.
    self.resize_identity = (stride != 1) or (in_channels != out_channels)
    self.body = SENetBottleneck(
        in_channels=in_channels,
        out_channels=out_channels,
        stride=stride,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width)
    self.se = SEBlock(channels=out_channels)
    if self.resize_identity:
        if identity_conv3x3:
            self.identity_conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activate=False)
        else:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activate=False)
    self.activ = nn.ReLU(inplace=True)