def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs):
    """
    SENet-style initial block: three 3x3 conv blocks followed by max-pooling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(SEInitBlock, self).__init__(**kwargs)
    # First two convs run at half the output width.
    mid_channels = out_channels // 2
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        strides=2,
        data_format=data_format,
        name="conv1")
    self.conv2 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=mid_channels,
        data_format=data_format,
        name="conv2")
    self.conv3 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=out_channels,
        data_format=data_format,
        name="conv3")
    self.pool = MaxPool2d(
        pool_size=3,
        strides=2,
        padding=1,
        data_format=data_format,
        name="pool")
def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs):
    """
    Lightweight-OpenPose refinement block: a 1x1 pre-convolution followed by a
    two-stage body (plain 3x3 conv, then dilated 3x3 conv).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(LwopRefinementBlock, self).__init__(**kwargs)
    self.pre_conv = conv1x1_block(
        in_channels=in_channels,
        out_channels=out_channels,
        use_bias=True,
        use_bn=False,
        data_format=data_format,
        name="pre_conv")
    self.body = SimpleSequential(name="body")
    self.body.add(conv3x3_block(
        in_channels=out_channels,
        out_channels=out_channels,
        use_bias=True,
        data_format=data_format,
        name="block1"))
    # padding == dilation keeps the spatial size while widening the receptive field.
    self.body.add(conv3x3_block(
        in_channels=out_channels,
        out_channels=out_channels,
        padding=2,
        dilation=2,
        use_bias=True,
        data_format=data_format,
        name="block2"))
def __init__(self, out_channels, use_bn, activation, data_format="channels_last", **kwargs):
    """
    IBP-pose pre-block: two channel-preserving 3x3 conv blocks plus an SE block.

    Parameters:
    ----------
    out_channels : int
        Number of input/output channels (channel count is preserved).
    use_bn : bool
        Whether to use BatchNorm layers (bias is enabled only when BN is off).
    activation : function or str or None
        Activation function, or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(IbpPreBlock, self).__init__(**kwargs)
    # Bias is redundant when BN follows the convolution.
    use_bias = not use_bn
    self.conv1 = conv3x3_block(
        in_channels=out_channels,
        out_channels=out_channels,
        use_bias=use_bias,
        use_bn=use_bn,
        activation=activation,
        data_format=data_format,
        name="conv1")
    self.conv2 = conv3x3_block(
        in_channels=out_channels,
        out_channels=out_channels,
        use_bias=use_bias,
        use_bn=use_bn,
        activation=activation,
        data_format=data_format,
        name="conv2")
    self.se = SEBlock(
        channels=out_channels,
        use_conv=False,
        mid_activation=activation,
        data_format=data_format,
        name="se")
def __init__(self, in_channels, out_channels, mid_channels, strides=1, use_bias=True, use_bn=True, data_format="channels_last", **kwargs):
    """
    Inception branch: 1x1 reduction conv followed by two 3x3 convs; only the
    last conv applies the stride.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of intermediate (reduction) channels.
    strides : int or tuple(int, int), default 1
        Strides of the final convolution.
    use_bias : bool, default True
        Whether the convolutions use bias.
    use_bn : bool, default True
        Whether to use BatchNorm layers.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(InceptionDouble3x3Branch, self).__init__(**kwargs)
    common = dict(use_bias=use_bias, use_bn=use_bn, data_format=data_format)
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        name="conv1",
        **common)
    self.conv2 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=out_channels,
        name="conv2",
        **common)
    self.conv3 = conv3x3_block(
        in_channels=out_channels,
        out_channels=out_channels,
        strides=strides,
        name="conv3",
        **common)
def __init__(self, in_channels, out_channels, mid_channels, num_subblocks, data_format="channels_last", **kwargs):
    """
    HRNet initial block: two stride-2 3x3 convs (4x downsampling) followed by a
    chain of bottleneck residual units.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of channels after the stem convolutions.
    num_subblocks : int
        Number of residual sub-blocks.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(HRInitBlock, self).__init__(**kwargs)
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        strides=2,
        data_format=data_format,
        name="conv1")
    self.conv2 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=2,
        data_format=data_format,
        name="conv2")
    self.subblocks = SimpleSequential(name="subblocks")
    # Only the first unit changes the channel count; the rest are identity-shaped.
    unit_in_channels = mid_channels
    for idx in range(num_subblocks):
        self.subblocks.add(ResUnit(
            in_channels=unit_in_channels,
            out_channels=out_channels,
            strides=1,
            bottleneck=True,
            data_format=data_format,
            name="block{}".format(idx + 1)))
        unit_in_channels = out_channels
def __init__(self, in_channels, out_channels, pose_att=True, data_format="channels_last", **kwargs):
    """
    DANet head branch: conv -> attention (position or channel) -> conv ->
    1x1 classifier conv, with dropout applied in the branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    pose_att : bool, default True
        If True use a position-attention block, otherwise a channel-attention block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(DANetHeadBranch, self).__init__(**kwargs)
    mid_channels = in_channels // 4
    dropout_rate = 0.1
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        data_format=data_format,
        name="conv1")
    if pose_att:
        self.att = PosAttBlock(
            mid_channels,
            data_format=data_format,
            name="att")
    else:
        # Channel attention operates on whatever channel count it receives.
        self.att = ChaAttBlock(
            data_format=data_format,
            name="att")
    self.conv2 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=mid_channels,
        data_format=data_format,
        name="conv2")
    self.conv3 = conv1x1(
        in_channels=mid_channels,
        out_channels=out_channels,
        use_bias=True,
        data_format=data_format,
        name="conv3")
    self.dropout = nn.Dropout(
        rate=dropout_rate,
        name="dropout")
def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs):
    """
    VoVNet initial block: three 3x3 convs, the first and last with stride 2
    (overall 4x spatial downsampling).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(VoVInitBlock, self).__init__(**kwargs)
    mid_channels = out_channels // 2
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        strides=2,
        data_format=data_format,
        name="conv1")
    self.conv2 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=mid_channels,
        data_format=data_format,
        name="conv2")
    self.conv3 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=out_channels,
        strides=2,
        data_format=data_format,
        name="conv3")
def __init__(self, in_channels, out_channels, stride):
    """
    Simple residual block body: a strided 3x3 conv block followed by a
    channel-preserving 3x3 conv block without activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple(int, int)
        Stride of the first convolution.
    """
    # NOTE(review): this block passes `stride=` while sibling blocks in this
    # file use `strides=` — presumably a different backend's conv3x3_block;
    # confirm the helper's keyword before unifying.
    super(ResBlock, self).__init__()
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=out_channels,
        stride=stride)
    self.conv2 = conv3x3_block(
        in_channels=out_channels,
        out_channels=out_channels,
        activation=None)
def __init__(self, channels, init_block_channels, alpha=0.1, in_channels=3, in_size=(224, 224), classes=1000, data_format="channels_last", **kwargs):
    """
    DarkNet-53 model constructor: stem conv, a sequence of stages (each a
    strided downsampling conv followed by DarkUnits), average pooling, and a
    dense classifier.

    Parameters:
    ----------
    channels : list(list(int))
        Number of output channels for each unit, grouped by stage.
    init_block_channels : int
        Number of output channels of the initial block.
    alpha : float, default 0.1
        Slope coefficient for Leaky ReLU activation.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple(int, int), default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(DarkNet53, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    self.data_format = data_format
    self.features = tf.keras.Sequential(name="features")
    self.features.add(
        conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            activation=nn.LeakyReLU(alpha=alpha),
            data_format=data_format,
            name="init_block"))
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        stage = tf.keras.Sequential(name="stage{}".format(i + 1))
        for j, out_channels in enumerate(channels_per_stage):
            if j == 0:
                # First unit of each stage downsamples with a plain strided conv.
                stage.add(
                    conv3x3_block(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=2,
                        activation=nn.LeakyReLU(alpha=alpha),
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
            else:
                # Remaining units are residual DarkUnits at the same resolution.
                stage.add(
                    DarkUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        alpha=alpha,
                        data_format=data_format,
                        name="unit{}".format(j + 1)))
            in_channels = out_channels
        self.features.add(stage)
    # pool_size=7 assumes a 224x224 input reaching 7x7 here — TODO confirm.
    self.features.add(
        nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
    self.output1 = nn.Dense(
        units=classes,
        input_dim=in_channels,
        name="output1")
def __init__(self, in_channels, data_format="channels_last", **kwargs):
    """
    Xception initial block: two unpadded 3x3 convs, the first strided.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(XceptionInitBlock, self).__init__(**kwargs)
    # Channel counts (32, 64) are fixed by the Xception architecture.
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=32,
        strides=2,
        padding=0,
        data_format=data_format,
        name="conv1")
    self.conv2 = conv3x3_block(
        in_channels=32,
        out_channels=64,
        strides=1,
        padding=0,
        data_format=data_format,
        name="conv2")
def __init__(self, in_channels_list, out_channels_list, num_modules, num_branches, num_subblocks, data_format="channels_last", **kwargs):
    """
    HRNet stage: a transition layer adapting the previous stage's branches
    (channel-matching convs / identities, plus strided conv chains for newly
    created branches), followed by a stack of HRBlock modules.

    Parameters:
    ----------
    in_channels_list : list(int)
        Number of input channels per incoming branch.
    out_channels_list : list(int)
        Number of output channels per outgoing branch.
    num_modules : int
        Number of HRBlock modules in the stage.
    num_branches : int
        Number of parallel branches.
    num_subblocks : int
        Number of sub-blocks inside each HRBlock.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(HRStage, self).__init__(**kwargs)
    self.branches = num_branches
    self.in_channels_list = out_channels_list
    in_branches = len(in_channels_list)
    out_branches = len(out_channels_list)
    self.transition = SimpleSequential(name="transition")
    for i in range(out_branches):
        if i < in_branches:
            # Existing branch: adapt channels if they differ, else pass through.
            if out_channels_list[i] != in_channels_list[i]:
                self.transition.add(conv3x3_block(
                    in_channels=in_channels_list[i],
                    out_channels=out_channels_list[i],
                    strides=1,
                    data_format=data_format,
                    name="transition/block{}".format(i + 1)))
            else:
                self.transition.add(Identity(name="transition/block{}".format(i + 1)))
        else:
            # New branch: chain of stride-2 convs from the lowest-resolution
            # input; only the last conv in the chain changes the channel count.
            conv3x3_seq = SimpleSequential(name="transition/conv3x3_seq{}".format(i + 1))
            for j in range(i + 1 - in_branches):
                in_channels_i = in_channels_list[-1]
                out_channels_i = out_channels_list[i] if j == i - in_branches else in_channels_i
                conv3x3_seq.add(conv3x3_block(
                    in_channels=in_channels_i,
                    out_channels=out_channels_i,
                    strides=2,
                    data_format=data_format,
                    name="subblock{}".format(j + 1)))
            self.transition.add(conv3x3_seq)
    self.layers = SimpleSequential(name="layers")
    for i in range(num_modules):
        self.layers.add(HRBlock(
            in_channels_list=self.in_channels_list,
            out_channels_list=out_channels_list,
            num_branches=num_branches,
            num_subblocks=num_subblocks,
            data_format=data_format,
            name="block{}".format(i + 1)))
        # Track the channel layout the last HRBlock actually produced.
        self.in_channels_list = list(self.layers[-1].in_channels_list)
def __init__(self, in_channels, out_channels, skip_channels, mid_channels, strides, data_format="channels_last", **kwargs):
    """
    SelecSLS unit: a strided 3x3 branch feeding two SelecSLS blocks, whose
    outputs (plus an optional skip) are fused by a 1x1 conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    skip_channels : int
        Number of skip-connection channels (concatenated only when strides == 1).
    mid_channels : int
        Number of middle channels.
    strides : int or tuple(int, int)
        Strides of the first branch convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(SelecSLSUnit, self).__init__(**kwargs)
    self.data_format = data_format
    self.resize = (strides == 2)
    mid2_channels = mid_channels // 2
    # Fusion input: branch1 (mid) + branch2 (mid/2) + branch3 (mid/2) = 2*mid,
    # plus the skip tensor when no downsampling occurs.
    last_channels = 2 * mid_channels + (skip_channels if strides == 1 else 0)
    self.branch1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        strides=strides,
        data_format=data_format,
        name="branch1")
    self.branch2 = SelecSLSBlock(
        in_channels=mid_channels,
        out_channels=mid2_channels,
        data_format=data_format,
        name="branch2")
    self.branch3 = SelecSLSBlock(
        in_channels=mid2_channels,
        out_channels=mid2_channels,
        data_format=data_format,
        name="branch3")
    self.last_conv = conv1x1_block(
        in_channels=last_channels,
        out_channels=out_channels,
        data_format=data_format,
        name="last_conv")
def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs):
    """
    PeleeNet stem block: a strided first conv, then two concurrent branches
    (a strided PeleeBranch1 and a max-pool) whose concatenation is fused by a
    1x1 conv back to `out_channels`.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(StemBlock, self).__init__(**kwargs)
    mid1_channels = out_channels // 2
    # Both branches emit `out_channels`, so the concat carries twice that.
    mid2_channels = out_channels * 2
    self.first_conv = conv3x3_block(
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        data_format=data_format,
        name="first_conv")
    self.branches = Concurrent(data_format=data_format, name="branches")
    self.branches.add(PeleeBranch1(
        in_channels=out_channels,
        out_channels=out_channels,
        mid_channels=mid1_channels,
        strides=2,
        data_format=data_format,
        name="branch1"))
    self.branches.add(MaxPool2d(
        pool_size=2,
        strides=2,
        padding=0,
        data_format=data_format,
        name="branch2"))
    self.last_conv = conv1x1_block(
        in_channels=mid2_channels,
        out_channels=out_channels,
        data_format=data_format,
        name="last_conv")
def __init__(self, in_channels_list, out_channels_list, data_format="channels_last", **kwargs):
    """
    HRNet final (classification head) block: per-branch bottleneck units,
    strided down-blocks merging branches top-down, then a 1x1 expansion conv.

    Parameters:
    ----------
    in_channels_list : list(int)
        Number of input channels per branch.
    out_channels_list : list(int)
        Number of output channels per branch.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(HRFinalBlock, self).__init__(**kwargs)
    self.inc_blocks = SimpleSequential(name="inc_blocks")
    for i, in_channels_i in enumerate(in_channels_list):
        self.inc_blocks.add(ResUnit(
            in_channels=in_channels_i,
            out_channels=out_channels_list[i],
            strides=1,
            bottleneck=True,
            data_format=data_format,
            name="inc_blocks/block{}".format(i + 1)))
    self.down_blocks = SimpleSequential(name="down_blocks")
    for i in range(len(in_channels_list) - 1):
        self.down_blocks.add(conv3x3_block(
            in_channels=out_channels_list[i],
            out_channels=out_channels_list[i + 1],
            strides=2,
            use_bias=True,
            data_format=data_format,
            name="down_blocks/block{}".format(i + 1)))
    # NOTE(review): 1024 -> 2048 is hard-coded; presumably matches the last
    # entry of out_channels_list in the standard config — confirm with callers.
    self.final_layer = conv1x1_block(
        in_channels=1024,
        out_channels=2048,
        strides=1,
        use_bias=True,
        data_format=data_format,
        name="final_layer")
def __init__(self, in_channels, mid_channels, out_channels, bn_eps, data_format="channels_last", **kwargs):
    """
    SINet encoder initial block: a strided 3x3 conv followed by a strided
    depthwise-separable 3x3 conv, both with PReLU activations.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small epsilon for the BatchNorm layers.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(SBEncoderInitBlock, self).__init__(**kwargs)
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        strides=2,
        bn_eps=bn_eps,
        activation=(lambda: PReLU2(
            mid_channels,
            data_format=data_format,
            name="activ")),
        data_format=data_format,
        name="conv1")
    # Depthwise part: no BN and no activation; pointwise part: PReLU.
    self.conv2 = dwsconv3x3_block(
        in_channels=mid_channels,
        out_channels=out_channels,
        strides=2,
        dw_use_bn=False,
        bn_eps=bn_eps,
        dw_activation=None,
        pw_activation=(lambda: PReLU2(
            out_channels,
            data_format=data_format,
            name="activ")),
        se_reduction=1,
        data_format=data_format,
        name="conv2")
def __init__(self, in_channels, out_channels, strides, conv1_ibn, data_format="channels_last", **kwargs):
    """
    IBN-ResNet bottleneck: IBN-capable 1x1 reduction, strided 3x3 conv, and a
    1x1 expansion without activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple(int, int)
        Strides of the middle convolution.
    conv1_ibn : bool
        Whether to use Instance-Batch Normalization in the first conv.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(IBNResBottleneck, self).__init__(**kwargs)
    # Standard ResNet bottleneck expansion factor of 4.
    mid_channels = out_channels // 4
    self.conv1 = ibn_conv1x1_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        use_ibn=conv1_ibn,
        data_format=data_format,
        name="conv1")
    self.conv2 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=strides,
        data_format=data_format,
        name="conv2")
    self.conv3 = conv1x1_block(
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=None,
        data_format=data_format,
        name="conv3")
def __init__(self, in_channels, out_channels, strides, cardinality, bottleneck_width, data_format="channels_last", **kwargs):
    """
    SENet bottleneck: 1x1 reduction to half the group width, grouped strided
    3x3 conv, then 1x1 expansion without activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple(int, int)
        Strides of the grouped convolution.
    cardinality : int
        Number of convolution groups.
    bottleneck_width : int
        Width of each bottleneck group.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(SENetBottleneck, self).__init__(**kwargs)
    mid_channels = out_channels // 4
    # ResNeXt-style group width scaled by the bottleneck width.
    D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
    group_width = cardinality * D
    group_width2 = group_width // 2
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=group_width2,
        data_format=data_format,
        name="conv1")
    self.conv2 = conv3x3_block(
        in_channels=group_width2,
        out_channels=group_width,
        strides=strides,
        groups=cardinality,
        data_format=data_format,
        name="conv2")
    self.conv3 = conv1x1_block(
        in_channels=group_width,
        out_channels=out_channels,
        activation=None,
        data_format=data_format,
        name="conv3")
def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs):
    """
    Lightweight-OpenPose encoder final block: 1x1 pre-conv, three BN-free
    depthwise-separable 3x3 blocks with ELU activations, and a 3x3 post-conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(LwopEncoderFinalBlock, self).__init__(**kwargs)
    self.pre_conv = conv1x1_block(
        in_channels=in_channels,
        out_channels=out_channels,
        use_bias=True,
        use_bn=False,
        data_format=data_format,
        name="pre_conv")
    self.body = SimpleSequential(name="body")
    for idx in range(3):
        # Activations are passed as factories so each block owns its layer.
        self.body.add(dwsconv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            dw_use_bn=False,
            pw_use_bn=False,
            dw_activation=(lambda: nn.ELU()),
            pw_activation=(lambda: nn.ELU()),
            data_format=data_format,
            name="block{}".format(idx + 1)))
    self.post_conv = conv3x3_block(
        in_channels=out_channels,
        out_channels=out_channels,
        use_bias=True,
        use_bn=False,
        data_format=data_format,
        name="post_conv")
def __init__(self, channels, reduction_ratio=16, num_dil_convs=2, dilation=4, data_format="channels_last", **kwargs):
    """
    BAM spatial gate: 1x1 reduction conv, a stack of dilated 3x3 convs, and a
    final 1x1 conv projecting to a single-channel spatial attention map.

    Parameters:
    ----------
    channels : int
        Number of input channels.
    reduction_ratio : int, default 16
        Channel reduction ratio for the intermediate convolutions.
    num_dil_convs : int, default 2
        Number of dilated convolutions.
    dilation : int, default 4
        Dilation (and matching padding) of the 3x3 convolutions.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(SpatialGate, self).__init__(**kwargs)
    mid_channels = channels // reduction_ratio
    self.init_conv = conv1x1_block(
        in_channels=channels,
        out_channels=mid_channels,
        strides=1,
        use_bias=True,
        data_format=data_format,
        name="init_conv")
    self.dil_convs = SimpleSequential(name="dil_convs")
    for i in range(num_dil_convs):
        # Fix: register sub-blocks via the container's `add()` (as every other
        # SimpleSequential in this file does) instead of appending to
        # `.children` directly, which bypassed named registration.
        self.dil_convs.add(
            conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=1,
                padding=dilation,
                dilation=dilation,
                use_bias=True,
                data_format=data_format,
                name="conv{}".format(i + 1)))
    self.final_conv = conv1x1(
        in_channels=mid_channels,
        out_channels=1,
        strides=1,
        use_bias=True,
        data_format=data_format,
        name="final_conv")
def __init__(self, in_channels, keypoints, data_format="channels_last", **kwargs):
    """
    Lightweight-OpenPose decoder initial block: three channel-preserving,
    BN-free 3x3 convs feeding two bend heads (heatmaps and PAFs).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    keypoints : int
        Number of keypoints; the PAF head emits two channels per keypoint.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(LwopDecoderInitBlock, self).__init__(**kwargs)
    self.data_format = data_format
    num_heatmap = keypoints
    num_paf = 2 * keypoints
    bend_mid_channels = 512
    self.body = SimpleSequential(name="body")
    for idx in range(3):
        self.body.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            use_bias=True,
            use_bn=False,
            data_format=data_format,
            name="block{}".format(idx + 1)))
    self.heatmap_bend = LwopDecoderBend(
        in_channels=in_channels,
        mid_channels=bend_mid_channels,
        out_channels=num_heatmap,
        data_format=data_format,
        name="heatmap_bend")
    self.paf_bend = LwopDecoderBend(
        in_channels=in_channels,
        mid_channels=bend_mid_channels,
        out_channels=num_paf,
        data_format=data_format,
        name="paf_bend")
def __init__(self, in_channels, out_channels, strides, use_bias=True, bottleneck_factor=2, squeeze_out=False, data_format="channels_last", **kwargs):
    """
    Lightweight-OpenPose residual bottleneck: 1x1 reduction, strided 3x3 conv,
    and 1x1 expansion without activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple(int, int)
        Strides of the middle convolution.
    use_bias : bool, default True
        Whether the convolutions use bias.
    bottleneck_factor : int, default 2
        Channel reduction factor for the bottleneck.
    squeeze_out : bool, default False
        If True, derive the middle width from the output channels; otherwise
        from the input channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(LwopResBottleneck, self).__init__(**kwargs)
    if squeeze_out:
        mid_channels = out_channels // bottleneck_factor
    else:
        mid_channels = in_channels // bottleneck_factor
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        use_bias=use_bias,
        data_format=data_format,
        name="conv1")
    self.conv2 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=strides,
        use_bias=use_bias,
        data_format=data_format,
        name="conv2")
    self.conv3 = conv1x1_block(
        in_channels=mid_channels,
        out_channels=out_channels,
        use_bias=use_bias,
        activation=None,
        data_format=data_format,
        name="conv3")
def __init__(self, in_channels, out_channels, use_deptwise, activation, data_format="channels_last", **kwargs):
    """
    HarDNet initial block: strided 3x3 conv, a second conv (1x1 in the
    depthwise variant, 3x3 otherwise), then a downsampling step (depthwise
    strided conv or max-pooling).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_deptwise : bool
        Whether the depthwise variant of the architecture is used.
        (Spelling kept for interface compatibility.)
    activation : function or str or None
        Activation function, or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(HarDInitBlock, self).__init__(**kwargs)
    mid_channels = out_channels // 2
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        strides=2,
        activation=activation,
        data_format=data_format,
        name="conv1")
    conv2_block_class = conv1x1_block if use_deptwise else conv3x3_block
    self.conv2 = conv2_block_class(
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=activation,
        data_format=data_format,
        name="conv2")
    if use_deptwise:
        self.downsample = dwconv3x3_block(
            in_channels=out_channels,
            out_channels=out_channels,
            strides=2,
            activation=None,
            data_format=data_format,
            name="downsample")
    else:
        self.downsample = MaxPool2d(
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name="downsample")
def __init__(self, in_channels_low, in_channels_high, out_channels, classes, data_format="channels_last", **kwargs):
    """
    ICNet cascade feature fusion (CFF) block: upsample the low-resolution
    branch, fuse it (dilated conv) with the high-resolution branch (1x1 conv),
    and expose a 1x1 auxiliary classifier conv.

    Parameters:
    ----------
    in_channels_low : int
        Number of channels in the low-resolution input.
    in_channels_high : int
        Number of channels in the high-resolution input.
    out_channels : int
        Number of output channels.
    classes : int
        Number of classes for the auxiliary classifier conv.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(CFFBlock, self).__init__(**kwargs)
    self.up = InterpolationBlock(
        scale_factor=2,
        data_format=data_format,
        name="up")
    self.conv_low = conv3x3_block(
        in_channels=in_channels_low,
        out_channels=out_channels,
        padding=2,
        dilation=2,
        activation=None,
        data_format=data_format,
        name="conv_low")
    # NOTE(review): attribute name `conv_hign` looks like a typo for
    # `conv_high`; kept as-is because external code may reference it.
    self.conv_hign = conv1x1_block(
        in_channels=in_channels_high,
        out_channels=out_channels,
        activation=None,
        data_format=data_format,
        name="conv_hign")
    self.activ = nn.ReLU()
    self.conv_cls = conv1x1(
        in_channels=out_channels,
        out_channels=classes,
        data_format=data_format,
        name="conv_cls")
def __init__(self, in_channels, out_channels, bottleneck_factor=4, data_format="channels_last", **kwargs):
    """
    FCN-8s final block: 3x3 bottleneck conv, dropout, then a biased 1x1
    classifier conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels; must be divisible by `bottleneck_factor`.
    out_channels : int
        Number of output channels.
    bottleneck_factor : int, default 4
        Channel reduction factor for the intermediate conv.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(FCNFinalBlock, self).__init__(**kwargs)
    assert (in_channels % bottleneck_factor == 0)
    self.data_format = data_format
    mid_channels = in_channels // bottleneck_factor
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        data_format=data_format,
        name="conv1")
    self.dropout = nn.Dropout(rate=0.1, name="dropout")
    self.conv2 = conv1x1(
        in_channels=mid_channels,
        out_channels=out_channels,
        use_bias=True,
        data_format=data_format,
        name="conv2")
def dark_convYxY(in_channels, out_channels, alpha, pointwise, data_format="channels_last", **kwargs):
    """
    DarkNet unit: a 1x1 (pointwise) or 3x3 conv block with Leaky ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    alpha : float
        Slope coefficient for Leaky ReLU activation.
    pointwise : bool
        Whether to use a 1x1 (pointwise) convolution instead of a 3x3 one.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    The constructed convolution block.
    """
    block_class = conv1x1_block if pointwise else conv3x3_block
    return block_class(
        in_channels=in_channels,
        out_channels=out_channels,
        activation=nn.LeakyReLU(alpha=alpha),
        data_format=data_format,
        **kwargs)
def __init__(self, channels, init_block_channels, final_block_channels, classifier_mid_channels, kernels3, exp_factors, use_se, first_stride, bn_use_global_stats=False, in_channels=3, in_size=(224, 224), classes=1000, dropout=0.2, **kwargs):
    """
    GhostNet model constructor (MXNet Gluon style): stem conv, staged
    GhostUnits, a final 1x1 conv, average pooling, and a Ghost classifier.

    Parameters:
    ----------
    channels : list(list(int))
        Number of output channels for each unit, grouped by stage.
    init_block_channels : int
        Number of output channels of the initial block.
    final_block_channels : int
        Number of output channels of the final features block.
    classifier_mid_channels : int
        Number of middle channels in the classifier.
    kernels3 : list(list(int))
        Per-unit flags (1 means use a 3x3 kernel).
    exp_factors : list(list(int))
        Per-unit expansion factors.
    use_se : list(list(int))
        Per-unit flags (1 means use an SE block).
    first_stride : bool
        Whether the first stage's first unit uses stride 2.
    bn_use_global_stats : bool, default False
        Whether BatchNorm uses global moving statistics.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple(int, int), default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    dropout : float, default 0.2
        Dropout rate in the classifier.
    """
    super(GhostNet, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    with self.name_scope():
        self.features = nn.HybridSequential(prefix="")
        self.features.add(
            conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
            with stage.name_scope():
                for j, out_channels in enumerate(channels_per_stage):
                    # Downsample on the first unit of every stage except
                    # (optionally) the very first stage.
                    strides = 2 if (j == 0) and ((i != 0) or first_stride) else 1
                    use_kernel3 = kernels3[i][j] == 1
                    exp_factor = exp_factors[i][j]
                    use_se_flag = use_se[i][j] == 1
                    stage.add(
                        GhostUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            use_kernel3=use_kernel3,
                            exp_factor=exp_factor,
                            use_se=use_se_flag,
                            bn_use_global_stats=bn_use_global_stats))
                    in_channels = out_channels
            self.features.add(stage)
        self.features.add(
            conv1x1_block(
                in_channels=in_channels,
                out_channels=final_block_channels,
                bn_use_global_stats=bn_use_global_stats))
        in_channels = final_block_channels
        # pool_size=7 assumes a 224x224 input reaching 7x7 here — TODO confirm.
        self.features.add(nn.AvgPool2D(pool_size=7, strides=1))
        self.output = nn.HybridSequential(prefix="")
        self.output.add(
            GhostClassifier(
                in_channels=in_channels,
                out_channels=classes,
                mid_channels=classifier_mid_channels,
                dropout=dropout))
        self.output.add(nn.Flatten())
def __init__(self, in_channels_list, out_channels_list, links_list, use_deptwise, use_dropout, downsampling, activation, data_format="channels_last", **kwargs):
    """
    HarDNet unit: a sequence of conv blocks (one per link list entry), optional
    dropout, a 1x1 fusion conv, and an optional downsampling step.

    Parameters:
    ----------
    in_channels_list : list(int)
        Number of input channels for each block (last entry feeds the fusion conv).
    out_channels_list : list(int)
        Number of output channels for each block (last entry is the unit output).
    links_list : list
        Harmonic connectivity description; its length sets the block count.
    use_deptwise : bool
        Whether to use inverted depthwise-separable blocks instead of plain 3x3.
    use_dropout : bool
        Whether to apply dropout before the fusion conv.
    downsampling : bool
        Whether to downsample the output.
    activation : function or str or None
        Activation function, or name of activation function.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(HarDUnit, self).__init__(**kwargs)
    self.data_format = data_format
    self.links_list = links_list
    self.use_dropout = use_dropout
    self.downsampling = downsampling
    self.blocks = SimpleSequential(name="blocks")
    for i in range(len(links_list)):
        in_channels = in_channels_list[i]
        out_channels = out_channels_list[i]
        if use_deptwise:
            # Depthwise variant: activation on the pointwise part only.
            unit = invdwsconv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                pw_activation=activation,
                dw_activation=None,
                data_format=data_format,
                name="block{}".format(i + 1))
        else:
            unit = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                data_format=data_format,
                name="block{}".format(i + 1))
        self.blocks.add(unit)
    if self.use_dropout:
        self.dropout = nn.Dropout(rate=0.1, name="dropout")
    self.conv = conv1x1_block(
        in_channels=in_channels_list[-1],
        out_channels=out_channels_list[-1],
        activation=activation,
        data_format=data_format,
        name="conv")
    if self.downsampling:
        if use_deptwise:
            self.downsample = dwconv3x3_block(
                in_channels=out_channels_list[-1],
                out_channels=out_channels_list[-1],
                strides=2,
                activation=None,
                data_format=data_format,
                name="downsample")
        else:
            self.downsample = MaxPool2d(
                pool_size=2,
                strides=2,
                data_format=data_format,
                name="downsample")
def __init__(self, in_channels, out_channels, mid_channels, data_format="channels_last", **kwargs):
    """
    PeleeNet dense-block branch with two stacked 3x3 convs after a 1x1
    reduction (effective 5x5 receptive field).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of intermediate (reduction) channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(PeleeBranch2, self).__init__(**kwargs)
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        data_format=data_format,
        name="conv1")
    self.conv2 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=out_channels,
        data_format=data_format,
        name="conv2")
    self.conv3 = conv3x3_block(
        in_channels=out_channels,
        out_channels=out_channels,
        data_format=data_format,
        name="conv3")
def __init__(self, in_channels, data_format="channels_last", **kwargs):
    """
    PolyNet initial block: Inception-style stem of three 3x3 convs followed by
    three composite sub-blocks.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(PolyInitBlock, self).__init__(**kwargs)
    # Fixed channel plan (32 -> 32 -> 64) per the PolyNet stem.
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=32,
        strides=2,
        padding=0,
        data_format=data_format,
        name="conv1")
    self.conv2 = conv3x3_block(
        in_channels=32,
        out_channels=32,
        padding=0,
        data_format=data_format,
        name="conv2")
    self.conv3 = conv3x3_block(
        in_channels=32,
        out_channels=64,
        data_format=data_format,
        name="conv3")
    self.block1 = PolyBlock3a(data_format=data_format, name="block1")
    self.block2 = PolyBlock4a(data_format=data_format, name="block2")
    self.block3 = PolyBlock5a(data_format=data_format, name="block3")
def __init__(self, in_channels, out_channels, data_format="channels_last", **kwargs):
    """
    Inception branch consisting of a single strided, unpadded 3x3 conv block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    super(Conv3x3Branch, self).__init__(**kwargs)
    self.conv = conv3x3_block(
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        padding=0,
        data_format=data_format,
        name="conv")