def __init__(self, in_channels, bn_eps):
    """Inception stem: three 3x3 convs, a max-pool, then a 1x1 bottleneck
    and two further 3x3 convs, widening 32 -> 256 channels. Spatial size is
    reduced by conv1, the pool and conv6 (each stride 2).

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    bn_eps : float
        Epsilon passed to every batch-norm layer in the stem.
    """
    super(InceptInitBlock, self).__init__()
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=32,
        stride=2,
        padding=0,
        bn_eps=bn_eps)
    self.conv2 = conv3x3_block(
        in_channels=32,
        out_channels=32,
        stride=1,
        padding=0,
        bn_eps=bn_eps)
    self.conv3 = conv3x3_block(
        in_channels=32,
        out_channels=64,
        stride=1,
        padding=1,
        bn_eps=bn_eps)
    self.pool = nn.MaxPool2d(
        kernel_size=3,
        stride=2,
        padding=0)
    self.conv4 = conv1x1_block(
        in_channels=64,
        out_channels=80,
        stride=1,
        padding=0,
        bn_eps=bn_eps)
    self.conv5 = conv3x3_block(
        in_channels=80,
        out_channels=192,
        stride=1,
        padding=0,
        bn_eps=bn_eps)
    self.conv6 = conv3x3_block(
        in_channels=192,
        out_channels=256,
        stride=2,
        padding=0,
        bn_eps=bn_eps)
def __init__(self, in_channels, out_channels, bn_use_global_stats=False, bn_cudnn_off=False, **kwargs):
    """SENet initial block (Gluon): three 3x3 convolutions — the first with
    stride 2 — followed by a stride-2 max-pool. The middle width is
    ``out_channels // 2``.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool, default False
        Whether batch-norm layers use global statistics.
    bn_cudnn_off : bool, default False
        Whether to disable cuDNN for batch-norm.
    """
    super(SEInitBlock, self).__init__(**kwargs)
    mid_channels = out_channels // 2
    with self.name_scope():
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            strides=2,
            bn_use_global_stats=bn_use_global_stats,
            bn_cudnn_off=bn_cudnn_off)
        self.conv2 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=mid_channels,
            bn_use_global_stats=bn_use_global_stats,
            bn_cudnn_off=bn_cudnn_off)
        self.conv3 = conv3x3_block(
            in_channels=mid_channels,
            out_channels=out_channels,
            bn_use_global_stats=bn_use_global_stats,
            bn_cudnn_off=bn_cudnn_off)
        self.pool = nn.MaxPool2D(
            pool_size=3,
            strides=2,
            padding=1)
def __init__(self, in_channels, out_channels, stride):
    """Plain residual body: two 3x3 conv blocks. The second block is built
    without an activation (both the ``activation=None`` argument and the
    legacy ``activate=False`` flag are passed, matching the project's
    conv-block API).

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple(int, int)
        Stride of the first convolution.
    """
    super(ResBlock, self).__init__()
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=out_channels,
        stride=stride)
    self.conv2 = conv3x3_block(
        in_channels=out_channels,
        out_channels=out_channels,
        activation=None,
        activate=False)
def __init__(self, in_channels, out_channels):
    """SENet initial block (PyTorch): three 3x3 convolutions — the first
    with stride 2 — followed by a stride-2 max-pool. The middle width is
    ``out_channels // 2``.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    super(SEInitBlock, self).__init__()
    mid_channels = out_channels // 2
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        stride=2)
    self.conv2 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=mid_channels)
    self.conv3 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=out_channels)
    self.pool = nn.MaxPool2d(
        kernel_size=3,
        stride=2,
        padding=1)
def __init__(self, in_channels, out_channels, stride, bias=False, bottleneck_factor=2, activation=(lambda: nn.ReLU(inplace=True))):
    """Bottleneck for the IBP-pose residual unit: 1x1 reduce -> 3x3
    (carries the stride) -> 1x1 expand with no final activation.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple(int, int)
        Stride of the middle 3x3 convolution.
    bias : bool, default False
        Whether the conv layers use a bias.
    bottleneck_factor : int, default 2
        Channel reduction factor for the middle stage.
    activation : callable, default lambda: nn.ReLU(inplace=True)
        Factory for the activation used in the first two stages.
    """
    super(IbpResBottleneck, self).__init__()
    mid_channels = out_channels // bottleneck_factor
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        bias=bias,
        activation=activation)
    self.conv2 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=mid_channels,
        stride=stride,
        bias=bias,
        activation=activation)
    # Expansion stage deliberately has no activation (added after the
    # residual sum by the enclosing unit).
    self.conv3 = conv1x1_block(
        in_channels=mid_channels,
        out_channels=out_channels,
        bias=bias,
        activation=None)
def __init__(self, in_channels, out_channels, activation):
    """IBP-pose backbone: a stride-2 7x7 conv, a residual unit, a max-pool,
    a second residual unit, and a sequential branch of six dilated 3x3 conv
    blocks (dilations 3, 3, 4, 4, 5, 5).

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Target channel width; intermediate widths are out_channels // 4 and
        out_channels // 2.
    activation : callable
        Activation-layer factory shared by all sub-blocks.
    """
    super(IbpBackbone, self).__init__()
    dilations = (3, 3, 4, 4, 5, 5)
    mid1_channels = out_channels // 4
    mid2_channels = out_channels // 2
    self.conv1 = conv7x7_block(
        in_channels=in_channels,
        out_channels=mid1_channels,
        stride=2,
        activation=activation)
    self.res1 = IbpResUnit(
        in_channels=mid1_channels,
        out_channels=mid2_channels,
        activation=activation)
    self.pool = nn.MaxPool2d(
        kernel_size=2,
        stride=2)
    self.res2 = IbpResUnit(
        in_channels=mid2_channels,
        out_channels=mid2_channels,
        activation=activation)
    # One conv block per dilation value; padding equals dilation so the
    # spatial size is preserved through the branch.
    self.dilation_branch = nn.Sequential()
    for branch_idx, dilation in enumerate(dilations):
        block = conv3x3_block(
            in_channels=mid2_channels,
            out_channels=mid2_channels,
            padding=dilation,
            dilation=dilation,
            activation=activation)
        self.dilation_branch.add_module("block{}".format(branch_idx + 1), block)
def __init__(self, in_channels, channels, data_format="channels_last", **kwargs):
    """Network stem (TF/Keras): one regular 3x3 conv followed by two
    depthwise-separable 3x3 convs, each with stride 2 (8x total spatial
    reduction).

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    channels : sequence of 3 ints
        Output channel counts of the three stages.
    data_format : str, default 'channels_last'
        Tensor layout ('channels_last' or 'channels_first').
    """
    super(Stem, self).__init__(**kwargs)
    assert (len(channels) == 3)
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=channels[0],
        strides=2,
        padding=0,
        data_format=data_format,
        name="conv1")
    self.conv2 = dwsconv3x3_block(
        in_channels=channels[0],
        out_channels=channels[1],
        strides=2,
        data_format=data_format,
        name="conv2")
    self.conv3 = dwsconv3x3_block(
        in_channels=channels[1],
        out_channels=channels[2],
        strides=2,
        data_format=data_format,
        name="conv3")
def __init__(self, in_channels, out_channels, pose_att=True):
    """DANet head branch: 3x3 conv, an attention block, another 3x3 conv,
    a biased 1x1 projection, and dropout (p=0.1).

    Parameters
    ----------
    in_channels : int
        Number of input channels; the internal width is in_channels // 4.
    out_channels : int
        Number of output channels of the final 1x1 conv.
    pose_att : bool, default True
        If True use PosAttBlock (position attention), else ChaAttBlock
        (channel attention, which takes no channel argument here —
        presumably it infers the width; verify against its definition).
    """
    super(DANetHeadBranch, self).__init__()
    mid_channels = in_channels // 4
    dropout_rate = 0.1
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=mid_channels)
    self.att = PosAttBlock(mid_channels) if pose_att else ChaAttBlock()
    self.conv2 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=mid_channels)
    self.conv3 = conv1x1(
        in_channels=mid_channels,
        out_channels=out_channels,
        bias=True)
    self.dropout = nn.Dropout(p=dropout_rate, inplace=False)
def __init__(self, out_channels, use_bn, increase, scales):
    """Per-scale feature heads: for each of ``scales`` heatmap scales,
    build a branch of two 3x3 conv blocks plus an SE block. Branch i
    consumes (out_channels + i * increase) input channels.

    Parameters
    ----------
    out_channels : int
        Channel width of each branch's output.
    use_bn : bool
        Whether conv blocks use batch-norm (bias is enabled only when
        batch-norm is off).
    increase : int
        Per-scale channel growth of the branch inputs.
    scales : int
        Number of heatmap scales (and branches).
    """
    super(Features, self).__init__()
    self.scales = scales
    # Regress `scales` different scales of heatmaps per stack.
    branches = []
    for scale_idx in range(scales):
        branches.append(nn.Sequential(
            conv3x3_block(
                in_channels=(out_channels + scale_idx * increase),
                out_channels=out_channels,
                bias=(not use_bn),
                use_bn=use_bn),
            conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                bias=(not use_bn),
                use_bn=use_bn),
            SEBlock(channels=out_channels),
        ))
    self.before_regress = nn.ModuleList(branches)
def __init__(self, out_channels, use_bn, activation):
    """Channel-preserving pre-block: two 3x3 conv blocks followed by a
    non-conv SE block, all at ``out_channels`` width.

    Parameters
    ----------
    out_channels : int
        Channel width of both convolutions and the SE block.
    use_bn : bool
        Whether conv blocks use batch-norm (bias only when batch-norm off).
    activation : callable
        Activation factory for the convs and the SE mid activation.
    """
    super(IbpPreBlock, self).__init__()
    self.conv1 = conv3x3_block(
        in_channels=out_channels,
        out_channels=out_channels,
        bias=(not use_bn),
        use_bn=use_bn,
        activation=activation)
    self.conv2 = conv3x3_block(
        in_channels=out_channels,
        out_channels=out_channels,
        bias=(not use_bn),
        use_bn=use_bn,
        activation=activation)
    self.se = SEBlock(
        channels=out_channels,
        use_conv=False,
        mid_activation=activation)
def __init__(self, in_channels, out_channels, bn_eps):
    """ESPNet final block: a 3x3 conv (PReLU activation) followed by a
    2x-upsampling transposed convolution.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels of both layers.
    bn_eps : float
        Batch-norm epsilon for the conv block.
    """
    super(ESPFinalBlock, self).__init__()
    self.conv = conv3x3_block(
        in_channels=in_channels,
        out_channels=out_channels,
        bn_eps=bn_eps,
        activation=(lambda: nn.PReLU(out_channels)))
    # kernel 2 / stride 2 exactly doubles the spatial resolution.
    self.deconv = nn.ConvTranspose2d(
        in_channels=out_channels,
        out_channels=out_channels,
        kernel_size=2,
        stride=2,
        padding=0,
        output_padding=0,
        bias=False)
def __init__(self, in_channels=64, mid_channels=64, classes=19):
    """Auxiliary segmentation head (Gluon): 3x3 conv -> dropout(0.1) ->
    biased 1x1 conv producing per-class logits.

    Parameters
    ----------
    in_channels : int, default 64
        Number of input channels.
    mid_channels : int, default 64
        Width of the intermediate conv.
    classes : int, default 19
        Number of output classes.
    """
    super(AuxHead, self).__init__()
    with self.name_scope():
        self.block = nn.HybridSequential()
        with self.block.name_scope():
            self.block.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels))
            self.block.add(nn.Dropout(0.1))
            self.block.add(conv1x1(
                in_channels=mid_channels,
                out_channels=classes,
                use_bias=True))
def __init__(self, in_channels, channels):
    """Network stem (Gluon): a regular 3x3 conv followed by two
    depthwise-separable 3x3 convs, each with stride 2.

    NOTE(review): the class name "Steam" looks like a typo for "Stem", and
    the first attribute is `conv` while the others are `conv2`/`conv3` —
    both are kept as-is because callers and saved parameters depend on them.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    channels : sequence of 3 ints
        Output channel counts of the three stages.
    """
    super(Steam, self).__init__()
    assert (len(channels) == 3)
    with self.name_scope():
        self.conv = conv3x3_block(
            in_channels=in_channels,
            out_channels=channels[0],
            strides=2,
            padding=0)
        self.conv2 = dwsconv3x3_block(
            in_channels=channels[0],
            out_channels=channels[1],
            strides=2)
        self.conv3 = dwsconv3x3_block(
            in_channels=channels[1],
            out_channels=channels[2],
            strides=2)
def __init__(self, in_channels, mid_channels, out_channels, bn_eps):
    """SB encoder initial block: a stride-2 3x3 conv (PReLU) followed by a
    stride-2 depthwise-separable conv with batch-norm disabled on the
    depthwise part, PReLU only after the pointwise part, and se_reduction=1.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Output width of the first conv.
    out_channels : int
        Output width of the separable conv.
    bn_eps : float
        Batch-norm epsilon.
    """
    super(SBEncoderInitBlock, self).__init__()
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        stride=2,
        bn_eps=bn_eps,
        activation=(lambda: nn.PReLU(mid_channels)))
    self.conv2 = dwsconv3x3_block(
        in_channels=mid_channels,
        out_channels=out_channels,
        stride=2,
        dw_use_bn=False,
        bn_eps=bn_eps,
        dw_activation=None,
        pw_activation=(lambda: nn.PReLU(out_channels)),
        se_reduction=1)
def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width):
    """SENet bottleneck: 1x1 reduce to half the group width, grouped 3x3
    conv (carries the stride), then 1x1 expand without activation.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple(int, int)
        Stride of the grouped 3x3 convolution.
    cardinality : int
        Number of convolution groups.
    bottleneck_width : int
        Base width per group (scaled by mid_channels / 64).
    """
    super(SENetBottleneck, self).__init__()
    mid_channels = out_channels // 4
    # ResNeXt-style width: per-group width scaled by the bottleneck size.
    D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
    group_width = cardinality * D
    group_width2 = group_width // 2
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=group_width2)
    self.conv2 = conv3x3_block(
        in_channels=group_width2,
        out_channels=group_width,
        stride=stride,
        groups=cardinality)
    self.conv3 = conv1x1_block(
        in_channels=group_width,
        out_channels=out_channels,
        activate=False)
def __init__(self, in_channels, depth, growth_rate, use_bn, activation):
    """Hourglass module: shared 2x max-pool (down) and nearest-neighbor
    upsample (up), plus per-level module lists. Each level holds three
    residual units and one 3x3 conv block; the deepest level gets an extra
    residual unit at the widest channel count.

    Parameters
    ----------
    in_channels : int
        Channel width at the top level.
    depth : int
        Number of hourglass levels.
    growth_rate : int
        Channel growth added per level.
    use_bn : bool
        Whether the conv block uses batch-norm (bias only when off).
    activation : callable
        Activation factory for all sub-blocks.
    """
    super(Hourglass, self).__init__()
    self.depth = depth
    self.down = nn.MaxPool2d(
        kernel_size=2,
        stride=2)
    self.up = nn.Upsample(
        scale_factor=2,
        mode="nearest")
    levels = []
    for level in range(depth):
        width = in_channels + growth_rate * level
        width_next = in_channels + growth_rate * (level + 1)
        stage = [
            IbpResUnit(
                in_channels=width,
                out_channels=width,
                activation=activation),
            IbpResUnit(
                in_channels=width,
                out_channels=width_next,
                activation=activation),
            IbpResUnit(
                in_channels=width_next,
                out_channels=width,
                activation=activation),
            conv3x3_block(
                in_channels=width,
                out_channels=width,
                bias=(not use_bn),
                use_bn=use_bn,
                activation=activation),
        ]
        if level == (self.depth - 1):
            # Innermost level: extra residual unit at the widest width.
            stage.append(IbpResUnit(
                in_channels=width_next,
                out_channels=width_next,
                activation=activation))
        levels.append(nn.ModuleList(stage))
    self.hg = nn.ModuleList(levels)
def __init__(self, in_channels, out_channels, use_bn, activation):
    """Decoder step: residual unit, 2x nearest-neighbor upsample, then a
    3x3 conv block at the output width.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_bn : bool
        Whether the conv block uses batch-norm (bias only when off).
    activation : callable
        Activation factory shared by the sub-blocks.
    """
    super(IbpUpBlock, self).__init__()
    self.res = IbpResUnit(
        in_channels=in_channels,
        out_channels=out_channels,
        activation=activation)
    self.up = nn.Upsample(
        scale_factor=2,
        mode="nearest")
    self.conv = conv3x3_block(
        in_channels=out_channels,
        out_channels=out_channels,
        bias=(not use_bn),
        use_bn=use_bn,
        activation=activation)
def __init__(self, in_channels, out_channels, stride, bottleneck_factor=4):
    """RegNet bottleneck: 1x1 reduce -> 3x3 (carries the stride) ->
    1x1 expand with no activation. ``self.resize`` records whether the
    block downsamples.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple(int, int)
        Stride of the middle 3x3 convolution.
    bottleneck_factor : int, default 4
        Channel reduction factor of the middle stage.
    """
    super(RegNetBottleneck, self).__init__()
    self.resize = (stride > 1)
    mid_channels = out_channels // bottleneck_factor
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=mid_channels)
    self.conv2 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=mid_channels,
        stride=stride)
    self.conv3 = conv1x1_block(
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=None)
def __init__(self, in_channels, mid_channels, classes, data_format="channels_last", **kwargs):
    """Auxiliary segmentation head (TF/Keras): 3x3 conv -> dropout(0.1) ->
    biased 1x1 conv producing per-class logits.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Width of the intermediate conv.
    classes : int
        Number of output classes.
    data_format : str, default 'channels_last'
        Tensor layout ('channels_last' or 'channels_first').
    """
    super(AuxHead, self).__init__(**kwargs)
    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        data_format=data_format,
        name="conv1")
    self.dropout = nn.Dropout(
        rate=0.1,
        name="dropout")
    self.conv2 = conv1x1(
        in_channels=mid_channels,
        out_channels=classes,
        use_bias=True,
        data_format=data_format,
        name="conv2")
def _make_lower_residual(self, depth_id):
    """Build the four sub-modules for one hourglass level.

    Returns a list of: a width-preserving residual unit, a widening unit,
    a narrowing unit, and a width-preserving 3x3 conv block. Channel widths
    are self.channels + self.increase * depth_id (and * (depth_id + 1) for
    the widened middle pair).

    Parameters
    ----------
    depth_id : int
        Zero-based hourglass level index.

    Returns
    -------
    list
        The four constructed modules, in application order.
    """
    width = self.channels + self.increase * depth_id
    width_next = self.channels + self.increase * (depth_id + 1)
    return [
        IbpResUnit(
            in_channels=width,
            out_channels=width,
            activation=self.activation),
        IbpResUnit(
            in_channels=width,
            out_channels=width_next,
            activation=self.activation),
        IbpResUnit(
            in_channels=width_next,
            out_channels=width,
            activation=self.activation),
        conv3x3_block(
            in_channels=width,
            out_channels=width,
            bias=(not self.use_bn),
            use_bn=self.use_bn,
            activation=self.activation),
    ]
def __init__(self, in_channels, out_channels, stride, padding=1, dilation=1, conv1_stride=False, bottleneck_factor=4):
    """ResNet bottleneck: 1x1 reduce, 3x3 (optionally dilated), 1x1 expand
    without activation. ``conv1_stride`` moves the stride from the 3x3 conv
    to the first 1x1 conv. The last block passes both ``activation=None``
    and the legacy ``activate=False`` flag, per the project's conv-block API.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple(int, int)
        Stride of the block (placed on conv1 or conv2 per conv1_stride).
    padding : int, default 1
        Padding of the 3x3 convolution.
    dilation : int, default 1
        Dilation of the 3x3 convolution.
    conv1_stride : bool, default False
        If True, the stride goes on the first 1x1 conv instead of the 3x3.
    bottleneck_factor : int, default 4
        Channel reduction factor of the middle stage.
    """
    super(ResBottleneck, self).__init__()
    mid_channels = out_channels // bottleneck_factor
    stride1 = stride if conv1_stride else 1
    stride2 = 1 if conv1_stride else stride
    self.conv1 = conv1x1_block(
        in_channels=in_channels,
        out_channels=mid_channels,
        stride=stride1)
    self.conv2 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=mid_channels,
        stride=stride2,
        padding=padding,
        dilation=dilation)
    self.conv3 = conv1x1_block(
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=None,
        activate=False)
def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width, identity_conv3x3):
    """SENet unit: bottleneck body + SE recalibration, with an optional
    identity-path projection (3x3 or 1x1, chosen by ``identity_conv3x3``)
    whenever the channel count or spatial size changes.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple(int, int)
        Stride of the body (and the identity projection, if built).
    cardinality : int
        Number of groups in the body's grouped convolution.
    bottleneck_width : int
        Base per-group width of the body.
    identity_conv3x3 : bool
        If True the identity projection uses a 3x3 conv, else a 1x1 conv.
    """
    super(SENetUnit, self).__init__()
    self.resize_identity = (in_channels != out_channels) or (stride != 1)
    self.body = SENetBottleneck(
        in_channels=in_channels,
        out_channels=out_channels,
        stride=stride,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width)
    self.se = SEBlock(channels=out_channels)
    if self.resize_identity:
        identity_conv = conv3x3_block if identity_conv3x3 else conv1x1_block
        self.identity_conv = identity_conv(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            activate=False)
    self.activ = nn.ReLU(inplace=True)
def __init__(self, in_channels, out_channels, bn_eps, in_size):
    """Attention Pyramid Network head: a global pooling branch, a 1x1 body,
    and a three-level hourglass attention path (7x7 -> 5x5 -> two 3x3 conv
    blocks going down, 2x interpolation going up, with conv skips).

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels of the pooling branch and body.
    bn_eps : float
        Batch-norm epsilon used throughout.
    in_size : tuple(int, int) or None
        Spatial size of the input (stored for the pooling branch).
    """
    super(APN, self).__init__()
    self.in_size = in_size
    # The attention path works on a single-channel map.
    att_out_channels = 1

    self.pool_branch = PoolingBranch(
        in_channels=in_channels,
        out_channels=out_channels,
        bias=True,
        bn_eps=bn_eps,
        in_size=in_size,
        down_size=1)
    self.body = conv1x1_block(
        in_channels=in_channels,
        out_channels=out_channels,
        bias=True,
        bn_eps=bn_eps)

    # Downsampling path of the hourglass.
    down_seq = nn.Sequential()
    down_seq.add_module("down1", conv7x7_block(
        in_channels=in_channels,
        out_channels=att_out_channels,
        stride=2,
        bias=True,
        bn_eps=bn_eps))
    down_seq.add_module("down2", conv5x5_block(
        in_channels=att_out_channels,
        out_channels=att_out_channels,
        stride=2,
        bias=True,
        bn_eps=bn_eps))
    down_seq.add_module("down3", nn.Sequential(
        conv3x3_block(
            in_channels=att_out_channels,
            out_channels=att_out_channels,
            stride=2,
            bias=True,
            bn_eps=bn_eps),
        conv3x3_block(
            in_channels=att_out_channels,
            out_channels=att_out_channels,
            bias=True,
            bn_eps=bn_eps)))

    # Upsampling path: the SAME interpolation instance is registered three
    # times — kept as-is; presumably InterpolationBlock is parameter-free,
    # so sharing it is behavior-neutral (verify against its definition).
    up_seq = nn.Sequential()
    up = InterpolationBlock(scale_factor=2)
    up_seq.add_module("up1", up)
    up_seq.add_module("up2", up)
    up_seq.add_module("up3", up)

    # Skip connections: identity break at the top, conv blocks below.
    skip_seq = nn.Sequential()
    skip_seq.add_module("skip1", BreakBlock())
    skip_seq.add_module("skip2", conv7x7_block(
        in_channels=att_out_channels,
        out_channels=att_out_channels,
        bias=True,
        bn_eps=bn_eps))
    skip_seq.add_module("skip3", conv5x5_block(
        in_channels=att_out_channels,
        out_channels=att_out_channels,
        bias=True,
        bn_eps=bn_eps))

    self.hg = Hourglass(
        down_seq=down_seq,
        up_seq=up_seq,
        skip_seq=skip_seq)