def __init__(self, channels, reduction=8):
    super(PosAttBlock, self).__init__()
    mid_channels = channels // reduction

    self.query_conv = conv1x1(
        in_channels=channels,
        out_channels=mid_channels,
        bias=True)
    self.key_conv = conv1x1(
        in_channels=channels,
        out_channels=mid_channels,
        bias=True)
    self.value_conv = conv1x1(
        in_channels=channels,
        out_channels=channels,
        bias=True)
    self.scale = ScaleBlock()
    self.softmax = nn.Softmax(dim=-1)

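# A minimal, self-contained sketch of the position-attention computation that a block like
# PosAttBlock declares layers for. This is an illustration only: PosAttBlock's real forward pass
# is not reproduced above, plain nn.Conv2d layers stand in for the repo's conv1x1 helper, and a
# learnable scalar stands in for ScaleBlock.
import torch
import torch.nn as nn


class PosAttSketch(nn.Module):
    def __init__(self, channels, reduction=8):
        super(PosAttSketch, self).__init__()
        mid_channels = channels // reduction
        self.query_conv = nn.Conv2d(channels, mid_channels, kernel_size=1, bias=True)
        self.key_conv = nn.Conv2d(channels, mid_channels, kernel_size=1, bias=True)
        self.value_conv = nn.Conv2d(channels, channels, kernel_size=1, bias=True)
        self.scale = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch, channels, height, width = x.size()
        # Project to query/key/value and flatten the spatial dimensions.
        q = self.query_conv(x).view(batch, -1, height * width).permute(0, 2, 1)  # (B, HW, C')
        k = self.key_conv(x).view(batch, -1, height * width)                     # (B, C', HW)
        v = self.value_conv(x).view(batch, -1, height * width)                   # (B, C, HW)
        # Pairwise affinity between all spatial positions, normalized per row.
        attention = self.softmax(torch.bmm(q, k))                                # (B, HW, HW)
        out = torch.bmm(v, attention.permute(0, 2, 1)).view(batch, channels, height, width)
        # Scaled residual connection back onto the input.
        return self.scale * out + x


# Shape check: the output keeps the input resolution.
# y = PosAttSketch(channels=64)(torch.randn(1, 64, 32, 32))  # -> (1, 64, 32, 32)
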
def __init__(self, in_channels, out_channels, kernel_sizes, scale_factors, use_residual, bn_eps):
    super(ESPBlock, self).__init__()
    self.use_residual = use_residual
    groups = len(kernel_sizes)

    mid_channels = int(out_channels / groups)
    res_channels = out_channels - groups * mid_channels

    self.conv = conv1x1(
        in_channels=in_channels,
        out_channels=mid_channels,
        groups=groups)
    self.c_shuffle = ChannelShuffle(
        channels=mid_channels,
        groups=groups)
    self.branches = Concurrent()
    for i in range(groups):
        out_channels_i = (mid_channels + res_channels) if i == 0 else mid_channels
        self.branches.add_module("branch{}".format(i + 1), SBBlock(
            in_channels=mid_channels,
            out_channels=out_channels_i,
            kernel_size=kernel_sizes[i],
            scale_factor=scale_factors[i],
            bn_eps=bn_eps))
    self.preactiv = PreActivation(
        in_channels=out_channels,
        bn_eps=bn_eps)

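# Channel bookkeeping for the block above, as a hedged, standalone illustration (not repo code):
# when out_channels is not divisible by the number of branches, the remainder res_channels is
# folded into the first branch so the concatenated branch outputs still sum to out_channels.
def esp_branch_channels(out_channels, kernel_sizes):
    groups = len(kernel_sizes)
    mid_channels = out_channels // groups
    res_channels = out_channels - groups * mid_channels
    return [(mid_channels + res_channels) if i == 0 else mid_channels for i in range(groups)]


assert esp_branch_channels(out_channels=48, kernel_sizes=[3, 5, 3, 5]) == [12, 12, 12, 12]
assert sum(esp_branch_channels(out_channels=50, kernel_sizes=[3, 5, 3])) == 50  # [18, 16, 16]
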
def __init__(self, in_channels, out_channels, init_block_channels, down_channels_list, channels_list,
             kernel_sizes_list, scale_factors_list, use_residual_list, bn_eps):
    super(SBEncoder, self).__init__()
    self.init_block = SBEncoderInitBlock(
        in_channels=in_channels,
        mid_channels=init_block_channels[0],
        out_channels=init_block_channels[1],
        bn_eps=bn_eps)

    in_channels = init_block_channels[1]
    self.stage1 = SBStage(
        in_channels=in_channels,
        down_channels=down_channels_list[0],
        channels_list=channels_list[0],
        kernel_sizes_list=kernel_sizes_list[0],
        scale_factors_list=scale_factors_list[0],
        use_residual_list=use_residual_list[0],
        se_reduction=1,
        bn_eps=bn_eps)

    in_channels = down_channels_list[0] + channels_list[0][-1]
    self.stage2 = SBStage(
        in_channels=in_channels,
        down_channels=down_channels_list[1],
        channels_list=channels_list[1],
        kernel_sizes_list=kernel_sizes_list[1],
        scale_factors_list=scale_factors_list[1],
        use_residual_list=use_residual_list[1],
        se_reduction=2,
        bn_eps=bn_eps)

    in_channels = down_channels_list[1] + channels_list[1][-1]
    self.output = conv1x1(
        in_channels=in_channels,
        out_channels=out_channels)

def __init__(self, bn_eps):
    super(InceptionAUnit, self).__init__()
    self.scale = 0.17
    in_channels = 256

    self.branches = Concurrent()
    self.branches.add_module("branch1", Conv1x1Branch(
        in_channels=in_channels,
        out_channels=32,
        bn_eps=bn_eps))
    self.branches.add_module("branch2", ConvSeqBranch(
        in_channels=in_channels,
        out_channels_list=(32, 32),
        kernel_size_list=(1, 3),
        strides_list=(1, 1),
        padding_list=(0, 1),
        bn_eps=bn_eps))
    self.branches.add_module("branch3", ConvSeqBranch(
        in_channels=in_channels,
        out_channels_list=(32, 32, 32),
        kernel_size_list=(1, 3, 3),
        strides_list=(1, 1, 1),
        padding_list=(0, 1, 1),
        bn_eps=bn_eps))
    self.conv = conv1x1(
        in_channels=96,
        out_channels=in_channels,
        bias=True)
    self.activ = nn.ReLU(inplace=True)

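# The Inception-A/B/C units collected here share one residual pattern: the concatenated branch
# outputs are projected back to in_channels by the trailing 1x1 conv and added to the identity,
# scaled by self.scale. Below is a hedged, self-contained sketch of that pattern; it is an
# illustration only (the units' real forward methods are not reproduced above) and the helper
# name inception_resnet_residual is hypothetical.
import torch
import torch.nn as nn


def inception_resnet_residual(x, branches, proj_conv, scale, activ=None):
    # branches: callables whose outputs are concatenated along the channel axis.
    y = torch.cat([branch(x) for branch in branches], dim=1)
    y = proj_conv(y)
    x = x + scale * y
    return activ(x) if activ is not None else x


# Example with dummy branches mirroring InceptionAUnit: three 32-channel branches give the 96
# channels that its conv1x1 projects back to 256.
# x = torch.randn(1, 256, 35, 35)
# branches = [nn.Conv2d(256, 32, kernel_size=1) for _ in range(3)]
# out = inception_resnet_residual(x, branches, nn.Conv2d(96, 256, kernel_size=1), scale=0.17,
#                                 activ=nn.ReLU())
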
def __init__(self, in_channels, out_channels, kernel_size, scale_factor, bn_eps):
    super(SBBlock, self).__init__()
    self.use_scale = (scale_factor > 1)

    if self.use_scale:
        self.down_scale = nn.AvgPool2d(
            kernel_size=scale_factor,
            stride=scale_factor)
        self.up_scale = InterpolationBlock(scale_factor=scale_factor)

    use_fdw = (scale_factor > 0)
    if use_fdw:
        fdwconv3x3_class = fdwconv3x3_block if kernel_size == 3 else fdwconv5x5_block
        self.conv1 = fdwconv3x3_class(
            in_channels=in_channels,
            out_channels=in_channels,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(in_channels)))
    else:
        self.conv1 = dwconv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels,
            bn_eps=bn_eps,
            activation=(lambda: nn.PReLU(in_channels)))

    self.conv2 = conv1x1(
        in_channels=in_channels,
        out_channels=out_channels)
    self.bn = nn.BatchNorm2d(
        num_features=out_channels,
        eps=bn_eps)

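# A hedged sketch of the resolution handling the SBBlock layers above suggest: when
# scale_factor > 1, the depthwise convolution runs on an average-pooled copy of the input and the
# result is interpolated back to the original size before the pointwise conv and BatchNorm.
# Illustration only; SBBlock's real forward pass is not reproduced here, and F.interpolate stands
# in for the repo's InterpolationBlock.
import torch
import torch.nn.functional as F


def sb_block_sketch(x, conv1, conv2, bn, scale_factor):
    size = x.shape[2:]
    if scale_factor > 1:
        x = F.avg_pool2d(x, kernel_size=scale_factor, stride=scale_factor)
    x = conv1(x)  # spatial (depthwise) convolution at the reduced resolution
    if scale_factor > 1:
        x = F.interpolate(x, size=size, mode="bilinear", align_corners=False)
    x = conv2(x)  # pointwise projection to out_channels
    return bn(x)


# Example with plain torch layers standing in for the repo's conv blocks:
# x = torch.randn(1, 32, 64, 64)
# y = sb_block_sketch(x, conv1=torch.nn.Conv2d(32, 32, 3, padding=1, groups=32),
#                     conv2=torch.nn.Conv2d(32, 48, 1), bn=torch.nn.BatchNorm2d(48), scale_factor=2)
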
def __init__(self, in_channels, classes, data_format="channels_last", **kwargs):
    super(Head, self).__init__(**kwargs)
    self.conv1 = dwsconv3x3_block(
        in_channels=in_channels,
        out_channels=in_channels,
        data_format=data_format,
        name="conv1")
    self.conv2 = dwsconv3x3_block(
        in_channels=in_channels,
        out_channels=in_channels,
        data_format=data_format,
        name="conv2")
    self.dropout = nn.Dropout(
        rate=0.1,
        name="dropout")
    self.conv3 = conv1x1(
        in_channels=in_channels,
        out_channels=classes,
        use_bias=True,
        data_format=data_format,
        name="conv3")

def __init__(self, bn_eps, scale=0.2, activate=True):
    super(InceptionCUnit, self).__init__()
    self.activate = activate
    self.scale = scale
    in_channels = 1792

    self.branches = Concurrent()
    self.branches.add_module("branch1", Conv1x1Branch(
        in_channels=in_channels,
        out_channels=192,
        bn_eps=bn_eps))
    self.branches.add_module("branch2", ConvSeqBranch(
        in_channels=in_channels,
        out_channels_list=(192, 192, 192),
        kernel_size_list=(1, (1, 3), (3, 1)),
        strides_list=(1, 1, 1),
        padding_list=(0, (0, 1), (1, 0)),
        bn_eps=bn_eps))
    self.conv = conv1x1(
        in_channels=384,
        out_channels=in_channels,
        bias=True)
    if self.activate:
        self.activ = nn.ReLU(inplace=True)

def __init__(self, in_channels, classes):
    super(Head, self).__init__()
    with self.name_scope():
        self.dsconv1 = dwsconv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels)
        self.dsconv2 = dwsconv3x3_block(
            in_channels=in_channels,
            out_channels=in_channels)
        self.dp = nn.Dropout(0.1)
        self.conv = conv1x1(
            in_channels=in_channels,
            out_channels=classes,
            use_bias=True)

def __init__(self, in_channels, out_channels, stride):
    super(ProposalBlock, self).__init__()
    mid_channels = 128

    self.down_conv = conv3x3(
        in_channels=in_channels,
        out_channels=mid_channels,
        stride=stride,
        bias=True)
    self.activ = nn.ReLU(inplace=False)
    self.tidy_conv = conv1x1(
        in_channels=mid_channels,
        out_channels=out_channels,
        bias=True)

def __init__(self, in_channels=64, mid_channels=64, classes=19):
    super(AuxHead, self).__init__()
    with self.name_scope():
        self.block = nn.HybridSequential()
        with self.block.name_scope():
            self.block.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels))
            self.block.add(nn.Dropout(0.1))
            self.block.add(conv1x1(
                in_channels=mid_channels,
                out_channels=classes,
                use_bias=True))

def __init__(self, in_channels, out_channels):
    super(DANetHead, self).__init__()
    mid_channels = in_channels // 4
    dropout_rate = 0.1

    self.branch_pa = DANetHeadBranch(
        in_channels=in_channels,
        out_channels=out_channels,
        pose_att=True)
    self.branch_ca = DANetHeadBranch(
        in_channels=in_channels,
        out_channels=out_channels,
        pose_att=False)
    self.conv = conv1x1(
        in_channels=mid_channels,
        out_channels=out_channels,
        bias=True)
    self.dropout = nn.Dropout(
        p=dropout_rate,
        inplace=False)

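# The conv1x1(in_channels=mid_channels, ...) above hints at the usual DANet fusion: each branch
# produces both a per-branch prediction and an intermediate attention feature of width
# mid_channels; the two intermediate features are summed and passed through this 1x1 conv and
# dropout to form the fused prediction. A hedged sketch of that fusion follows. It is an
# illustration only: DANetHead's real forward is not reproduced here, and each branch is assumed
# to return a (prediction, feature) pair.
def danet_head_fusion_sketch(x, branch_pa, branch_ca, conv, dropout):
    pa_out, pa_feat = branch_pa(x)   # position-attention branch
    ca_out, ca_feat = branch_ca(x)   # channel-attention branch
    fused = dropout(conv(pa_feat + ca_feat))
    # The fused map is the main output; the per-branch maps can serve as auxiliary outputs.
    return fused, pa_out, ca_out
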
def __init__(self, bn_eps=1e-5, aux=False, fixed_size=False, in_channels=3, in_size=(1024, 2048),
             num_classes=19):
    super(EDANet, self).__init__()
    assert (aux is not None)
    assert (fixed_size is not None)
    assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
    self.in_size = in_size
    self.num_classes = num_classes
    self.fixed_size = fixed_size

    growth_rate = 40
    channels = [15, 60, 130, 450]
    dilations = [[0], [0, 1, 1, 1, 2, 2], [0, 2, 2, 4, 4, 8, 8, 16, 16]]

    self.features = nn.Sequential()
    for i, dilations_per_stage in enumerate(dilations):
        out_channels = channels[i]
        stage = nn.Sequential()
        for j, dilation in enumerate(dilations_per_stage):
            if j == 0:
                stage.add_module("unit{}".format(j + 1), DownBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bn_eps=bn_eps))
            else:
                out_channels += growth_rate
                stage.add_module("unit{}".format(j + 1), EDAUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    dilation=dilation,
                    dropout_rate=0.02,
                    bn_eps=bn_eps))
            in_channels = out_channels
        self.features.add_module("stage{}".format(i + 1), stage)

    self.head = conv1x1(
        in_channels=in_channels,
        out_channels=num_classes,
        bias=True)
    self.up = InterpolationBlock(
        scale_factor=8,
        align_corners=True)

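# A hedged sanity check of the channel bookkeeping in the loop above: each stage opens with a
# DownBlock that sets the width to channels[i], and every following EDAUnit grows the width by
# growth_rate, so the final width is 130 + 8 * 40 = 450, matching the last entry of `channels`.
# Illustration only; this reproduces just the arithmetic of the constructor.
def edanet_stage_widths(channels=(15, 60, 130),
                        dilations=([0], [0, 1, 1, 1, 2, 2], [0, 2, 2, 4, 4, 8, 8, 16, 16]),
                        growth_rate=40):
    widths = []
    for stage_channels, stage_dilations in zip(channels, dilations):
        # One DownBlock plus (len - 1) growing EDAUnits per stage.
        widths.append(stage_channels + growth_rate * (len(stage_dilations) - 1))
    return widths


assert edanet_stage_widths() == [15, 260, 450]
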
def __init__(self, in_channels, out_channels, pose_att=True):
    super(DANetHeadBranch, self).__init__()
    mid_channels = in_channels // 4
    dropout_rate = 0.1

    self.conv1 = conv3x3_block(
        in_channels=in_channels,
        out_channels=mid_channels)
    if pose_att:
        self.att = PosAttBlock(mid_channels)
    else:
        self.att = ChaAttBlock()
    self.conv2 = conv3x3_block(
        in_channels=mid_channels,
        out_channels=mid_channels)
    self.conv3 = conv1x1(
        in_channels=mid_channels,
        out_channels=out_channels,
        bias=True)
    self.dropout = nn.Dropout(
        p=dropout_rate,
        inplace=False)

def __init__(self, bn_eps):
    super(InceptionBUnit, self).__init__()
    self.scale = 0.10
    in_channels = 896

    self.branches = Concurrent()
    self.branches.add_module("branch1", Conv1x1Branch(
        in_channels=in_channels,
        out_channels=128,
        bn_eps=bn_eps))
    self.branches.add_module("branch2", ConvSeqBranch(
        in_channels=in_channels,
        out_channels_list=(128, 128, 128),
        kernel_size_list=(1, (1, 7), (7, 1)),
        strides_list=(1, 1, 1),
        padding_list=(0, (0, 3), (3, 0)),
        bn_eps=bn_eps))
    self.conv = conv1x1(
        in_channels=256,
        out_channels=in_channels,
        bias=True)
    self.activ = nn.ReLU(inplace=True)

def __init__(self, layers, channels, init_block_channels, cut_x, bn_eps=1e-5, aux=False, fixed_size=False,
             in_channels=3, in_size=(1024, 2048), num_classes=19):
    super(ESPNet, self).__init__(
        layers=layers,
        channels=channels,
        init_block_channels=init_block_channels,
        cut_x=cut_x,
        bn_eps=bn_eps,
        aux=aux,
        fixed_size=fixed_size,
        in_channels=in_channels,
        in_size=in_size,
        num_classes=num_classes)
    assert (aux is not None)
    assert (fixed_size is not None)
    assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
    self.in_size = in_size
    self.num_classes = num_classes
    self.fixed_size = fixed_size

    self.skip1 = nn.BatchNorm2d(
        num_features=num_classes,
        eps=bn_eps)
    self.skip2 = conv1x1(
        in_channels=channels[1],
        out_channels=num_classes)

    self.up1 = nn.Sequential(nn.ConvTranspose2d(
        in_channels=num_classes,
        out_channels=num_classes,
        kernel_size=2,
        stride=2,
        padding=0,
        output_padding=0,
        bias=False))

    self.up2 = nn.Sequential()
    self.up2.add_module("block1", NormActivation(
        in_channels=(2 * num_classes),
        bn_eps=bn_eps,
        activation=(lambda: nn.PReLU(2 * num_classes))))
    self.up2.add_module("block2", ESPBlock(
        in_channels=(2 * num_classes),
        out_channels=num_classes,
        downsample=False,
        residual=False,
        bn_eps=bn_eps))
    self.up2.add_module("block3", DeconvBlock(
        in_channels=num_classes,
        out_channels=num_classes,
        kernel_size=2,
        stride=2,
        padding=0,
        bn_eps=bn_eps,
        activation=(lambda: nn.PReLU(num_classes))))

    self.decoder_head = ESPFinalBlock(
        in_channels=(channels[0] + num_classes),
        out_channels=num_classes,
        bn_eps=bn_eps)

    self._init_params()