def __init__(self, input_channel=3, num_vec=0):
    super(PointNetFeat, self).__init__()
    self.num_vec = num_vec
    # One grouping module per height slice; cfg.DATA.HEIGHT_HALF must supply
    # four grouping distances, one per PointNet level.
    u = cfg.DATA.HEIGHT_HALF
    assert len(u) == 4
    self.pointnet1 = PointNetModule(
        input_channel - 3, [64, 64, 128], u[0], 32, use_xyz=True, use_feature=True)
    self.pointnet2 = PointNetModule(
        input_channel - 3, [64, 64, 128], u[1], 64, use_xyz=True, use_feature=True)
    self.pointnet3 = PointNetModule(
        input_channel - 3, [128, 128, 256], u[2], 64, use_xyz=True, use_feature=True)
    self.pointnet4 = PointNetModule(
        input_channel - 3, [256, 256, 512], u[3], 128, use_xyz=True, use_feature=True)
    self.econv1 = Conv2d(32, 128, 1)
    self.econv2 = Conv2d(128, 128, 1)
    self.econv3 = Conv2d(128, 256, 1)
    self.econv4 = Conv2d(256, 512, 1)
def __init__(self, Infea, mlp, dist, nsample, use_xyz=True, use_feature=True,
             npoint=280):
    super(PointNetModule, self).__init__()
    self.dist = dist
    self.nsample = nsample
    self.use_xyz = use_xyz
    self.npoint = npoint
    # The use_feature argument is overridden: point features can only be used
    # when there are input feature channels (Infea > 0).
    self.use_feature = Infea > 0

    self.query_depth_point = QueryDepthPoint(dist, npoint)

    if self.use_xyz:
        self.conv1 = Conv2d(Infea + 3, mlp[0], 1)
    else:
        self.conv1 = Conv2d(Infea, mlp[0], 1)
    self.conv2 = Conv2d(mlp[0], mlp[1], 1)
    self.conv3 = Conv2d(mlp[1], mlp[2], 1)

    # Kaiming-normal init for the conv weights, constant 1 for the norm scales
    # (each ConvXd here is assumed to be a (conv, norm) sequential pair).
    init_params([self.conv1[0], self.conv2[0], self.conv3[0]], 'kaiming_normal')
    init_params([self.conv1[1], self.conv2[1], self.conv3[1]], 1)
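# Hypothetical usage sketch for PointNetModule; the argument values below are
# illustrative only (the real distances come from cfg.DATA.HEIGHT_HALF above):
# group 32 neighbors within distance 0.25 of each of the 280 sampled points,
# then lift each group through a shared (64, 64, 128) MLP.
pn = PointNetModule(Infea=1, mlp=[64, 64, 128], dist=0.25, nsample=32,
                    use_xyz=True, use_feature=True, npoint=280)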
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             padding,
             data_format="channels_last",
             **kwargs):
    super(DPNInitBlock, self).__init__(**kwargs)
    self.conv = Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=2,
        padding=padding,
        use_bias=False,
        data_format=data_format,
        name="conv")
    self.bn = dpn_batch_norm(
        channels=out_channels,
        data_format=data_format,
        name="bn")
    self.activ = nn.ReLU()
    self.pool = MaxPool2d(
        pool_size=3,
        strides=2,
        padding=1,
        data_format=data_format,
        name="pool")
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             strides,
             padding,
             dilation,
             activate,
             data_format="channels_last",
             **kwargs):
    super(DRNConv, self).__init__(**kwargs)
    self.activate = activate
    self.conv = Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=False,
        data_format=data_format,
        name="conv")
    self.bn = BatchNorm(
        data_format=data_format,
        name="bn")
    if self.activate:
        self.activ = nn.ReLU()
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             strides,
             padding,
             use_ibn=False,
             return_preact=False,
             data_format="channels_last",
             **kwargs):
    super(IBNPreConvBlock, self).__init__(**kwargs)
    self.use_ibn = use_ibn
    self.return_preact = return_preact

    if self.use_ibn:
        self.ibn = IBN(
            channels=in_channels,
            first_fraction=0.6,
            inst_first=False,
            data_format=data_format,
            name="ibn")
    else:
        self.bn = BatchNorm(
            data_format=data_format,
            name="bn")
    self.activ = nn.ReLU()
    self.conv = Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        use_bias=False,
        data_format=data_format,
        name="conv")
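# IBNPreConvBlock is pre-activated (norm -> ReLU -> conv). A plausible call
# method (an assumption; the forward pass is not shown here) that also exposes
# the post-activation tensor for an identity branch when return_preact is set:
def call(self, x, training=None):
    if self.use_ibn:
        x = self.ibn(x, training=training)
    else:
        x = self.bn(x, training=training)
    x = self.activ(x)
    if self.return_preact:
        x_pre_activ = x
    x = self.conv(x)
    if self.return_preact:
        return x, x_pre_activ
    return x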
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             strides,
             padding,
             dilation=1,
             groups=1,
             use_bias=False,
             use_ibn=False,
             activate=True,
             data_format="channels_last",
             **kwargs):
    super(IBNConvBlock, self).__init__(**kwargs)
    self.activate = activate
    self.use_ibn = use_ibn

    self.conv = Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        data_format=data_format,
        name="conv")
    if self.use_ibn:
        self.ibn = IBN(
            channels=out_channels,
            data_format=data_format,
            name="ibn")
    else:
        self.bn = BatchNorm(
            data_format=data_format,
            name="bn")
    if self.activate:
        self.activ = nn.ReLU()
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             strides,
             padding,
             dilation=1,
             groups=1,
             use_bias=False,
             activate=True,
             data_format="channels_last",
             **kwargs):
    super(IBNbConvBlock, self).__init__(**kwargs)
    self.activate = activate
    self.conv = Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        data_format=data_format,
        name="conv")
    self.inst_norm = InstanceNorm(
        scale=True,
        data_format=data_format,
        name="inst_norm")
    if self.activate:
        self.activ = nn.ReLU()
def __init__(self,
             channels,
             residuals,
             init_block_kernel_size,
             init_block_channels,
             in_channels=3,
             in_size=(224, 224),
             classes=1000,
             data_format="channels_last",
             **kwargs):
    super(SqueezeNet, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    self.data_format = data_format

    self.features = tf.keras.Sequential(name="features")
    self.features.add(SqueezeInitBlock(
        in_channels=in_channels,
        out_channels=init_block_channels,
        kernel_size=init_block_kernel_size,
        data_format=data_format,
        name="init_block"))
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        stage = tf.keras.Sequential(name="stage{}".format(i + 1))
        stage.add(MaxPool2d(
            pool_size=3,
            strides=2,
            ceil_mode=True,
            data_format=data_format,
            name="pool{}".format(i + 1)))
        for j, out_channels in enumerate(channels_per_stage):
            expand_channels = out_channels // 2
            squeeze_channels = out_channels // 8
            stage.add(FireUnit(
                in_channels=in_channels,
                squeeze_channels=squeeze_channels,
                expand1x1_channels=expand_channels,
                expand3x3_channels=expand_channels,
                residual=((residuals is not None) and (residuals[i][j] == 1)),
                data_format=data_format,
                name="unit{}".format(j + 1)))
            in_channels = out_channels
        self.features.add(stage)
    self.features.add(nn.Dropout(
        rate=0.5,
        name="dropout"))

    self.output1 = tf.keras.Sequential(name="output1")
    self.output1.add(Conv2d(
        in_channels=in_channels,
        out_channels=classes,
        kernel_size=1,
        data_format=data_format,
        name="final_conv"))
    self.output1.add(nn.ReLU())
    self.output1.add(nn.AveragePooling2D(
        pool_size=13,
        strides=1,
        data_format=data_format,
        name="final_pool"))
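# Minimal construction sketch, assuming the classic SqueezeNet v1.0 layout
# (7x7/96 init conv; fire-unit output widths from the original paper). The
# exact lists are an assumption about how this repo's builder fills them in:
net = SqueezeNet(
    channels=[[128, 128, 256], [256, 384, 384, 512]],
    residuals=None,
    init_block_kernel_size=7,
    init_block_channels=96)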
def __init__(self,
             channels,
             odd_pointwise,
             avg_pool_size,
             cls_activ,
             alpha=0.1,
             in_channels=3,
             in_size=(224, 224),
             classes=1000,
             data_format="channels_last",
             **kwargs):
    super(DarkNet, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    self.data_format = data_format

    self.features = tf.keras.Sequential(name="features")
    for i, channels_per_stage in enumerate(channels):
        stage = tf.keras.Sequential(name="stage{}".format(i + 1))
        for j, out_channels in enumerate(channels_per_stage):
            # Alternate 3x3 and 1x1 (pointwise) convolutions within a stage;
            # odd_pointwise flips which positions get the 1x1 kernel.
            stage.add(dark_convYxY(
                in_channels=in_channels,
                out_channels=out_channels,
                alpha=alpha,
                pointwise=(len(channels_per_stage) > 1) and not (((j + 1) % 2 == 1) ^ odd_pointwise),
                data_format=data_format,
                name="unit{}".format(j + 1)))
            in_channels = out_channels
        if i != len(channels) - 1:
            stage.add(MaxPool2d(
                pool_size=2,
                strides=2,
                data_format=data_format,
                name="pool{}".format(i + 1)))
        self.features.add(stage)

    self.output1 = tf.keras.Sequential(name="output1")
    self.output1.add(Conv2d(
        in_channels=in_channels,
        out_channels=classes,
        kernel_size=1,
        data_format=data_format,
        name="final_conv"))
    if cls_activ:
        self.output1.add(nn.LeakyReLU(alpha=alpha))
    self.output1.add(nn.AveragePooling2D(
        pool_size=avg_pool_size,
        strides=1,
        data_format=data_format,
        name="final_pool"))
def __init__(self,
             channels,
             init_block_channels,
             dilations,
             bottlenecks,
             simplifieds,
             residuals,
             in_channels=3,
             in_size=(224, 224),
             classes=1000,
             data_format="channels_last",
             **kwargs):
    super(DRN, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    self.data_format = data_format

    self.features = tf.keras.Sequential(name="features")
    self.features.add(drn_init_block(
        in_channels=in_channels,
        out_channels=init_block_channels,
        data_format=data_format,
        name="init_block"))
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        stage = tf.keras.Sequential(name="stage{}".format(i + 1))
        for j, out_channels in enumerate(channels_per_stage):
            # Downsample at the first unit of every stage except the first.
            strides = 2 if (j == 0) and (i != 0) else 1
            stage.add(DRNUnit(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                dilation=dilations[i][j],
                bottleneck=(bottlenecks[i][j] == 1),
                simplified=(simplifieds[i][j] == 1),
                residual=(residuals[i][j] == 1),
                data_format=data_format,
                name="unit{}".format(j + 1)))
            in_channels = out_channels
        self.features.add(stage)
    self.features.add(nn.AveragePooling2D(
        pool_size=28,
        strides=1,
        data_format=data_format,
        name="final_pool"))

    self.output1 = Conv2d(
        in_channels=in_channels,
        out_channels=classes,
        kernel_size=1,
        data_format=data_format,
        name="output1")
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             data_format="channels_last",
             **kwargs):
    super(SqueezeInitBlock, self).__init__(**kwargs)
    self.conv = Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=2,
        data_format=data_format,
        name="conv")
    self.activ = nn.ReLU()
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             strides=1,
             padding=0,
             data_format="channels_last",
             **kwargs):
    super(DwsConv, self).__init__(**kwargs)
    # Depthwise stage: one spatial filter per input channel (groups=in_channels).
    self.dw_conv = Conv2d(
        in_channels=in_channels,
        out_channels=in_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        groups=in_channels,
        use_bias=False,
        data_format=data_format,
        name="dw_conv")
    # Pointwise stage: 1x1 conv mixing channels to the target width.
    self.pw_conv = Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        use_bias=False,
        data_format=data_format,
        name="pw_conv")
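# Why the factorization above is cheap: a dense k x k conv from c_in to c_out
# channels costs k*k*c_in*c_out weights, while DwsConv costs k*k*c_in
# (depthwise) plus c_in*c_out (1x1 pointwise). Illustrative check:
k, c_in, c_out = 3, 128, 256
dense = k * k * c_in * c_out       # 294912
dws = k * k * c_in + c_in * c_out  # 33920
print(dws / dense)                 # ~0.115, i.e. about 1/c_out + 1/k**2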
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             padding,
             data_format="channels_last",
             **kwargs):
    super(FireConv, self).__init__(**kwargs)
    self.conv = Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        padding=padding,
        data_format=data_format,
        name="conv")
    self.activ = nn.ReLU()
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             strides,
             padding,
             data_format="channels_last",
             **kwargs):
    super(DiracConv, self).__init__(**kwargs)
    self.activ = nn.ReLU()
    self.conv = Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        use_bias=True,
        data_format=data_format,
        name="conv")
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             strides,
             padding,
             data_format="channels_last",
             **kwargs):
    super(InceptConv, self).__init__(**kwargs)
    self.conv = Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        use_bias=False,
        data_format=data_format,
        name="conv")
    self.bn = BatchNorm(
        epsilon=1e-3,
        data_format=data_format,
        name="bn")
    self.activ = nn.ReLU()
def __init__(self,
             in_channels,
             out_channels,
             data_format="channels_last",
             **kwargs):
    super(DiracInitBlock, self).__init__(**kwargs)
    self.conv = Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        strides=2,
        padding=3,
        use_bias=True,
        data_format=data_format,
        name="conv")
    self.pool = MaxPool2d(
        pool_size=3,
        strides=2,
        padding=1,
        data_format=data_format,
        name="pool")
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             strides,
             padding,
             dilation=1,
             groups=1,
             use_bias=False,
             axis=1,
             data_format="channels_last",
             **kwargs):
    super(MixConv, self).__init__(**kwargs)
    kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size]
    padding = padding if isinstance(padding, list) else [padding]
    kernel_count = len(kernel_size)
    self.splitted_in_channels = self.split_channels(in_channels, kernel_count)
    splitted_out_channels = self.split_channels(out_channels, kernel_count)
    self.axis = axis

    self.convs = []
    for i, kernel_size_i in enumerate(kernel_size):
        in_channels_i = self.splitted_in_channels[i]
        out_channels_i = splitted_out_channels[i]
        padding_i = padding[i]
        self.convs.append(Conv2d(
            in_channels=in_channels_i,
            out_channels=out_channels_i,
            kernel_size=kernel_size_i,
            strides=strides,
            padding=padding_i,
            dilation=dilation,
            # When the whole layer is depthwise (groups == out_channels), keep
            # each per-kernel branch depthwise as well.
            groups=(out_channels_i if out_channels == groups else groups),
            use_bias=use_bias,
            data_format=data_format,
            name="conv{}".format(i + 1)))
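# split_channels is referenced above but not shown; a plausible implementation
# (an assumption: even split, with the remainder folded into the first branch):
@staticmethod
def split_channels(channels, kernel_count):
    splitted_channels = [channels // kernel_count] * kernel_count
    splitted_channels[0] += channels - sum(splitted_channels)
    return splitted_channels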
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             strides,
             padding,
             num_blocks,
             data_format="channels_last",
             **kwargs):
    super(PolyConv, self).__init__(**kwargs)
    # A single convolution shared across num_blocks tower positions, each
    # position keeping its own BatchNorm statistics.
    self.conv = Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        use_bias=False,
        data_format=data_format,
        name="conv")
    self.bns = []
    for i in range(num_blocks):
        self.bns.append(BatchNorm(
            data_format=data_format,
            name="bn{}".format(i + 1)))
    self.activ = nn.ReLU()
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             strides,
             padding,
             groups,
             data_format="channels_last",
             **kwargs):
    super(DPNConv, self).__init__(**kwargs)
    self.bn = dpn_batch_norm(
        channels=in_channels,
        data_format=data_format,
        name="bn")
    self.activ = nn.ReLU()
    self.conv = Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        groups=groups,
        use_bias=False,
        data_format=data_format,
        name="conv")
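# DPNConv uses the pre-activation ordering (BN -> ReLU -> conv), so a plausible
# call method (an assumption; the forward pass is not shown here) is simply:
def call(self, x, training=None):
    x = self.bn(x, training=training)
    x = self.activ(x)
    return self.conv(x)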