def __init__(self, num_modules=1, end_relu=False, num_landmarks=98, fname_pretrained=None):
    """FAN (Face Alignment Network) head: a coord-conv stem plus one stacked
    hourglass module producing landmark heatmaps.

    Args:
        num_modules: number of hourglass modules (stored; only module 0 is built here).
        end_relu: flag stored for use elsewhere (presumably the forward pass — confirm).
        num_landmarks: number of landmarks; the output conv emits num_landmarks + 1 maps.
        fname_pretrained: optional checkpoint path; weights are loaded when given.
    """
    super(FAN, self).__init__()
    self.num_modules = num_modules
    self.end_relu = end_relu

    # Base part: coord-conv stem (stride 2) then three channel-expanding conv blocks.
    self.conv1 = CoordConvTh(256, 256, True, False,
                             in_channels=3, out_channels=64,
                             kernel_size=7, stride=2, padding=3)
    self.bn1 = BatchNorm(64)
    self.conv2 = ConvBlock(64, 128)
    self.conv3 = ConvBlock(128, 128)
    self.conv4 = ConvBlock(128, 256)

    # Stacking part: sublayer names ('m0', 'top_m_0', ...) are fixed strings so
    # pretrained checkpoints can be loaded by name — do not rename.
    self.add_sublayer('m0', HourGlass(1, 4, 256, first_one=True))
    self.add_sublayer('top_m_0', ConvBlock(256, 256))
    self.add_sublayer('conv_last0', Conv2D(256, 256, 1, 1, 0))
    self.add_sublayer('bn_end0', BatchNorm(256))
    # Output head: num_landmarks + 1 heatmaps (extra channel — purpose not visible here).
    self.add_sublayer('l0', Conv2D(256, num_landmarks + 1, 1, 1, 0))

    if fname_pretrained is not None:
        self.load_pretrained_weights(fname_pretrained)
def __init__(self, num_classes=1000):
    """MobileNetV3-Large: stem conv, fifteen inverted-residual bottlenecks,
    then a 1x1 conv and two FC layers ending in a softmax classifier."""
    super(MobileNetV3_Large, self).__init__()
    # Stem: 3x3 stride-2 conv -> BN -> h-swish.
    self.conv1 = Conv2D(3, 16, filter_size=3, stride=2, padding=1)
    self.bn1 = BatchNorm(16)
    self.hs1 = hswish()

    # Bottleneck table: (kernel, in_ch, expand_ch, out_ch, use_hswish, se_ch, stride).
    # se_ch is None when the block has no squeeze-excite module.
    bneck_cfg = [
        (3, 16, 16, 16, False, None, 1),
        (3, 16, 64, 24, False, None, 2),
        (3, 24, 72, 24, False, None, 1),
        (5, 24, 72, 40, False, 40, 2),
        (5, 40, 120, 40, False, 40, 1),
        (5, 40, 120, 40, False, 40, 1),
        (3, 40, 240, 80, True, None, 2),
        (3, 80, 200, 80, True, None, 1),
        (3, 80, 184, 80, True, None, 1),
        (3, 80, 184, 80, True, None, 1),
        (3, 80, 480, 112, True, 112, 1),
        (3, 112, 672, 112, True, 112, 1),
        (5, 112, 672, 160, True, 160, 1),
        (5, 160, 672, 160, True, 160, 2),
        (5, 160, 960, 160, True, 160, 1),
    ]
    blocks = []
    for kernel, cin, expand, cout, use_hs, se_ch, stride in bneck_cfg:
        nonlinearity = hswish() if use_hs else relu()
        se_module = SeModule(se_ch) if se_ch is not None else None
        blocks.append(Block(kernel, cin, expand, cout, nonlinearity, se_module, stride))
    self.bneck = fluid.dygraph.Sequential(*blocks)

    # Head: 1x1 conv to 960, FC to 1280, FC to num_classes with softmax.
    self.conv2 = Conv2D(160, 960, filter_size=1, stride=1, padding=0)
    self.bn2 = BatchNorm(960)
    self.hs2 = hswish()
    self.linear3 = Linear(960, 1280)
    self.bn3 = BatchNorm(1280)
    self.hs3 = hswish()
    self.linear4 = Linear(1280, num_classes, act='softmax')
def __init__(self, num_classes=1000):
    """MobileNetV3-Small: stem conv, eleven inverted-residual bottlenecks,
    then a 1x1 conv and two FC layers ending in a softmax classifier."""
    super(MobileNetV3_Small, self).__init__()
    # Stem: 3x3 stride-2 conv -> BN -> h-swish.
    self.conv1 = Conv2D(3, 16, filter_size=3, stride=2, padding=1)
    self.bn1 = BatchNorm(16)
    self.hs1 = hswish()

    # Bottleneck table: (kernel, in_ch, expand_ch, out_ch, use_hswish, se_ch, stride).
    # se_ch is None when the block has no squeeze-excite module.
    bneck_cfg = [
        (3, 16, 16, 16, False, 16, 2),
        (3, 16, 72, 24, False, None, 2),
        (3, 24, 88, 24, False, None, 1),
        (5, 24, 96, 40, True, 40, 2),
        (5, 40, 240, 40, True, 40, 1),
        (5, 40, 240, 40, True, 40, 1),
        (5, 40, 120, 48, True, 48, 1),
        (5, 48, 144, 48, True, 48, 1),
        (5, 48, 288, 96, True, 96, 2),
        (5, 96, 576, 96, True, 96, 1),
        (5, 96, 576, 96, True, 96, 1),
    ]
    blocks = []
    for kernel, cin, expand, cout, use_hs, se_ch, stride in bneck_cfg:
        nonlinearity = hswish() if use_hs else relu()
        se_module = SeModule(se_ch) if se_ch is not None else None
        blocks.append(Block(kernel, cin, expand, cout, nonlinearity, se_module, stride))
    self.bneck = fluid.dygraph.Sequential(*blocks)

    # Head: 1x1 conv to 576, FC to 1280, FC to num_classes with softmax.
    self.conv2 = Conv2D(96, 576, filter_size=1, stride=1, padding=0)
    self.bn2 = BatchNorm(576)
    self.hs2 = hswish()
    self.linear3 = Linear(576, 1280)
    self.bn3 = BatchNorm(1280)
    self.hs3 = hswish()
    self.linear4 = Linear(1280, num_classes, act='softmax')
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate,
             memory_efficient=False):
    """DenseNet-style layer for an RPN: BN+ReLU -> 1x1 bottleneck conv,
    then BN+ReLU -> dilated 3x3 conv (dilation=2, padding=2 keeps spatial size)."""
    super(_DenseLayer_rpn, self).__init__()
    bottleneck_channels = bn_size * growth_rate
    # 1x1 bottleneck preceded by BN with fused ReLU.
    self.add_sublayer('norm1', BatchNorm(num_input_features, act='relu'))
    self.add_sublayer('conv1', Conv2D(num_input_features,
                                      bottleneck_channels,
                                      filter_size=1,
                                      stride=1,
                                      bias_attr=False))
    # Dilated 3x3 conv produces growth_rate new feature maps.
    self.add_sublayer('norm2', BatchNorm(bottleneck_channels, act='relu'))
    self.add_sublayer('conv2', Conv2D(bottleneck_channels,
                                      growth_rate,
                                      filter_size=3,
                                      dilation=2,
                                      stride=1,
                                      padding=2,
                                      bias_attr=False))
    self.drop_rate = float(drop_rate)
    self.memory_efficient = memory_efficient
def __init__(self, name_scope, groups=1):
    """Three 3x3x3 Conv3D + BatchNorm(ReLU) stages: 96->128, then 128->128 twice."""
    super(Res3, self).__init__(name_scope)
    # Creation order (conv1, bn1, conv2, bn2, conv3, bn3) is preserved so
    # auto-generated parameter names stay stable.
    input_channels = (96, 128, 128)
    for idx, cin in enumerate(input_channels, start=1):
        conv = Conv3D(num_channels=cin, num_filters=128, filter_size=3,
                      stride=1, padding=1, groups=groups, act=None,
                      bias_attr=False)
        setattr(self, '_conv%d' % idx, conv)
        setattr(self, '_batch_norm%d' % idx, BatchNorm(128, act='relu'))
def __init__(self):
    """res3a/res3b stage: three 3D convs (96->128, 128->128, 128->128),
    each followed by BatchNorm with fused ReLU."""
    super(res3a, self).__init__()
    # First conv lifts channels from 96 to 128.
    self.res3a_2 = conv3d(96, 128, stride=1)
    self.res3a_bn = BatchNorm(128, act='relu')
    # Two further 128-channel convs.
    self.res3b_1 = conv3d(128, 128, stride=1)
    self.res3b_1_bn = BatchNorm(128, act='relu')
    self.res3b_2 = conv3d(128, 128, stride=1)
    self.res3b_bn = BatchNorm(128, act='relu')
def __init__(self, in_planes, planes, stride=1, downsample=None):
    """3D-ResNet basic block: two 3x3x3 convs with BatchNorm; the optional
    `downsample` layer projects the shortcut when shapes change."""
    super(BasicBlock, self).__init__()
    # Main path (activations are applied in forward, not fused here).
    self.conv1 = conv3x3x3(in_planes, planes, stride)
    self.bn1 = BatchNorm(planes)
    self.conv2 = conv3x3x3(planes, planes)
    self.bn2 = BatchNorm(planes)
    # Shortcut projection supplied by the caller; stride kept for reference.
    self.downsample = downsample
    self.stride = stride
def __init__(self, name_scope, group, out_ch, channels, act="relu",
             is_test=False, pool=True, use_cudnn=True):
    """Two (Conv2D -> BatchNorm) stages with He-style Normal init, optionally
    followed by a 2x2 max pool.

    Args:
        name_scope: dygraph name scope for this layer.
        group: group index (stored on the instance; not used in construction).
        out_ch: pair of output channel counts for the two convs.
        channels: pair of input channel counts for the two convs.
        act: activation fused into each BatchNorm.
        is_test: BatchNorm inference-mode flag.
        pool: whether to append the 2x2 max-pool layer.
        use_cudnn: forwarded to the conv/pool ops.

    Fix: removed a leftover debug `print("pool", self.pool)` that spammed
    stdout on every construction.
    """
    super(ConvBNPool, self).__init__(name_scope)
    self.group = group
    self.pool = pool

    filter_size = 3
    # He initialization: std = sqrt(2 / fan_in), fan_in = k*k*in_channels.
    conv_std_0 = (2.0 / (filter_size**2 * channels[0]))**0.5
    conv_param_0 = fluid.ParamAttr(
        initializer=fluid.initializer.Normal(0.0, conv_std_0))
    conv_std_1 = (2.0 / (filter_size**2 * channels[1]))**0.5
    conv_param_1 = fluid.ParamAttr(
        initializer=fluid.initializer.Normal(0.0, conv_std_1))

    # First conv uses the older positional Conv2D signature; second spells the
    # keywords out — both are kept as-is to preserve parameter naming.
    self.conv_0_layer = Conv2D(
        self.full_name(), channels[0], out_ch[0], 3, padding=1,
        param_attr=conv_param_0, bias_attr=False, act=None,
        use_cudnn=use_cudnn)
    self.bn_0_layer = BatchNorm(
        self.full_name(), out_ch[0], act=act, is_test=is_test)
    self.conv_1_layer = Conv2D(
        self.full_name(), num_channels=channels[1], num_filters=out_ch[1],
        filter_size=3, padding=1, param_attr=conv_param_1, bias_attr=False,
        act=None, use_cudnn=use_cudnn)
    self.bn_1_layer = BatchNorm(
        self.full_name(), out_ch[1], act=act, is_test=is_test)

    if self.pool:
        # ceil_mode keeps odd spatial sizes from losing the last row/column.
        self.pool_layer = Pool2D(
            self.full_name(), pool_size=2, pool_type='max', pool_stride=2,
            use_cudnn=use_cudnn, ceil_mode=True)
def __init__(self, in_size, reduction=4):
    """Squeeze-and-Excitation gate: 1x1 conv down to in_size // reduction with
    BN+ReLU, 1x1 conv back to in_size with BN, then hard sigmoid."""
    super(SeModule, self).__init__()
    squeezed = in_size // reduction
    self.se = fluid.dygraph.Sequential(
        Conv2D(in_size, squeezed, filter_size=1, stride=1, padding=0),
        BatchNorm(squeezed, act='relu'),
        Conv2D(squeezed, in_size, filter_size=1, stride=1, padding=0),
        BatchNorm(in_size),
        hsigmoid(),
    )
def __init__(self, in_planes, planes, stride=1, downsample=None):
    """3D-ResNet bottleneck: 1x1x1 reduce, 3x3x3 (carries the stride),
    1x1x1 expand to planes * self.expansion; each conv has a BatchNorm."""
    super(Bottleneck, self).__init__()
    # Reduce -> spatial conv -> expand; activations applied in forward.
    self.conv1 = conv1x1x1(in_planes, planes)
    self.bn1 = BatchNorm(planes)
    self.conv2 = conv3x3x3(planes, planes, stride)
    self.bn2 = BatchNorm(planes)
    self.conv3 = conv1x1x1(planes, planes * self.expansion)
    self.bn3 = BatchNorm(planes * self.expansion)
    # Caller-provided shortcut projection (None when identity works).
    self.downsample = downsample
    self.stride = stride
def __init__(self, in_planes, planes, stride=1, downsample=None):
    """Basic block whose output channels are split in half: one half from two
    3x3x3 convs, the other half from a frame-difference branch (FrameSubNet)."""
    super(BasicBlock, self).__init__()
    # Convolutional half: two 3x3x3 convs producing planes // 2 channels.
    self.conv1 = conv3x3x3(in_planes, planes // 2, stride)
    self.bn1 = BatchNorm(planes // 2)
    self.conv2 = conv3x3x3(planes // 2, planes // 2)
    self.bn2 = BatchNorm(planes // 2)
    # The other half of the channels is computed via inter-frame differences.
    self.frameSubLayer = FrameSubNet(planes // 2, planes // 2)
    # Optional shortcut projection supplied by the caller.
    self.downsample = downsample
    self.stride = stride
def __init__(self, name_scope, groups=1):
    """Res5 stage of a 3D CNN: a stride-2 conv (256->512) plus four further
    512-channel convs, each paired with a BatchNorm(ReLU)."""
    super(Res5, self).__init__(name_scope)
    # Branch a: stride-2 conv 256->512, then a stride-1 512->512 conv.
    self._conv1 = Conv3D(num_channels=256, num_filters=512, filter_size=3,
                         stride=2, padding=1, groups=groups, act=None,
                         bias_attr=False)
    self._batch_norm1 = BatchNorm(512, act='relu')
    self._conv2 = Conv3D(num_channels=512, num_filters=512, filter_size=3,
                         stride=1, padding=1, groups=groups, act=None,
                         bias_attr=False)
    # NOTE(review): _conv3 takes 256 input channels and stride 2, like _conv1 —
    # presumably a parallel shortcut branch applied to the stage input; confirm
    # against the forward pass.
    self._conv3 = Conv3D(num_channels=256, num_filters=512, filter_size=3,
                         stride=2, padding=1, groups=groups, act=None,
                         bias_attr=False)
    self._batch_norm2 = BatchNorm(512, act='relu')
    # Branch b: two further stride-1 512->512 convs with BN+ReLU.
    self._conv4 = Conv3D(num_channels=512, num_filters=512, filter_size=3,
                         stride=1, padding=1, groups=groups, act=None,
                         bias_attr=False)
    self._batch_norm3 = BatchNorm(512, act='relu')
    self._conv5 = Conv3D(num_channels=512, num_filters=512, filter_size=3,
                         stride=1, padding=1, groups=groups, act=None,
                         bias_attr=False)
    self._batch_norm4 = BatchNorm(512, act='relu')
def __init__(self, fc_hidden1=512, fc_hidden2=512, drop_p=0.3, CNN_embed_dim=300):
    """EfficientNet-b0 backbone followed by a three-layer FC head that maps
    the 1280-d backbone output down to CNN_embed_dim features."""
    super(CNNEnoder, self).__init__()
    # Build EfficientNet-b0 with its classifier sized to 1280 outputs.
    blocks_args, global_params = get_model_params(
        "efficientnet-b0", override_params={"num_classes": 1280})
    self.drop_p = drop_p
    self.BackBone = EfficientNet(blocks_args, global_params)
    # FC head: 1280 -> fc_hidden1 -> fc_hidden2 -> CNN_embed_dim,
    # with BatchNorm after the first two layers.
    self.fc1 = Linear(1280, fc_hidden1)
    self.bn1 = BatchNorm(fc_hidden1)
    self.fc2 = Linear(fc_hidden1, fc_hidden2)
    self.bn2 = BatchNorm(fc_hidden2)
    self.fc3 = Linear(fc_hidden2, CNN_embed_dim)
def __init__(self, in_planes, out_planes):
    """Hourglass conv block: three BN->3x3-conv stages narrowing channels to
    out_planes/2 then out_planes/4 twice; a downsample branch is added when
    the channel count changes."""
    super(ConvBlock, self).__init__()
    half = int(out_planes / 2)
    quarter = int(out_planes / 4)
    # All convs share kernel 3, stride 1, padding 1 and the same bias init.
    conv3x3 = partial(Conv2D, filter_size=3, stride=1, padding=1,
                      bias_attr=bias_initializer_1x1)
    self.bn1 = BatchNorm(in_planes)
    self.conv1 = conv3x3(in_planes, half)
    self.bn2 = BatchNorm(half)
    self.conv2 = conv3x3(half, quarter)
    self.bn3 = BatchNorm(quarter)
    self.conv3 = conv3x3(quarter, quarter)
    self.in_planes = in_planes
    self.out_planes = out_planes
    # Projection branch only when input/output widths differ.
    self.downsample = (self.architecture_init()
                       if in_planes != out_planes else None)
def __init__(self, num_channels, filter_size, num_filters, stride, padding,
             channels=None, num_groups=1, use_cudnn=True):
    """Bias-free Conv2D + BatchNorm whose parameter names are derived from
    full_name() so checkpoints remain stable across runs."""
    super(ConvBNLayer, self).__init__()
    prefix = self.full_name()
    self._conv = Conv2D(num_channels=num_channels,
                        num_filters=num_filters,
                        filter_size=filter_size,
                        stride=stride,
                        padding=padding,
                        groups=num_groups,
                        act=None,
                        use_cudnn=use_cudnn,
                        param_attr=ParamAttr(name=prefix + "_weights"),
                        bias_attr=False)
    # BN scale/offset and moving statistics are all explicitly named too.
    bn_prefix = prefix + "_bn"
    self._batch_norm = BatchNorm(
        num_filters,
        param_attr=ParamAttr(name=bn_prefix + "_scale"),
        bias_attr=ParamAttr(name=bn_prefix + "_offset"),
        moving_mean_name=bn_prefix + '_mean',
        moving_variance_name=bn_prefix + '_variance')
def __init__(self, c_in, c_out, kernel_size, stride, padding, dilation,
             affine=True):
    """Dilated separable conv: depthwise dilated conv (groups=c_in) followed
    by a 1x1 pointwise conv, then BatchNorm whose affine parameters are
    configured by `affine` via bn_param_config."""
    super(DilConv, self).__init__()

    def msra():
        # Fresh MSRA-initialized ParamAttr per conv.
        return fluid.ParamAttr(initializer=MSRAInitializer())

    # Depthwise: one filter per input channel; dilation happens here.
    # use_cudnn=False because cuDNN grouped dilated convs are avoided here.
    self.conv1 = Conv2D(num_channels=c_in, num_filters=c_in,
                        filter_size=kernel_size, stride=stride,
                        padding=padding, dilation=dilation, groups=c_in,
                        use_cudnn=False, param_attr=msra(), bias_attr=False)
    # Pointwise 1x1 conv mixes channels up/down to c_out.
    self.conv2 = Conv2D(num_channels=c_in, num_filters=c_out, filter_size=1,
                        padding=0, param_attr=msra(), bias_attr=False)
    gama, beta = bn_param_config(affine)
    self.bn1 = BatchNorm(num_channels=c_out, param_attr=gama, bias_attr=beta)
def __init__(self, in_channels, num_filters, filter_size, stride=1, padding=0,
             groups=1, act='relu', name=None):
    """Named, bias-free Conv2D followed by BatchNorm with fused activation."""
    super(ConvBNLayer, self).__init__()
    self.conv = Conv2D(num_channels=in_channels,
                       num_filters=num_filters,
                       filter_size=filter_size,
                       stride=stride,
                       padding=padding,
                       groups=groups,
                       act=None,
                       param_attr=ParamAttr(name=name + ".conv.weight"),
                       bias_attr=False)
    # BN parameter names follow PyTorch conventions (.weight/.bias/
    # running_mean/running_var) — presumably so converted checkpoints load
    # by name; confirm against the weight-conversion script.
    bn_prefix = name + ".bn"
    self.bn = BatchNorm(num_filters,
                        act=act,
                        epsilon=0.001,
                        param_attr=ParamAttr(name=bn_prefix + ".weight"),
                        bias_attr=ParamAttr(name=bn_prefix + ".bias"),
                        moving_mean_name=bn_prefix + '.running_mean',
                        moving_variance_name=bn_prefix + '.running_var')
def __init__(self, name_scope, num_filters, filter_size, padding):
    """VGG-style block: two (Conv2D -> BatchNorm+ReLU) pairs followed by a
    2x2 max pool.

    Args:
        name_scope: dygraph name scope for this layer.
        num_filters: output channels of every conv in this block.
        filter_size: conv kernel size.
        padding: conv padding.
    """
    super(vgg_block, self).__init__(name_scope)
    self.conv_list = []
    # Exactly two conv+BN pairs; conv and BN are appended interleaved so the
    # forward pass can iterate conv_list in order.
    for i in range(2):
        conv_layer = self.add_sublayer(
            'conv_' + str(i),
            Conv2D(self.full_name(), num_filters=num_filters,
                   filter_size=filter_size, padding=padding))
        batch_norm = self.add_sublayer(
            'bn_' + str(i),
            BatchNorm(self.full_name(), num_channels=num_filters, act='relu'))
        self.conv_list.append(conv_layer)
        self.conv_list.append(batch_norm)
    # 2x2 stride-2 max pool halves the spatial resolution.
    self.pool = Pool2D(self.full_name(), pool_stride=2, pool_size=2,
                       pool_type='max')
def __init__(self, ch_in, ch_out, filter_size=3, stride=1, groups=1, padding=0,
             act="leaky", is_test=True):
    """Conv2D + BatchNorm with Normal(0, 0.02) weight init; the activation
    named by `self.act` is applied in the forward pass, not fused here."""
    super(ConvBNLayer, self).__init__()
    self.conv = Conv2D(num_channels=ch_in,
                       num_filters=ch_out,
                       filter_size=filter_size,
                       stride=stride,
                       padding=padding,
                       groups=groups,
                       param_attr=ParamAttr(
                           initializer=fluid.initializer.Normal(0., 0.02)),
                       bias_attr=False,
                       act=None)
    # BN scale is Normal-initialized, offset zero; both are excluded from
    # weight decay via L2Decay(0.).
    self.batch_norm = BatchNorm(
        num_channels=ch_out,
        is_test=is_test,
        param_attr=ParamAttr(
            initializer=fluid.initializer.Normal(0., 0.02),
            regularizer=L2Decay(0.)),
        bias_attr=ParamAttr(
            initializer=fluid.initializer.Constant(0.0),
            regularizer=L2Decay(0.)))
    self.act = act
def __init__(self, input_channels, output_channels, filter_size, stride=1,
             padding=0, act=None, name=None):
    """Bias-free Conv2D plus BatchNorm configured TF-style (momentum 0.99,
    epsilon 1e-3) with slash-separated parameter names."""
    super(ConvBNLayer, self).__init__()
    self._conv = Conv2D(num_channels=input_channels,
                        num_filters=output_channels,
                        filter_size=filter_size,
                        stride=stride,
                        padding=padding,
                        param_attr=ParamAttr(name=name + "/weights"),
                        bias_attr=False)
    # Names mirror a TensorFlow checkpoint layout (gamma/beta/moving_*).
    bn_scope = name + "/BatchNorm"
    self._bn = BatchNorm(num_channels=output_channels,
                         act=act,
                         epsilon=1e-3,
                         momentum=0.99,
                         param_attr=ParamAttr(name=bn_scope + "/gamma"),
                         bias_attr=ParamAttr(name=bn_scope + "/beta"),
                         moving_mean_name=bn_scope + "/moving_mean",
                         moving_variance_name=bn_scope + "/moving_variance")
def __init__(self, num_channels, num_filters, filter_size, stride=1,
             groups=1, act=None):
    """Conv2D + BatchNorm pair with 'same'-style padding for odd kernels.

    Args:
        num_channels: input channel count of the conv layer.
        num_filters: output channel count of the conv layer.
        filter_size: conv kernel size; padding = (filter_size - 1) // 2
            preserves spatial size for stride 1.
        stride: conv stride.
        groups: number of groups for grouped convolution; groups=1 (default)
            means an ordinary convolution.
        act: activation fused into the BatchNorm; None applies no activation.
    """
    super(ConvBNLayer, self).__init__()
    # Create the conv layer (bias-free; BN provides the affine offset).
    self.conv = Conv2D(num_channels=num_channels,
                       num_filters=num_filters,
                       filter_size=filter_size,
                       stride=stride,
                       padding=(filter_size - 1) // 2,
                       groups=groups,
                       act=None,
                       bias_attr=False)
    # Create the BatchNorm layer with the requested fused activation.
    self.batch_norm = BatchNorm(num_channels=num_filters, act=act)
def __init__(self, num_channels, num_filters, filter_size, stride=1,
             groups=1, act=None):
    """Bias-free Conv2D + BatchNorm pair with 'same'-style padding for odd
    kernels; `act` is fused into the BatchNorm.

    Fix: the `groups` argument was accepted but never forwarded — Conv2D was
    hard-coded with groups=None (an ungrouped conv), silently ignoring any
    caller-requested grouping. It is now passed through; the default
    (groups=1) behaves identically to groups=None in fluid's Conv2D.
    """
    super(ConvBNLayer, self).__init__()
    self._conv = Conv2D(
        num_channels=num_channels,
        num_filters=num_filters,
        filter_size=filter_size,
        stride=stride,
        padding=(filter_size - 1) // 2,
        groups=groups,
        act=None,
        param_attr=fluid.param_attr.ParamAttr(),
        bias_attr=False)
    self._batch_norm = BatchNorm(
        num_filters,
        act=act,
        param_attr=fluid.param_attr.ParamAttr(),
        bias_attr=fluid.param_attr.ParamAttr())
def __init__(self, name_scope):
    """GAN discriminator: three downsampling blocks (64/128/256 filters),
    a stride-1 4x4 conv to 512 channels with BatchNorm, then a final 4x4
    conv producing 2 channels."""
    super(discriminator, self).__init__(name_scope)
    scope = self.full_name()
    # Third arg False on the first block — presumably disables its norm;
    # see disc_downsample.
    self.down1 = disc_downsample(scope, 64, 4, False)
    self.down2 = disc_downsample(scope, 128, 4)
    self.down3 = disc_downsample(scope, 256, 4)
    self.conv = Conv2D(scope,
                       num_filters=512,
                       filter_size=4,
                       stride=1,
                       use_cudnn=use_cudnn,
                       param_attr=fluid.initializer.Normal(),
                       bias_attr=False)
    self.bn = BatchNorm(scope, num_channels=512)
    self.last = Conv2D(scope,
                       num_filters=2,
                       filter_size=4,
                       stride=1,
                       use_cudnn=use_cudnn,
                       param_attr=fluid.initializer.Normal())
def __init__(self,
             num_channels,
             num_filters=64,
             filter_size=7,
             stride=1,
             stddev=0.02,
             padding=0,
             norm=True,
             norm_layer=InstanceNorm,
             relu=True,
             relufactor=0.0,
             use_bias=False):
    """Conv2D wrapper with configurable normalization (Instance/Batch) and
    (leaky) ReLU, as used in GAN generators/discriminators.

    Args:
        num_channels / num_filters / filter_size / stride / padding: conv shape.
        stddev: std of the Normal weight initializer.
        norm: whether forward applies self.bn (flag stored; layer built anyway).
        norm_layer: InstanceNorm or BatchNorm class selecting the norm type.
        relu: whether to attach an activation layer.
        relufactor: negative slope; 0.0 selects plain ReLU, otherwise leaky.
        use_bias: False disables the conv bias entirely; True uses a
            zero-initialized bias.
    """
    super(conv2d, self).__init__()

    # bias_attr=False removes the bias parameter; otherwise init bias to 0.
    if use_bias == False:
        con_bias_attr = False
    else:
        con_bias_attr = fluid.ParamAttr(
            initializer=fluid.initializer.Constant(0.0))

    self.conv = Conv2D(num_channels=num_channels,
                       num_filters=int(num_filters),
                       filter_size=int(filter_size),
                       stride=stride,
                       padding=padding,
                       use_cudnn=use_cudnn,
                       param_attr=fluid.ParamAttr(
                           initializer=fluid.initializer.NormalInitializer(
                               loc=0.0, scale=stddev)),
                       bias_attr=con_bias_attr)

    # InstanceNorm: fixed (non-trainable) scale=1 / offset=0.
    if norm_layer == InstanceNorm:
        self.bn = InstanceNorm(
            num_channels=num_filters,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(1.0),
                trainable=False),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.0),
                trainable=False),
        )
    # BatchNorm: trainable scale ~ Normal(1, 0.02), offset = 0.
    elif norm_layer == BatchNorm:
        self.bn = BatchNorm(
            num_channels=num_filters,
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.
                                       NormalInitializer(1.0, 0.02)),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.0)),
        )
    else:
        raise NotImplementedError

    self.relufactor = relufactor
    self.use_bias = use_bias
    self.norm = norm
    if relu:
        # relufactor == 0.0 selects plain ReLU, otherwise a leaky variant.
        if relufactor == 0.0:
            self.lrelu = ReLU()
        else:
            self.lrelu = Leaky_ReLU(self.relufactor)
    self.relu = relu
def __init__(self, num_channels, num_filters, filter_size, stride=1,
             groups=1, act=None, name=None):
    """ResNet-style Conv2D + BatchNorm with checkpoint-compatible parameter
    names ("conv1" -> "bn_conv1", "resXY_branchZ" -> "bnXY_branchZ").

    Fix: the `groups` argument was accepted but never forwarded — Conv2D was
    hard-coded with groups=None (an ungrouped conv), silently ignoring any
    caller-requested grouping. It is now passed through; the default
    (groups=1) behaves identically to groups=None in fluid's Conv2D.
    """
    super(ConvBNLayer, self).__init__()
    self._conv = Conv2D(
        num_channels=num_channels,
        num_filters=num_filters,
        filter_size=filter_size,
        stride=stride,
        padding=(filter_size - 1) // 2,
        groups=groups,
        act=None,
        param_attr=fluid.param_attr.ParamAttr(name=name + "_weights"),
        bias_attr=False)
    # BN names mirror the classic ResNet checkpoint layout.
    if name == "conv1":
        bn_name = "bn_" + name
    else:
        bn_name = "bn" + name[3:]
    self._batch_norm = BatchNorm(
        num_filters,
        act=act,
        param_attr=ParamAttr(name=bn_name + "_scale"),
        bias_attr=ParamAttr(bn_name + "_offset"),
        moving_mean_name=bn_name + "_mean",
        moving_variance_name=bn_name + "_variance")
def __init__(
        self,
        inplanes,
        planes,
        kernel_size=(3, 1, 1),
        stride=(1, 1, 1),
        padding=(1, 0, 0),
        bias=False,
        groups=1,
        norm=False,
        activation=False,
        downsample_position='after',
        downsample_scale=(1, 2, 2),
):
    """3D conv with optional BatchNorm/activation and a pooling step applied
    either before or after the conv (per downsample_position).

    Args:
        inplanes / planes: conv input/output channels.
        kernel_size / stride / padding: 3D conv geometry (T, H, W tuples).
        bias: conv bias flag (passed as bias_attr).
        groups: conv groups.
        norm: when True, a BatchNorm layer is built; otherwise self.norm is None.
        activation: flag stored for the forward pass.
        downsample_position: 'before' or 'after' — where the downsample happens
            relative to the conv.
        downsample_scale: pooling scale tuple for the downsample step.
    """
    super(Downampling, self).__init__()

    self.conv = Conv3D(inplanes, planes, kernel_size, stride, padding,
                       bias_attr=bias, groups=groups)
    # NOTE(review): norm1 keeps the raw boolean while norm is rebound to the
    # layer (or None) — norm1 looks like a leftover; confirm it is unused.
    self.norm1 = norm
    self.norm = BatchNorm(planes) if norm else None
    self.activation = activation
    assert (downsample_position in ['before', 'after'])
    self.downsample_position = downsample_position
    self.downsample_scale = downsample_scale
def __init__(
        self,
        in_channels=[1024, 1024],
        mid_channels=[1024, 1024],
        out_channels=2048,
        ds_scales=[(1, 1, 1), (1, 1, 1)],
):
    """Fuses multiple feature levels: each input level goes through a grouped
    1x1x1 Downampling op (BN+activation, downsample before conv), then the
    concatenated features are reduced to out_channels by a 1x1x1 conv+BN+ReLU.

    NOTE(review): the list/tuple defaults are mutable and shared across calls;
    harmless only as long as callers never mutate them.
    """
    super(LevelFusion, self).__init__()
    self.ops = fluid.dygraph.LayerList()
    for i in range(len(in_channels)):
        self.ops.append(
            Downampling(in_channels[i],
                        mid_channels[i],
                        kernel_size=(1, 1, 1),
                        stride=(1, 1, 1),
                        padding=(0, 0, 0),
                        bias=False,
                        groups=32,
                        norm=True,
                        activation=True,
                        downsample_position='before',
                        downsample_scale=ds_scales[i]))
    # Channel count after concatenating every level's mid features.
    in_dims = np.sum(mid_channels)
    self.fusion_conv = fluid.dygraph.Sequential(
        Conv3D(in_dims, out_channels, 1, 1, 0, bias_attr=False),
        BatchNorm(out_channels, act='relu'),
    )
def __init__(self, inplanes, planes, kernel_size, stride, padding,
             bias=False, groups=1):
    """Conv3D (Xavier-uniform initialized) followed by BatchNorm with a
    fused ReLU."""
    super(ConvModule, self).__init__()
    xavier_init = fluid.initializer.XavierInitializer(uniform=True,
                                                      fan_in=None,
                                                      fan_out=None,
                                                      seed=0)
    self.conv = Conv3D(num_channels=inplanes,
                       num_filters=planes,
                       filter_size=kernel_size,
                       stride=stride,
                       padding=padding,
                       bias_attr=bias,
                       groups=groups,
                       param_attr=xavier_init)
    # BN scale initialized to 1, offset to 0; ReLU fused via act.
    self.bn = BatchNorm(
        num_channels=planes,
        act='relu',
        param_attr=fluid.initializer.ConstantInitializer(value=1.0,
                                                         force_cpu=False),
        bias_attr=fluid.initializer.ConstantInitializer(value=0.0,
                                                        force_cpu=False))
def __init__(self, ch_in, ch_out, filter_size=3, stride=1, groups=1,
             padding=0, act="leaky", is_test=True):
    """Bias-free Conv2D (default initializers) + BatchNorm; the activation
    named by `self.act` is applied in the forward pass."""
    super(ConvBNLayer, self).__init__()
    self.conv = Conv2D(num_channels=ch_in,
                       num_filters=ch_out,
                       filter_size=filter_size,
                       stride=stride,
                       padding=padding,
                       groups=groups,
                       param_attr=None,
                       bias_attr=False,
                       act=None)
    # param_attr/bias_attr left as None -> framework defaults.
    self.batch_norm = BatchNorm(num_channels=ch_out,
                                is_test=is_test,
                                param_attr=None,
                                bias_attr=None)
    self.act = act
def __init__(self, inc, outc, size, padding=1):
    """A single Conv2D followed by an in-place BatchNorm with fused ReLU."""
    super(Conv_Block, self).__init__()
    self.c1 = Conv2D(num_channels=inc,
                     num_filters=outc,
                     filter_size=size,
                     padding=padding)
    # in_place reuses the conv output buffer for the BN result.
    self.bn = BatchNorm(num_channels=outc, act='relu', in_place=True)