def __init__(self, inc, outc, size, padding=1):
    """Convolution followed by a fused BatchNorm + ReLU."""
    super(Conv_Block, self).__init__()
    self.c1 = Conv2D(num_channels=inc,
                     num_filters=outc,
                     filter_size=size,
                     padding=padding)
    # in_place=True lets BN overwrite the conv output buffer.
    self.bn = BatchNorm(num_channels=outc, act='relu', in_place=True)
def __init__(self, num_channels, filter_size, num_filters, stride, padding,
             channels=None, num_groups=1, act='relu', use_cudnn=True,
             name=None):
    """Conv2D (MSRA init, no bias) followed by BatchNorm carrying ``act``.

    NOTE(review): ``channels`` and ``name`` are accepted but unused here;
    parameter names are derived from ``self.full_name()`` instead.
    """
    super(ConvBNLayer, self).__init__()
    prefix = self.full_name()
    self._conv = Conv2D(
        num_channels=num_channels,
        num_filters=num_filters,
        filter_size=filter_size,
        stride=stride,
        padding=padding,
        groups=num_groups,
        act=None,  # activation is deferred to the BN layer
        use_cudnn=use_cudnn,
        param_attr=ParamAttr(initializer=MSRA(), name=prefix + "_weights"),
        bias_attr=False)
    self._batch_norm = BatchNorm(
        num_filters,
        act=act,
        param_attr=ParamAttr(name=prefix + "_bn" + "_scale"),
        bias_attr=ParamAttr(name=prefix + "_bn" + "_offset"),
        moving_mean_name=prefix + "_bn" + '_mean',
        moving_variance_name=prefix + "_bn" + '_variance')
def __init__(self, input_channels, output_channels, filter_size, stride=1,
             padding=0, act=None, name=None):
    """Conv2D (no bias) + BatchNorm + configurable activation op."""
    super(ConvBNLayer, self).__init__()
    self._conv = Conv2D(num_channels=input_channels,
                        num_filters=output_channels,
                        filter_size=filter_size,
                        stride=stride,
                        padding=padding,
                        param_attr=ParamAttr(name=name + "/weights"),
                        bias_attr=False)
    # TF-style BN hyper-parameters (eps 1e-3, momentum 0.99) and names.
    self._bn = BatchNorm(
        num_features=output_channels,
        epsilon=1e-3,
        momentum=0.99,
        weight_attr=ParamAttr(name=name + "/BatchNorm/gamma"),
        bias_attr=ParamAttr(name=name + "/BatchNorm/beta"))
    self._act_op = layer_utils.Activation(act=act)
def __init__(self, num_channels=64, num_filters=64, padding=0,
             pooltype=None, args=None):
    """3x3 conv + BatchNorm + 2x2 pooling of the requested pool type."""
    super(Conv_block, self).__init__()
    self.args = args
    self.conv = Conv2D(num_channels=num_channels,
                       num_filters=num_filters,
                       filter_size=3,
                       stride=1,
                       padding=padding)
    self.batch_norm = BatchNorm(num_filters)
    self.pooling = Pool2D(pool_size=2, pool_stride=2, pool_type=pooltype)
def __init__(self, name_scope, ch_out, filter_size, stride, padding,
             act='relu'):
    """Conv + frozen BatchNorm with parameter names derived from the scope."""
    super(conv_bn_layer, self).__init__(name_scope)
    self._conv = Conv2D(
        name_scope,
        num_filters=ch_out,
        filter_size=filter_size,
        stride=stride,
        padding=padding,
        act=None,
        param_attr=ParamAttr(name=name_scope + "_weights"),
        bias_attr=ParamAttr(name=name_scope + "_biases"))
    # Reproduce the pretrained checkpoint's BN naming scheme.
    bn_name = ("bn_" + name_scope) if name_scope == "conv1" \
        else ("bn" + name_scope[3:])
    self._bn = BatchNorm(
        bn_name + '.output.1',
        num_channels=ch_out,
        act=act,
        param_attr=ParamAttr(name=bn_name + '_scale'),
        bias_attr=ParamAttr(bn_name + '_offset'),
        moving_mean_name=bn_name + '_mean',
        moving_variance_name=bn_name + '_variance',
        is_test=True)  # running statistics frozen (inference mode)
def __init__(self, img_size=256, img_ch=3, style_dim=64, max_conv_dim=512,
             sn=False, w_hpf=0):
    """Image generator: from-RGB stem plus encoder/decoder stacks built by
    ``architecture_init``."""
    super(Generator, self).__init__()
    self.img_size = img_size
    self.img_ch = img_ch
    self.style_dim = style_dim
    self.max_conv_dim = max_conv_dim
    self.sn = sn
    self.w_hpf = w_hpf
    # Base width scales inversely with resolution (img_size 256 -> 64).
    self.channels = 2**14 // img_size
    # Number of down/up-sampling stages (256 -> 4); one more when the
    # high-pass filter branch is enabled.
    self.repeat_num = int(np.log2(img_size)) - 4
    if self.w_hpf == 1:
        self.repeat_num += 1
    self.from_rgb = Conv2D(num_channels=self.img_ch,
                           num_filters=self.channels,
                           filter_size=3,
                           padding=1,
                           param_attr=weight_initializer,
                           bias_attr=bias_initializer,
                           stride=1,
                           act=None)
    self.encode, self.decode, self.to_rgb = self.architecture_init()
def __init__(self, num_channels, num_filters, filter_size, stride=1,
             groups=1, act="relu", name=None):
    """'Same'-padded conv (Normal(0.001) init, no bias) + BatchNorm; the
    activation name is stored in ``self.act`` for later use."""
    super(ConvBNLayer, self).__init__()
    self._conv = Conv2D(num_channels=num_channels,
                        num_filters=num_filters,
                        filter_size=filter_size,
                        stride=stride,
                        padding=(filter_size - 1) // 2,
                        groups=groups,
                        param_attr=ParamAttr(
                            initializer=Normal(scale=0.001),
                            name=name + "_weights"),
                        bias_attr=False)
    bn_name = name + '_bn'
    # NOTE(review): `weight_attr` is the paddle 2.x BatchNorm keyword while
    # the Conv2D above uses the 1.x `param_attr` API — confirm which
    # BatchNorm class is imported here.
    self._batch_norm = BatchNorm(
        num_filters,
        weight_attr=ParamAttr(name=bn_name + '_scale',
                              initializer=fluid.initializer.Constant(1.0)),
        bias_attr=ParamAttr(bn_name + '_offset',
                            initializer=fluid.initializer.Constant(0.0)))
    self.act = act
def __init__(self, ch_in, ch_out, filter_size=3, stride=1, groups=1,
             padding=0, act="leaky", is_test=True):
    """Conv2D (no bias) + BatchNorm; the activation name is kept in
    ``self.act`` and applied elsewhere."""
    super(ConvBNLayer, self).__init__()
    self.conv = Conv2D(num_channels=ch_in,
                       num_filters=ch_out,
                       filter_size=filter_size,
                       stride=stride,
                       padding=padding,
                       groups=groups,
                       param_attr=None,
                       bias_attr=False,
                       act=None)
    # is_test=True freezes the BN running statistics (inference mode).
    self.batch_norm = BatchNorm(num_channels=ch_out,
                                is_test=is_test,
                                param_attr=None,
                                bias_attr=None)
    self.act = act
def __init__(self, backbone, num_classes, in_channels, channels=None,
             pretrained_model=None, ignore_index=255, **kwargs):
    """Fully convolutional segmentation head on a registered backbone."""
    super(FCN, self).__init__()
    self.num_classes = num_classes
    self.ignore_index = ignore_index
    self.EPS = 1e-5
    # Default the intermediate width to the backbone's output width.
    if channels is None:
        channels = in_channels
    self.backbone = manager.BACKBONES[backbone](**kwargs)
    self.conv_last_2 = ConvBNLayer(num_channels=in_channels,
                                   num_filters=channels,
                                   filter_size=1,
                                   stride=1,
                                   name='conv-2')
    self.conv_last_1 = Conv2D(num_channels=channels,
                              num_filters=self.num_classes,
                              filter_size=1,
                              stride=1,
                              padding=0,
                              param_attr=ParamAttr(
                                  initializer=Normal(scale=0.001),
                                  name='conv-1_weights'))
    self.init_weight(pretrained_model)
def __init__(self, name_scope, num_filters, filter_size, padding):
    """VGG block: two (conv -> BN/ReLU) pairs followed by 2x2 max-pooling.

    num_filters: output channel count shared by both convs in the block.
    """
    super(vgg_block, self).__init__(name_scope)
    self.conv_list = []
    for idx in range(2):
        conv = self.add_sublayer(
            'conv_' + str(idx),
            Conv2D(self.full_name(),
                   num_filters=num_filters,
                   filter_size=filter_size,
                   padding=padding))
        bn = self.add_sublayer(
            'bn_' + str(idx),
            BatchNorm(self.full_name(),
                      num_channels=num_filters,
                      act='relu'))
        # Keep conv/BN interleaved so forward() can just iterate the list.
        self.conv_list.append(conv)
        self.conv_list.append(bn)
    self.pool = Pool2D(self.full_name(),
                       pool_stride=2,
                       pool_size=2,
                       pool_type='max')
def __init__(self, height, width, with_r, with_boundary, in_channels,
             first_one=False, out_channels=256, kernel_size=1, stride=1,
             padding=0):
    """CoordConv: prepends coordinate (and optional radius/boundary)
    channels before a regular convolution."""
    super(CoordConvTh, self).__init__()
    self.out_channels = out_channels
    self.kernel_size = kernel_size
    self.stride = stride
    self.padding = padding
    self.addcoords = AddCoordsTh(height, width, with_r, with_boundary)
    # Account for the extra channels AddCoordsTh concatenates.
    in_channels += 2           # x / y coordinate maps
    if with_r:
        in_channels += 1       # radius map
    if with_boundary and not first_one:
        in_channels += 2       # boundary maps
    self.conv = Conv2D(num_channels=in_channels,
                       num_filters=self.out_channels,
                       filter_size=self.kernel_size,
                       stride=self.stride,
                       padding=self.padding)
def __init__(self, num_channels, num_filters, filter_size, padding=0,
             stride=1, groups=None, act=None, name=None):
    """Single Conv2D whose weight/bias attrs come from ``initial_type``."""
    super(ConvLayer, self).__init__()
    weight_attr, bias_attr = initial_type(name=name,
                                          input_channels=num_channels,
                                          use_bias=True,
                                          filter_size=filter_size)
    self.num_filters = num_filters
    self._conv = Conv2D(num_channels=num_channels,
                        num_filters=num_filters,
                        filter_size=filter_size,
                        padding=padding,
                        stride=stride,
                        groups=groups,
                        act=act,
                        param_attr=weight_attr,
                        bias_attr=bias_attr)
def __init__(self, num_channels, num_filters, filter_size, stride=1,
             groups=1, act=None, name=None):
    """'Same'-padded Conv2D (no bias) + BatchNorm carrying the activation.

    Fix: forward the ``groups`` argument to Conv2D — it was hard-coded to
    ``None``, silently ignoring callers' grouped-conv requests. Backward
    compatible: the default ``groups=1`` behaves like the old ``None``.
    """
    super(ConvBNLayer, self).__init__()
    self._conv = Conv2D(
        num_channels=num_channels,
        num_filters=num_filters,
        filter_size=filter_size,
        stride=stride,
        padding=(filter_size - 1) // 2,
        groups=groups,  # was: groups=None — dropped the caller's setting
        act=None,
        param_attr=fluid.param_attr.ParamAttr(name=name + "_weights"),
        bias_attr=False)
    # Reproduce the pretrained checkpoint's BN naming scheme.
    if name == "conv1":
        bn_name = "bn_" + name
    else:
        bn_name = "bn" + name[3:]
    self._batch_norm = BatchNorm(
        num_filters,
        act=act,
        param_attr=ParamAttr(name=bn_name + "_scale"),
        bias_attr=ParamAttr(bn_name + "_offset"),
        moving_mean_name=bn_name + "_mean",
        moving_variance_name=bn_name + "_variance")
def __init__(self, num_channels, num_filters, filter_size, conv_stride=1,
             conv_padding=0, conv_dilation=1, conv_groups=1, act=None,
             use_cudnn=False, param_attr=None, bias_attr=None):
    """Thin wrapper around a single Conv2D layer.

    Fix: forward ``param_attr`` and ``bias_attr`` to Conv2D — the signature
    accepted them but the call hard-coded ``None``, silently dropping any
    caller-supplied initializers/regularizers. Backward compatible: the
    defaults are still ``None``.
    """
    super(SimpleImgConv, self).__init__()
    self._conv2d = Conv2D(num_channels=num_channels,
                          num_filters=num_filters,
                          filter_size=filter_size,
                          stride=conv_stride,
                          padding=conv_padding,
                          dilation=conv_dilation,
                          groups=conv_groups,
                          param_attr=param_attr,  # was: None
                          bias_attr=bias_attr,    # was: None
                          act=act,
                          use_cudnn=use_cudnn)
def __init__(self, input_channels, output_channels, filter_size, stride=1,
             padding=0, act=None, name=None):
    """Conv2D (no bias) + BatchNorm with TF-style parameter names."""
    super(ConvBNLayer, self).__init__()
    self._conv = Conv2D(num_channels=input_channels,
                        num_filters=output_channels,
                        filter_size=filter_size,
                        stride=stride,
                        padding=padding,
                        param_attr=ParamAttr(name=name + "/weights"),
                        bias_attr=False)
    # TF-style BN hyper-parameters (eps 1e-3, momentum 0.99).
    self._bn = BatchNorm(
        num_channels=output_channels,
        act=act,
        epsilon=1e-3,
        momentum=0.99,
        param_attr=ParamAttr(name=name + "/BatchNorm/gamma"),
        bias_attr=ParamAttr(name=name + "/BatchNorm/beta"),
        moving_mean_name=name + "/BatchNorm/moving_mean",
        moving_variance_name=name + "/BatchNorm/moving_variance")
def __init__(self, num_channels, num_filters, filter_size, stride=1,
             groups=1, act=None):
    """'Same'-padded Conv2D (no bias) + BatchNorm carrying the activation.

    Fix: forward the ``groups`` argument to Conv2D — it was hard-coded to
    ``None``, silently ignoring callers' grouped-conv requests. Backward
    compatible: the default ``groups=1`` behaves like the old ``None``.
    """
    super(ConvBNLayer, self).__init__()
    self._conv = Conv2D(
        num_channels=num_channels,
        num_filters=num_filters,
        filter_size=filter_size,
        stride=stride,
        padding=(filter_size - 1) // 2,
        groups=groups,  # was: groups=None — dropped the caller's setting
        act=None,
        param_attr=fluid.param_attr.ParamAttr(),
        bias_attr=False)
    self._batch_norm = BatchNorm(
        num_filters,
        act=act,
        param_attr=fluid.param_attr.ParamAttr(),
        bias_attr=fluid.param_attr.ParamAttr())
def __init__(self, name_scope, num_channels, num_filters, filter_size,
             stride=1, groups=1, act='relu', is_3d=False):
    """'Same'-padded 2-D or 3-D convolution (no bias) + BatchNorm with act.

    Fix: forward the ``groups`` argument to the conv layer — it was accepted
    by the signature but never passed, silently ignoring grouped-conv
    requests. Backward compatible: the default ``groups=1`` matches the old
    behavior. Also drops the redundant ``self._conv = None`` pre-assignment.
    """
    super(ConvBNLayer, self).__init__(name_scope)
    # Conv2D and Conv3D take identical keyword arguments here.
    conv_cls = Conv3D if is_3d else Conv2D
    self._conv = conv_cls(num_channels=num_channels,
                          num_filters=num_filters,
                          filter_size=filter_size,
                          stride=stride,
                          padding=(filter_size - 1) // 2,
                          groups=groups,  # fix: was silently dropped
                          act=None,
                          bias_attr=False)
    self._batch_norm = BatchNorm(num_filters, act=act)
def __init__(self, num_channels, num_filters=64, filter_size=7, stride=1,
             stddev=0.02, padding=0, norm=True, norm_layer=InstanceNorm,
             relu=True, relufactor=0.0, use_bias=False):
    """GAN-style conv block: Conv2D with Normal(0, stddev) init, optional
    Instance/Batch normalization and optional (leaky) ReLU.

    Raises:
        NotImplementedError: if ``norm_layer`` is neither InstanceNorm
            nor BatchNorm.
    """
    super(conv2d, self).__init__()
    # Idiom fix: `use_bias == False` -> `not use_bias`.
    if not use_bias:
        con_bias_attr = False
    else:
        con_bias_attr = fluid.ParamAttr(
            initializer=fluid.initializer.Constant(0.0))
    self.conv = Conv2D(num_channels=num_channels,
                       num_filters=int(num_filters),
                       filter_size=int(filter_size),
                       stride=stride,
                       padding=padding,
                       use_cudnn=use_cudnn,  # module-level flag, not a param
                       param_attr=fluid.ParamAttr(
                           initializer=fluid.initializer.NormalInitializer(
                               loc=0.0, scale=stddev)),
                       bias_attr=con_bias_attr)
    if norm_layer == InstanceNorm:
        # Affine parameters fixed at identity (not trainable).
        self.bn = InstanceNorm(
            num_channels=num_filters,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(1.0),
                trainable=False),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.0),
                trainable=False))
    elif norm_layer == BatchNorm:
        self.bn = BatchNorm(
            num_channels=num_filters,
            param_attr=fluid.ParamAttr(
                initializer=fluid.initializer.NormalInitializer(1.0, 0.02)),
            bias_attr=fluid.ParamAttr(
                initializer=fluid.initializer.Constant(0.0)))
    else:
        raise NotImplementedError
    self.relufactor = relufactor
    self.use_bias = use_bias
    self.norm = norm
    if relu:
        if relufactor == 0.0:
            self.lrelu = ReLU()
        else:
            self.lrelu = Leaky_ReLU(self.relufactor)
    self.relu = relu
def __init__(self, ch_in, ch_out, filter_size=3, stride=1, groups=1,
             padding=0, act="leaky"):
    """YOLO-style conv block: Conv2D (Normal(0, 0.02) init, no bias) plus a
    BatchNorm whose scale/offset carry zero L2 decay. The activation name
    is kept in ``self.act``."""
    super(ConvBNLayer, self).__init__()
    self.conv = Conv2D(num_channels=ch_in,
                       num_filters=ch_out,
                       filter_size=filter_size,
                       stride=stride,
                       padding=padding,
                       groups=groups,
                       param_attr=ParamAttr(
                           initializer=fluid.initializer.Normal(0., 0.02)),
                       bias_attr=False,
                       act=None)
    self.batch_norm = BatchNorm(
        num_channels=ch_out,
        param_attr=ParamAttr(
            initializer=fluid.initializer.Normal(0., 0.02),
            regularizer=L2Decay(0.)),
        bias_attr=ParamAttr(
            initializer=fluid.initializer.Constant(0.0),
            regularizer=L2Decay(0.)))
    self.act = act
def __init__(self, in_channels, num_filters, filter_size, stride=1,
             padding=0, groups=1, act='relu', name=None):
    """Conv2D (no bias) + BatchNorm with torch-style parameter names."""
    super(ConvBNLayer, self).__init__()
    self.conv = Conv2D(num_channels=in_channels,
                       num_filters=num_filters,
                       filter_size=filter_size,
                       stride=stride,
                       padding=padding,
                       groups=groups,
                       act=None,
                       param_attr=ParamAttr(name=name + ".conv.weight"),
                       bias_attr=False)
    self.bn = BatchNorm(num_filters,
                        act=act,
                        epsilon=0.001,
                        param_attr=ParamAttr(name=name + ".bn.weight"),
                        bias_attr=ParamAttr(name=name + ".bn.bias"),
                        moving_mean_name=name + '.bn.running_mean',
                        moving_variance_name=name + '.bn.running_var')
def __init__(self, num_channels, num_filters, filter_size, stride=1,
             groups=1, act=None):
    """'Same'-padded Conv2D (no bias) followed by BatchNorm.

    Args:
        num_channels: input channel count.
        num_filters: output channel count.
        stride: convolution stride.
        groups: grouped-convolution group count (1 = ordinary conv).
        act: activation applied by the BN layer (None = linear).
    """
    super(ConvBNLayer, self).__init__()
    # Convolution stage; the activation is deferred to the BN layer.
    self.conv = Conv2D(num_channels=num_channels,
                       num_filters=num_filters,
                       filter_size=filter_size,
                       stride=stride,
                       padding=(filter_size - 1) // 2,
                       groups=groups,
                       act=None,
                       bias_attr=False)
    # BatchNorm stage carrying the activation.
    self.batch_norm = BatchNorm(num_channels=num_filters, act=act)
def __init__(self, dict_dim, emb_dim=128, hid_dim=128, fc_hid_dim=96,
             class_dim=2, channels=1, win_size=(3, 128)):
    """Text CNN: embedding -> single windowed Conv2D -> two FC layers
    (softmax on the last). All float parameters are created as float64."""
    super(CNN, self).__init__()
    self.dict_dim = dict_dim
    self.emb_dim = emb_dim
    self.hid_dim = hid_dim
    self.fc_hid_dim = fc_hid_dim
    self.class_dim = class_dim
    self.channels = channels
    self.win_size = win_size
    # +1 vocabulary slot for the padding token at index 0.
    self.embedding = Embedding(size=[self.dict_dim + 1, self.emb_dim],
                               dtype='float64',
                               is_sparse=False,
                               padding_idx=0)
    self._conv2d = Conv2D(num_channels=self.channels,
                          num_filters=self.hid_dim,
                          filter_size=win_size,
                          padding=[1, 0],
                          use_cudnn=True,
                          act=None,
                          dtype="float64")
    self._fc_1 = Linear(input_dim=self.hid_dim,
                        output_dim=self.fc_hid_dim,
                        dtype="float64")
    self._fc_2 = Linear(input_dim=self.fc_hid_dim,
                        output_dim=self.class_dim,
                        act="softmax",
                        dtype="float64")
def __init__(self, input_channels, output_channels, stride, filter,
             dilation=1, act=None, name=None):
    """Depthwise-separable convolution: depthwise Conv2D + BN, then a
    pointwise 1x1 Conv2D + BN (TF-style parameter names)."""
    super(Seperate_Conv, self).__init__()
    # Depthwise stage: one group per input channel, dilated 'same' padding.
    self._conv1 = Conv2D(num_channels=input_channels,
                         num_filters=input_channels,
                         filter_size=filter,
                         stride=stride,
                         groups=input_channels,
                         padding=(filter) // 2 * dilation,
                         dilation=dilation,
                         param_attr=ParamAttr(
                             name=name + "/depthwise/weights"),
                         bias_attr=False)
    self._bn1 = BatchNorm(
        input_channels,
        act=act,
        epsilon=1e-3,
        momentum=0.99,
        param_attr=ParamAttr(name=name + "/depthwise/BatchNorm/gamma"),
        bias_attr=ParamAttr(name=name + "/depthwise/BatchNorm/beta"),
        moving_mean_name=name + "/depthwise/BatchNorm/moving_mean",
        moving_variance_name=name + "/depthwise/BatchNorm/moving_variance")
    # Pointwise stage: 1x1 conv mixing channels.
    self._conv2 = Conv2D(input_channels,
                         output_channels,
                         1,
                         stride=1,
                         groups=1,
                         padding=0,
                         param_attr=ParamAttr(
                             name=name + "/pointwise/weights"),
                         bias_attr=False)
    self._bn2 = BatchNorm(
        output_channels,
        act=act,
        epsilon=1e-3,
        momentum=0.99,
        param_attr=ParamAttr(name=name + "/pointwise/BatchNorm/gamma"),
        bias_attr=ParamAttr(name=name + "/pointwise/BatchNorm/beta"),
        moving_mean_name=name + "/pointwise/BatchNorm/moving_mean",
        moving_variance_name=name + "/pointwise/BatchNorm/moving_variance")
def __init__(self, input_channel, output_channel, filter_size=3, stride=1,
             relu=False):
    """Grouped (depthwise-style) conv + BatchNorm packaged as a Sequential.

    NOTE(review): the conv receives ``filter_size // 2`` as its kernel size
    (3 -> 1); if a 3x3 depthwise kernel was intended this looks like a
    kernel/padding mix-up — confirm against callers before changing.
    """
    super(DepthwiseConv, self).__init__()
    self.depthwiseConvBN = fluid.dygraph.Sequential(
        Conv2D(input_channel,
               output_channel,
               filter_size=filter_size // 2,
               stride=stride,
               groups=input_channel),
        BatchNorm(num_channels=output_channel))
    self.relu = relu
def __init__(self, group, out_ch, channels, act="relu", is_test=False,
             pool=True, use_cudnn=True):
    """Two 3x3 conv + BN stages (He-style normal init, no conv bias) with
    an optional trailing 2x2 max-pool."""
    super(ConvBNPool, self).__init__()
    self.group = group
    self.pool = pool
    filter_size = 3
    # He initialization: std = sqrt(2 / (k^2 * fan_in)) per stage.
    conv_std_0 = (2.0 / (filter_size**2 * channels[0]))**0.5
    conv_param_0 = fluid.ParamAttr(
        initializer=fluid.initializer.Normal(0.0, conv_std_0))
    conv_std_1 = (2.0 / (filter_size**2 * channels[1]))**0.5
    conv_param_1 = fluid.ParamAttr(
        initializer=fluid.initializer.Normal(0.0, conv_std_1))
    self.conv_0_layer = Conv2D(channels[0],
                               out_ch[0],
                               3,
                               padding=1,
                               param_attr=conv_param_0,
                               bias_attr=False,
                               act=None,
                               use_cudnn=use_cudnn)
    self.bn_0_layer = BatchNorm(out_ch[0], act=act, is_test=is_test)
    self.conv_1_layer = Conv2D(out_ch[0],
                               num_filters=out_ch[1],
                               filter_size=3,
                               padding=1,
                               param_attr=conv_param_1,
                               bias_attr=False,
                               act=None,
                               use_cudnn=use_cudnn)
    self.bn_1_layer = BatchNorm(out_ch[1], act=act, is_test=is_test)
    if self.pool:
        self.pool_layer = Pool2D(pool_size=2,
                                 pool_type='max',
                                 pool_stride=2,
                                 use_cudnn=use_cudnn,
                                 ceil_mode=True)
def __init__(self, name_scope, in_channel, out_channel, opt):
    """Conv2D paired with spectral normalization; kernel and padding sizes
    come from ``opt``.

    NOTE(review): ``in_channel`` is accepted but not passed to Conv2D.
    """
    super(SpectralConv, self).__init__(name_scope)
    self._conv = Conv2D("%s_conv" % (name_scope),
                        num_filters=out_channel,
                        filter_size=opt.ker_size,
                        padding=opt.padd_size,
                        stride=1)
    self.spectralNorm = SpectralNorm('%s_sn' % (name_scope),
                                     dim=1,
                                     power_iters=1)
def __init__(self, num_classes=1):
    """AlexNet: five ReLU conv layers with three max-pools, then three FCs.

    Unlike LeNet, every conv uses the 'relu' activation for non-linear
    decision boundaries. Dropout ratios for fc1/fc2 are stored here and
    applied in forward().
    """
    super(AlexNet, self).__init__()
    self.conv1 = Conv2D(num_channels=3, num_filters=96, filter_size=11,
                        stride=4, padding=5, act='relu')
    self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
    self.conv2 = Conv2D(num_channels=96, num_filters=256, filter_size=5,
                        stride=1, padding=2, act='relu')
    self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
    self.conv3 = Conv2D(num_channels=256, num_filters=384, filter_size=3,
                        stride=1, padding=1, act='relu')
    self.conv4 = Conv2D(num_channels=384, num_filters=384, filter_size=3,
                        stride=1, padding=1, act='relu')
    self.conv5 = Conv2D(num_channels=384, num_filters=256, filter_size=3,
                        stride=1, padding=1, act='relu')
    self.pool5 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
    # 12544 = 7 * 7 * 256 flattened feature size.
    self.fc1 = Linear(input_dim=12544, output_dim=4096, act='relu')
    self.drop_ratio1 = 0.5
    self.fc2 = Linear(input_dim=4096, output_dim=4096, act='relu')
    self.drop_ratio2 = 0.5
    self.fc3 = Linear(input_dim=4096, output_dim=num_classes)
def make_layers(cfg, batch_norm=False):
    """Build a VGG feature extractor from a config list.

    'M' entries become 2x2 max-pools; integer entries become 3x3 convs with
    that many output channels (ReLU fused into the BN layer when
    ``batch_norm`` is True, otherwise into the conv itself).
    """
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers.append(Pool2D(pool_size=2, pool_stride=2))
            continue
        if batch_norm:
            layers.append(Conv2D(in_channels, v, filter_size=3, padding=1))
            layers.append(BatchNorm(v, act='relu'))
        else:
            layers.append(
                Conv2D(in_channels, v, filter_size=3, padding=1, act='relu'))
        in_channels = v
    return Sequential(*layers)
def __init__(self, c_in, c_out, kernel_size, stride, padding, affine=True):
    """Separable conv applied twice: (depthwise -> 1x1 pointwise -> BN) x 2,
    MSRA-initialized, no conv biases; only the first stage is strided."""
    super(SepConv, self).__init__()
    # Stage 1: strided depthwise + pointwise, BN over c_in channels.
    self.conv1 = Conv2D(
        num_channels=c_in,
        num_filters=c_in,
        filter_size=kernel_size,
        stride=stride,
        padding=padding,
        groups=c_in,
        use_cudnn=False,
        param_attr=fluid.ParamAttr(initializer=MSRAInitializer()),
        bias_attr=False)
    self.conv2 = Conv2D(
        num_channels=c_in,
        num_filters=c_in,
        filter_size=1,
        stride=1,
        padding=0,
        param_attr=fluid.ParamAttr(initializer=MSRAInitializer()),
        bias_attr=False)
    scale_attr, shift_attr = bn_param_config(affine)
    self.bn1 = BatchNorm(num_channels=c_in,
                         param_attr=scale_attr,
                         bias_attr=shift_attr)
    # Stage 2: unit-stride depthwise + pointwise to c_out, BN over c_out.
    self.conv3 = Conv2D(
        num_channels=c_in,
        num_filters=c_in,
        filter_size=kernel_size,
        stride=1,
        padding=padding,
        groups=c_in,
        use_cudnn=False,
        param_attr=fluid.ParamAttr(initializer=MSRAInitializer()),
        bias_attr=False)
    self.conv4 = Conv2D(
        num_channels=c_in,
        num_filters=c_out,
        filter_size=1,
        stride=1,
        padding=0,
        param_attr=fluid.ParamAttr(initializer=MSRAInitializer()),
        bias_attr=False)
    scale_attr, shift_attr = bn_param_config(affine)
    self.bn2 = BatchNorm(num_channels=c_out,
                         param_attr=scale_attr,
                         bias_attr=shift_attr)
def architecture_init(self):
    """Return the BN -> ReLU -> 1x1 conv downsample path as a Sequential."""
    downsample = Sequential(
        BatchNorm(self.in_planes),
        Relu(),
        Conv2D(self.in_planes,
               self.out_planes,
               1,
               1,
               bias_attr=bias_initializer_1x1))
    return downsample