def __init__(self, name_scope, input_channels, filter_list, pool_mode="avg"):
    """Four-branch Inception block.

    Branches: 1x1 conv | 1x1 -> 3x3 | 1x1 -> 3x3 -> 3x3 | 3x3 pool -> 1x1.
    `filter_list` provides the seven output-channel counts in branch order.
    """
    super(InceptionBasic, self).__init__(name_scope)
    # Branch 1: plain 1x1 convolution.
    self.branch_1 = ConvBNLayer(self.full_name(), num_channels=input_channels,
                                num_filters=filter_list[0], filter_size=1,
                                stride=1, act='relu')
    # Branch 2: 1x1 reduction followed by a 3x3 convolution.
    self.branch_2_a = ConvBNLayer(self.full_name(), num_channels=input_channels,
                                  num_filters=filter_list[1], filter_size=1,
                                  stride=1, act='relu')
    # NOTE: padding of 1 (= (3-1)//2) is presumably applied inside
    # ConvBNLayer for 3x3 kernels — confirm against its definition.
    self.branch_2_b = ConvBNLayer(self.full_name(), num_channels=filter_list[1],
                                  num_filters=filter_list[2], filter_size=3,
                                  stride=1, act='relu')
    # Branch 3: 1x1 reduction followed by two stacked 3x3 convolutions.
    self.branch_3_a = ConvBNLayer(self.full_name(), num_channels=input_channels,
                                  num_filters=filter_list[3], filter_size=1,
                                  stride=1, act='relu')
    self.branch_3_b = ConvBNLayer(self.full_name(), num_channels=filter_list[3],
                                  num_filters=filter_list[4], filter_size=3,
                                  stride=1, act='relu')
    self.branch_3_c = ConvBNLayer(self.full_name(), num_channels=filter_list[4],
                                  num_filters=filter_list[5], filter_size=3,
                                  stride=1, act='relu')
    # Branch 4: 3x3 pooling (avg by default) followed by a 1x1 convolution.
    self.branch_4_a = Pool2D(pool_size=3, pool_stride=1, pool_padding=1,
                             pool_type=pool_mode, ceil_mode=True)
    self.branch_4_b = ConvBNLayer(self.full_name(), num_channels=input_channels,
                                  num_filters=filter_list[6], filter_size=1,
                                  stride=1, act='relu')
def __init__(self, num_channels=64, num_filters=64, padding=0, pooltype=None, args=None):
    """3x3 conv -> batch norm -> 2x2 pool building block."""
    super(Conv_block, self).__init__()
    self.args = args
    # 3x3 convolution; spatial size is controlled by the caller via `padding`.
    self.conv = Conv2D(num_channels=num_channels, num_filters=num_filters,
                       filter_size=3, stride=1, padding=padding)
    self.batch_norm = BatchNorm(num_filters)
    # Non-overlapping 2x2 pooling; `pooltype` selects max/avg.
    self.pooling = Pool2D(pool_size=2, pool_stride=2, pool_type=pooltype)
def __init__(self, num_channels, num_filters, pooltype):
    """Residual block: 1x1 shortcut plus three stacked 3x3 convs,
    optionally followed by a 2x2 pooling layer."""
    super(Residual_Block, self).__init__()
    # 1x1 projection so the shortcut matches the main path's channels.
    self.short_cut = BASIC_BLOCK(num_channels=num_channels,
                                 num_filters=num_filters,
                                 filter_size=1, padding=0)
    # Main path: three 3x3 convolutions, first one changes channel count.
    self.conv0 = BASIC_BLOCK(num_channels=num_channels,
                             num_filters=num_filters,
                             filter_size=3, padding=1)
    self.conv1 = BASIC_BLOCK(num_channels=num_filters,
                             num_filters=num_filters,
                             filter_size=3, padding=1)
    self.conv2 = BASIC_BLOCK(num_channels=num_filters,
                             num_filters=num_filters,
                             filter_size=3, padding=1)
    # Pooling is optional: a falsy `pooltype` disables downsampling.
    self.pooling = (Pool2D(pool_size=2, pool_stride=2, pool_type=pooltype)
                    if pooltype else None)
def __init__(self, args):
    """ResNet-12 backbone: four residual blocks, global average pooling,
    and optional dropout, configured from `args`."""
    super(ResNet12, self).__init__()
    self.args = args
    # Omniglot images are grayscale; everything else is assumed RGB.
    input_channels = 1 if self.args.dataset == 'omniglot' else 3
    # RelationNet keeps larger feature maps, so the later blocks skip pooling.
    pooltype = None if self.args.method == 'relationnet' else self.args.pooling_type
    # NOTE(review): blocks 0 and 1 use args.pooling_type directly rather
    # than the computed `pooltype` — looks intentional (only the last two
    # blocks change for relationnet), but confirm.
    self.res_block0 = Residual_Block(
        num_channels=input_channels,
        num_filters=self.args.resnet12_num_filters[0],
        pooltype=self.args.pooling_type)
    self.res_block1 = Residual_Block(
        num_channels=self.args.resnet12_num_filters[0],
        num_filters=self.args.resnet12_num_filters[1],
        pooltype=self.args.pooling_type)
    self.res_block2 = Residual_Block(
        num_channels=self.args.resnet12_num_filters[1],
        num_filters=self.args.resnet12_num_filters[2],
        pooltype=pooltype)
    self.res_block3 = Residual_Block(
        num_channels=self.args.resnet12_num_filters[2],
        num_filters=self.args.resnet12_num_filters[3],
        pooltype=pooltype)
    # Global average pooling collapses each feature map to a single value.
    self.gap = Pool2D(pool_type='avg', global_pooling=True)
    if self.args.if_dropout:
        self.dropout = Dropout(p=0.5)
def __init__(self, in_channels, name=None):
    """InceptionV3 grid-reduction block D: two stride-2 conv branches
    plus a stride-2 max-pool branch."""
    super(InceptionD, self).__init__()
    # Branch 1: 1x1 reduction then stride-2 3x3 convolution.
    self.branch3x3_1 = ConvBNLayer(in_channels, 192, 1,
                                   name=name + '.branch3x3_1')
    self.branch3x3_2 = ConvBNLayer(192, 320, 3, stride=2,
                                   name=name + '.branch3x3_2')
    # Branch 2: 1x1 -> factorized 7x7 (1x7 then 7x1) -> stride-2 3x3.
    self.branch7x7x3_1 = ConvBNLayer(in_channels, 192, 1,
                                     name=name + '.branch7x7x3_1')
    self.branch7x7x3_2 = ConvBNLayer(192, 192, (1, 7), padding=(0, 3),
                                     name=name + '.branch7x7x3_2')
    self.branch7x7x3_3 = ConvBNLayer(192, 192, (7, 1), padding=(3, 0),
                                     name=name + '.branch7x7x3_3')
    self.branch7x7x3_4 = ConvBNLayer(192, 192, 3, stride=2,
                                     name=name + '.branch7x7x3_4')
    # Branch 3: parameter-free stride-2 max pooling.
    self.branch_pool = Pool2D(pool_size=3, pool_stride=2, pool_type='max')
def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
    """Squeeze-and-Excitation layer: global pool -> FC(relu) -> FC(sigmoid)."""
    super(SELayer, self).__init__()
    # "Squeeze": global average pooling over spatial dims.
    self.pool2d_gap = Pool2D(pool_type='avg', global_pooling=True)
    self._num_channels = num_channels
    # Bottleneck width of the excitation MLP.
    med_ch = int(num_channels / reduction_ratio)
    # Uniform(-1/sqrt(fan_in), 1/sqrt(fan_in)) initialization.
    stdv = 1.0 / math.sqrt(num_channels * 1.0)
    self.squeeze = Linear(
        num_channels,
        med_ch,
        act="relu",
        param_attr=ParamAttr(
            initializer=fluid.initializer.Uniform(-stdv, stdv),
            name=name + "_sqz_weights"),
        bias_attr=ParamAttr(name=name + '_sqz_offset'))
    stdv = 1.0 / math.sqrt(med_ch * 1.0)
    self.excitation = Linear(
        med_ch,
        num_filters,
        act="sigmoid",
        param_attr=ParamAttr(
            initializer=fluid.initializer.Uniform(-stdv, stdv),
            name=name + "_exc_weights"),
        bias_attr=ParamAttr(name=name + '_exc_offset'))
def __init__(self, name_scope, c1, c2, c3, c4, **kwargs):
    '''
    Inception module.

    name_scope: module name, string.
    c1: output channels of the 1x1 conv on the first branch, int.
    c2: output channels on the second branch, tuple/list where c2[0] is
        the 1x1 conv and c2[1] the 3x3 conv.
    c3: output channels on the third branch, tuple/list where c3[0] is
        the 1x1 conv and c3[1] the 5x5 conv.
    c4: output channels of the 1x1 conv on the fourth (pool) branch, int.
    '''
    super(Inception, self).__init__(name_scope)
    # Create the operators used on each branch, in order.
    self.p1_1 = Conv2D(self.full_name(), num_filters=c1,
                       filter_size=1, act='relu')
    self.p2_1 = Conv2D(self.full_name(), num_filters=c2[0],
                       filter_size=1, act='relu')
    self.p2_2 = Conv2D(self.full_name(), num_filters=c2[1],
                       filter_size=3, padding=1, act='relu')
    self.p3_1 = Conv2D(self.full_name(), num_filters=c3[0],
                       filter_size=1, act='relu')
    self.p3_2 = Conv2D(self.full_name(), num_filters=c3[1],
                       filter_size=5, padding=2, act='relu')
    self.p4_1 = Pool2D(self.full_name(), pool_size=3, pool_stride=1,
                       pool_padding=1, pool_type='max')
    self.p4_2 = Conv2D(self.full_name(), num_filters=c4,
                       filter_size=1, act='relu')
def __init__(self, c_cur, stride, method):
    """DARTS mixed operation: one candidate op per primitive.

    For PC-DARTS only 1/k of the channels go through the ops, hence the
    channel divisor `_k`. Pooling primitives get a frozen affine BatchNorm
    appended so their output is normalized like the conv primitives.
    """
    super(MixedOp, self).__init__()
    self._method = method
    # PC-DARTS processes a quarter of the channels; plain DARTS uses all.
    self._k = 4 if self._method == "PC-DARTS" else 1
    self.mp = Pool2D(
        pool_size=2,
        pool_stride=2,
        pool_type='max', )
    candidate_ops = []
    for primitive in PRIMITIVES:
        candidate = OPS[primitive](c_cur // self._k, stride, False)
        if 'pool' in primitive:
            # Frozen scale=1 / shift=0 so the BN only normalizes.
            gamma_attr = ParamAttr(
                initializer=fluid.initializer.Constant(value=1),
                trainable=False)
            beta_attr = ParamAttr(
                initializer=fluid.initializer.Constant(value=0),
                trainable=False)
            bn = BatchNorm(c_cur // self._k,
                           param_attr=gamma_attr,
                           bias_attr=beta_attr)
            candidate = fluid.dygraph.Sequential(candidate, bn)
        candidate_ops.append(candidate)
    self._ops = fluid.dygraph.LayerList(candidate_ops)
def __init__(self, bottleneck_params, in_channels=3, class_dim=1024):
    """Xception backbone: stem convs, entry/middle/exit flows, global
    pooling and a softmax classifier head."""
    super(Xception, self).__init__()
    # Stem: two conv-bn layers, first one downsamples by 2.
    self.convbn1 = ConvBN(in_channels, 32, 3, 2, act='relu')
    self.convbn2 = ConvBN(32, 64, 3, 1, act='relu')
    in_channel = 64
    # Each flow returns both its layers and its output channel count,
    # which feeds the next flow.
    self.entry_flow, in_channel = self.block_flow(
        block_num=bottleneck_params['entry_flow'][0],
        strides=bottleneck_params['entry_flow'][1],
        chns=bottleneck_params['entry_flow'][2],
        in_channel=in_channel)
    self.middle_flow, in_channel = self.block_flow(
        block_num=bottleneck_params['middle_flow'][0],
        strides=bottleneck_params['middle_flow'][1],
        chns=bottleneck_params['middle_flow'][2],
        in_channel=in_channel)
    self.exit_flow, in_channel = self.exit_block_flow(
        block_num=bottleneck_params['exit_flow'][0],
        strides=bottleneck_params['exit_flow'][1],
        chns=bottleneck_params['exit_flow'][2],
        in_channel=in_channel)
    self.pool = Pool2D(pool_size=7, pool_type='avg', global_pooling=True)
    self.feature_dim = 2048
    import math
    # Uniform init scaled by the classifier's fan-in (2048 features).
    stdv = 1.0 / math.sqrt(2048 * 1.0)
    self.linear = Linear(
        self.feature_dim,
        class_dim,
        act='softmax',
        param_attr=fluid.param_attr.ParamAttr(
            initializer=fluid.initializer.Uniform(-stdv, stdv)))
def __init__(self, scale=1.0, num_classes=1000, with_pool=True, classifier_activation='softmax'):
    """MobileNetV2: stem conv, inverted-residual stacks, 1x1 expansion,
    optional global pooling and optional classifier head.

    `scale` multiplies every channel count (width multiplier).
    """
    super(MobileNetV2, self).__init__()
    self.scale = scale
    self.num_classes = num_classes
    self.with_pool = with_pool
    # (expansion t, output channels c, repeats n, first stride s)
    bottleneck_params_list = [
        (1, 16, 1, 1),
        (6, 24, 2, 2),
        (6, 32, 3, 2),
        (6, 64, 4, 2),
        (6, 96, 3, 1),
        (6, 160, 3, 2),
        (6, 320, 1, 1),
    ]
    # Stem: stride-2 3x3 conv on the RGB input.
    self._conv1 = ConvBNLayer(
        num_channels=3,
        num_filters=int(32 * scale),
        filter_size=3,
        stride=2,
        padding=1)
    self._invl = []
    in_c = int(32 * scale)
    # Register each inverted-residual stack as conv2 ... conv8.
    for i, (t, c, n, s) in enumerate(bottleneck_params_list, start=2):
        stack = self.add_sublayer(
            sublayer=InvresiBlocks(
                in_c=in_c, t=t, c=int(c * scale), n=n, s=s),
            name='conv' + str(i))
        self._invl.append(stack)
        in_c = int(c * scale)
    # Final 1x1 expansion; never narrower than 1280 channels.
    self._out_c = int(1280 * scale) if scale > 1.0 else 1280
    self._conv9 = ConvBNLayer(
        num_channels=in_c,
        num_filters=self._out_c,
        filter_size=1,
        stride=1,
        padding=0)
    if with_pool:
        self._pool2d_avg = Pool2D(pool_type='avg', global_pooling=True)
    if num_classes > 0:
        tmp_param = ParamAttr(name=self.full_name() + "fc10_weights")
        self._fc = Linear(
            self._out_c,
            num_classes,
            act=classifier_activation,
            param_attr=tmp_param,
            bias_attr=ParamAttr(name="fc10_offset"))
def __init__(self, num_classes=1):
    """AlexNet: five conv layers with interleaved max pooling, then
    three fully connected layers with dropout."""
    super(AlexNet, self).__init__()
    # Like LeNet, AlexNet alternates convolution and pooling to extract
    # features, but uses ReLU activations for non-linear decision
    # boundaries instead of sigmoid.
    self.conv1 = Conv2D(num_channels=3, num_filters=96, filter_size=11,
                        stride=4, padding=5, act='relu')
    self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
    self.conv2 = Conv2D(num_channels=96, num_filters=256, filter_size=5,
                        stride=1, padding=2, act='relu')
    self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
    self.conv3 = Conv2D(num_channels=256, num_filters=384, filter_size=3,
                        stride=1, padding=1, act='relu')
    self.conv4 = Conv2D(num_channels=384, num_filters=384, filter_size=3,
                        stride=1, padding=1, act='relu')
    self.conv5 = Conv2D(num_channels=384, num_filters=256, filter_size=3,
                        stride=1, padding=1, act='relu')
    self.pool5 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
    # 12544 = 7 * 7 * 256 flattened features.
    self.fc1 = Linear(input_dim=12544, output_dim=4096, act='relu')
    self.drop_ratio1 = 0.5
    self.fc2 = Linear(input_dim=4096, output_dim=4096, act='relu')
    self.drop_ratio2 = 0.5
    self.fc3 = Linear(input_dim=4096, output_dim=num_classes)
def __init__(self, c0, c1, c2, c3, c4):
    """Inception block with explicit input channels.

    c0: input channels; c1: branch-1 1x1 output; c2/c3: (reduce, out)
    pairs for the 3x3 and 5x5 branches; c4: pool-branch 1x1 output.
    """
    super(Inception, self).__init__()
    # Branch 1: 1x1 conv.
    self.p1_1 = Conv2D(num_channels=c0, num_filters=c1,
                       filter_size=1, act='relu')
    # Branch 2: 1x1 reduce -> 3x3 conv.
    self.p2_1 = Conv2D(num_channels=c0, num_filters=c2[0],
                       filter_size=1, act='relu')
    self.p2_2 = Conv2D(num_channels=c2[0], num_filters=c2[1],
                       filter_size=3, padding=1, act='relu')
    # Branch 3: 1x1 reduce -> 5x5 conv.
    self.p3_1 = Conv2D(num_channels=c0, num_filters=c3[0],
                       filter_size=1, act='relu')
    self.p3_2 = Conv2D(num_channels=c3[0], num_filters=c3[1],
                       filter_size=5, padding=2, act='relu')
    # Branch 4: 3x3 max pool -> 1x1 conv.
    self.p4_1 = Pool2D(pool_size=3, pool_stride=1, pool_padding=1,
                       pool_type='max')
    self.p4_2 = Conv2D(num_channels=c0, num_filters=c4,
                       filter_size=1, act='relu')
def __init__(self, class_dim=1000, scale=1.0):
    """MobileNetV2 classifier: stem conv, inverted-residual stacks,
    1x1 expansion, global average pooling and a linear head.

    `scale` is the width multiplier applied to all channel counts.
    """
    super(MobileNetV2, self).__init__()
    self.scale = scale
    self.class_dim = class_dim
    # (expansion t, output channels c, repeats n, first stride s)
    bottleneck_params_list = [
        (1, 16, 1, 1),
        (6, 24, 2, 2),
        (6, 32, 3, 2),
        (6, 64, 4, 2),
        (6, 96, 3, 1),
        (6, 160, 3, 2),
        (6, 320, 1, 1),
    ]
    # 1. stem conv (stride 2, no activation here).
    self._conv1 = ConvBNLayer(num_channels=3,
                              num_filters=int(32 * scale),
                              filter_size=3,
                              stride=2,
                              act=None,
                              padding=1)
    # 2. inverted-residual stacks, registered as conv2 ... conv8.
    self._invl = []
    in_c = int(32 * scale)
    for i, (t, c, n, s) in enumerate(bottleneck_params_list, start=2):
        stack = self.add_sublayer(
            sublayer=InvresiBlocks(in_c=in_c, t=t, c=int(c * scale),
                                   n=n, s=s),
            name='conv' + str(i))
        self._invl.append(stack)
        in_c = int(c * scale)
    # 3. final 1x1 expansion; never narrower than 1280 channels.
    self._out_c = int(1280 * scale) if scale > 1.0 else 1280
    self._conv9 = ConvBNLayer(num_channels=in_c,
                              num_filters=self._out_c,
                              filter_size=1,
                              stride=1,
                              act=None,
                              padding=0)
    # 4. global average pooling.
    self._pool2d_avg = Pool2D(pool_type='avg', global_pooling=True)
    # 5. fully connected classifier.
    tmp_param = ParamAttr(name=self.full_name() + "fc10_weights")
    self._fc = Linear(self._out_c,
                      class_dim,
                      param_attr=tmp_param,
                      bias_attr=ParamAttr(name="fc10_offset"))
def __init__(self, num_classes=10, classifier_activation='softmax'):
    """LeNet with named parameters: two conv+pool stages, then three
    fully connected layers (last one softmax-activated)."""
    super(ImperativeLenet, self).__init__()
    # Explicit parameter names so weights can be matched across
    # save/load and quantization passes.
    conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
    conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
    fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
    fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
    fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
    conv2d_b1_attr = fluid.ParamAttr(name="conv2d_b_1")
    conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
    fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
    fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
    fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
    self.features = Sequential(
        Conv2D(
            num_channels=1,
            num_filters=6,
            filter_size=3,
            stride=1,
            padding=1,
            param_attr=conv2d_w1_attr,
            bias_attr=conv2d_b1_attr),
        Pool2D(pool_size=2, pool_type='max', pool_stride=2),
        Conv2D(
            num_channels=6,
            num_filters=16,
            filter_size=5,
            stride=1,
            padding=0,
            param_attr=conv2d_w2_attr,
            bias_attr=conv2d_b2_attr),
        Pool2D(pool_size=2, pool_type='max', pool_stride=2))
    # 400 = 16 channels * 5 * 5 spatial after the two pools.
    self.fc = Sequential(
        Linear(
            input_dim=400,
            output_dim=120,
            param_attr=fc_w1_attr,
            bias_attr=fc_b1_attr),
        Linear(
            input_dim=120,
            output_dim=84,
            param_attr=fc_w2_attr,
            bias_attr=fc_b2_attr),
        Linear(
            input_dim=84,
            output_dim=num_classes,
            act=classifier_activation,
            param_attr=fc_w3_attr,
            bias_attr=fc_b3_attr))
def __init__(self,
             name_scope,
             group,
             out_ch,
             channels,
             act="relu",
             is_test=False,
             pool=True,
             use_cudnn=True):
    """Two conv+BN stages with an optional trailing 2x2 max pool.

    Args:
        name_scope: layer name scope.
        group: group tag kept on the instance (used by callers).
        out_ch: pair of output channel counts for the two convs.
        channels: pair of input channel counts for the two convs.
        act: activation applied by each BatchNorm.
        is_test: BatchNorm inference-mode flag.
        pool: whether to append the max-pool layer.
        use_cudnn: forwarded to conv/pool ops.
    """
    super(ConvBNPool, self).__init__(name_scope)
    self.group = group
    self.pool = pool
    filter_size = 3
    # He-style std: sqrt(2 / (k*k*fan_in)) for each conv.
    conv_std_0 = (2.0 / (filter_size**2 * channels[0]))**0.5
    conv_param_0 = fluid.ParamAttr(
        initializer=fluid.initializer.Normal(0.0, conv_std_0))
    conv_std_1 = (2.0 / (filter_size**2 * channels[1]))**0.5
    conv_param_1 = fluid.ParamAttr(
        initializer=fluid.initializer.Normal(0.0, conv_std_1))
    # Convs carry no bias/act; BatchNorm supplies shift and activation.
    self.conv_0_layer = Conv2D(
        self.full_name(),
        channels[0],
        out_ch[0],
        3,
        padding=1,
        param_attr=conv_param_0,
        bias_attr=False,
        act=None,
        use_cudnn=use_cudnn)
    self.bn_0_layer = BatchNorm(
        self.full_name(), out_ch[0], act=act, is_test=is_test)
    self.conv_1_layer = Conv2D(
        self.full_name(),
        num_channels=channels[1],
        num_filters=out_ch[1],
        filter_size=3,
        padding=1,
        param_attr=conv_param_1,
        bias_attr=False,
        act=None,
        use_cudnn=use_cudnn)
    self.bn_1_layer = BatchNorm(
        self.full_name(), out_ch[1], act=act, is_test=is_test)
    # Fix: removed leftover debug print of self.pool.
    if self.pool:
        self.pool_layer = Pool2D(
            self.full_name(),
            pool_size=2,
            pool_type='max',
            pool_stride=2,
            use_cudnn=use_cudnn,
            ceil_mode=True)
def __init__(self, name_scope, num_classes=1):
    """LeNet: two conv+pool stages followed by three fully connected
    layers; the last FC outputs `num_classes` logits."""
    super(LeNet, self).__init__(name_scope)
    self.conv1 = Conv2D(num_channels=1, num_filters=6,
                        filter_size=5, act='relu')
    self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
    self.conv2 = Conv2D(num_channels=6, num_filters=16,
                        filter_size=5, act='relu')
    self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
    # Fully connected head: 16*5*5 flattened features -> 120 -> 84 ->
    # num_classes.
    self.fc1 = Linear(input_dim=16 * 5 * 5, output_dim=120, act='relu')
    self.fc2 = Linear(input_dim=120, output_dim=84, act='relu')
    self.fc3 = Linear(input_dim=84, output_dim=num_classes)
def __init__(self, num_classes=10):
    """LeNet (Paddle 2.x conv API) with named parameters: two conv+pool
    stages, three FC layers and a trailing Softmax."""
    super(ImperativeLenet, self).__init__()
    # Explicit parameter names so weights can be matched across
    # save/load and quantization passes.
    conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
    conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
    fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
    fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
    fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
    conv2d_b1_attr = fluid.ParamAttr(name="conv2d_b_1")
    conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
    fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
    fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
    fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
    self.features = Sequential(
        Conv2D(
            in_channels=1,
            out_channels=6,
            kernel_size=3,
            stride=1,
            padding=1,
            weight_attr=conv2d_w1_attr,
            bias_attr=conv2d_b1_attr),
        Pool2D(pool_size=2, pool_type='max', pool_stride=2),
        Conv2D(
            in_channels=6,
            out_channels=16,
            kernel_size=5,
            stride=1,
            padding=0,
            weight_attr=conv2d_w2_attr,
            bias_attr=conv2d_b2_attr),
        Pool2D(pool_size=2, pool_type='max', pool_stride=2))
    # 400 = 16 channels * 5 * 5 spatial after the two pools; Softmax
    # turns the final logits into probabilities.
    self.fc = Sequential(
        Linear(
            in_features=400,
            out_features=120,
            weight_attr=fc_w1_attr,
            bias_attr=fc_b1_attr),
        Linear(
            in_features=120,
            out_features=84,
            weight_attr=fc_w2_attr,
            bias_attr=fc_b2_attr),
        Linear(
            in_features=84,
            out_features=num_classes,
            weight_attr=fc_w3_attr,
            bias_attr=fc_b3_attr),
        Softmax())
def __init__(self, name_scope, num_classes):
    """Small LeNet-style net: two conv+pool stages and a softmax FC head.

    NOTE(review): both pools use pool_stride=1 with pool_size=2
    (overlapping pooling, almost no downsampling) — confirm this is
    intentional rather than a typo for stride 2.
    """
    super(LeNet5, self).__init__(name_scope)
    self.conv1 = Conv2D(self.full_name(), num_filters=50,
                        filter_size=5, stride=1)
    self.pool1 = Pool2D(self.full_name(), pool_size=2, pool_stride=1,
                        pool_type='max')
    self.conv2 = Conv2D(self.full_name(), num_filters=32,
                        filter_size=3, stride=1)
    self.pool2 = Pool2D(self.full_name(), pool_size=2, pool_stride=1,
                        pool_type='max')
    self.fc1 = FC(self.full_name(), size=num_classes, act='softmax')
def __init__(self, num_classes=1):
    """Classic LeNet: sigmoid-activated convs with max pooling, a third
    conv collapsing to 120 channels, then two FC layers."""
    super(LeNet, self).__init__()
    self.conv1 = Conv2D(num_channels=1, num_filters=6,
                        filter_size=5, act='sigmoid')
    self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
    self.conv2 = Conv2D(num_channels=6, num_filters=16,
                        filter_size=5, act='sigmoid')
    self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
    # Third conv reduces the 4x4 map to 1x1 with 120 channels.
    self.conv3 = Conv2D(num_channels=16, num_filters=120,
                        filter_size=4, act='sigmoid')
    self.fc1 = Linear(input_dim=120, output_dim=64, act='sigmoid')
    self.fc2 = Linear(input_dim=64, output_dim=num_classes)
def __init__(self, num_classes=1):
    """AlexNet: five ReLU conv layers with three max pools, then three
    FC layers; `drop_ratio1/2` are the dropout rates the forward pass
    applies after fc1 and fc2."""
    super(AlexNet, self).__init__()
    self.conv1 = Conv2D(num_channels=3, num_filters=96, filter_size=11,
                        stride=4, padding=5, act='relu')
    self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
    self.conv2 = Conv2D(num_channels=96, num_filters=256, filter_size=5,
                        stride=1, padding=2, act='relu')
    self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
    self.conv3 = Conv2D(num_channels=256, num_filters=384, filter_size=3,
                        stride=1, padding=1, act='relu')
    self.conv4 = Conv2D(num_channels=384, num_filters=384, filter_size=3,
                        stride=1, padding=1, act='relu')
    self.conv5 = Conv2D(num_channels=384, num_filters=256, filter_size=3,
                        stride=1, padding=1, act='relu')
    self.pool5 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
    # 12544 = 7 * 7 * 256 flattened features.
    self.fc1 = Linear(input_dim=12544, output_dim=4096, act='relu')
    self.drop_ratio1 = 0.5
    self.fc2 = Linear(input_dim=4096, output_dim=4096, act='relu')
    self.drop_ratio2 = 0.5
    self.fc3 = Linear(input_dim=4096, output_dim=num_classes)
def __init__(self, name_scope, num_filter):
    """VGG-style block of four identical bias-free 3x3 ReLU convs
    followed by a 2x2 max pool.

    NOTE(review): `use_cudnn` is read from enclosing-module scope, and
    the weight ParamAttr names ("1_weights" ... "4_weights") are global,
    so two instances of this block would share weights — confirm that is
    intended.
    """
    super(conv_block_4, self).__init__(name_scope)
    self.conv1 = Conv2D(
        self.full_name(),
        num_filters=num_filter,
        filter_size=3,
        stride=1,
        padding=1,
        act='relu',
        use_cudnn=use_cudnn,
        param_attr=fluid.param_attr.ParamAttr(name="1_weights"),
        bias_attr=False)
    self.conv2 = Conv2D(
        self.full_name(),
        num_filters=num_filter,
        filter_size=3,
        stride=1,
        padding=1,
        act='relu',
        use_cudnn=use_cudnn,
        param_attr=fluid.param_attr.ParamAttr(name="2_weights"),
        bias_attr=False)
    self.conv3 = Conv2D(
        self.full_name(),
        num_filters=num_filter,
        filter_size=3,
        stride=1,
        padding=1,
        act='relu',
        use_cudnn=use_cudnn,
        param_attr=fluid.param_attr.ParamAttr(name="3_weights"),
        bias_attr=False)
    self.conv4 = Conv2D(
        self.full_name(),
        num_filters=num_filter,
        filter_size=3,
        stride=1,
        padding=1,
        act='relu',
        use_cudnn=use_cudnn,
        param_attr=fluid.param_attr.ParamAttr(name="4_weights"),
        bias_attr=False)
    self.pool1 = Pool2D(self.full_name(),
                        pool_size=2,
                        pool_type='max',
                        pool_stride=2,
                        use_cudnn=use_cudnn)
    self.num_filter = num_filter
def __init__(self, c_in, num_classes, layers, method, steps=4, multiplier=4, stem_multiplier=3):
    """DARTS super-network: conv+BN stem, a stack of searchable cells
    (channels double at 1/3 and 2/3 depth with reduction cells), global
    pooling and a linear classifier; ends by creating the architecture
    parameters (alphas)."""
    super(Network, self).__init__()
    self._c_in = c_in
    self._num_classes = num_classes
    self._layers = layers
    self._steps = steps
    self._multiplier = multiplier
    self._primitives = PRIMITIVES
    self._method = method
    # Stem widens the 3-channel image to stem_multiplier * c_in channels.
    c_cur = stem_multiplier * c_in
    self.stem = fluid.dygraph.Sequential(
        Conv2D(
            num_channels=3,
            num_filters=c_cur,
            filter_size=3,
            padding=1,
            param_attr=fluid.ParamAttr(initializer=MSRAInitializer()),
            bias_attr=False),
        BatchNorm(
            num_channels=c_cur,
            param_attr=fluid.ParamAttr(
                initializer=ConstantInitializer(value=1)),
            bias_attr=fluid.ParamAttr(
                initializer=ConstantInitializer(value=0))))
    c_prev_prev, c_prev, c_cur = c_cur, c_cur, c_in
    cell_list = []
    reduction_prev = False
    for layer_idx in range(layers):
        # Reduction cells (stride 2, doubled channels) at 1/3 and 2/3 depth.
        reduction = layer_idx in [layers // 3, 2 * layers // 3]
        if reduction:
            c_cur *= 2
        cell = Cell(steps, multiplier, c_prev_prev, c_prev, c_cur,
                    reduction, reduction_prev, method)
        reduction_prev = reduction
        cell_list.append(cell)
        # Each cell concatenates `multiplier` intermediate nodes.
        c_prev_prev, c_prev = c_prev, multiplier * c_cur
    self.cells = fluid.dygraph.LayerList(cell_list)
    self.global_pooling = Pool2D(pool_type='avg', global_pooling=True)
    self.classifier = Linear(
        input_dim=c_prev,
        output_dim=num_classes,
        param_attr=ParamAttr(initializer=MSRAInitializer()),
        bias_attr=ParamAttr(initializer=MSRAInitializer()))
    self._initialize_alphas()
def __init__(self, name_scope, num_filters, num_channels, drop_out_prob):
    """DenseNet transition layer: 1x1 BN-conv to change channel count,
    2x2 average pooling to halve spatial size; the dropout probability
    is stored for use in the forward pass."""
    super(TransitionLayer, self).__init__(name_scope)
    self.conv = BNConvLayer(self.full_name(),
                            num_filters=num_filters,
                            num_channels=num_channels,
                            filter_size=1)
    self.pool2d = Pool2D(pool_size=2, pool_stride=2, pool_type='avg')
    self.dropout_prob = drop_out_prob
def __init__(self, depth, in_channels=3, class_dim=1024):
    """Bottleneck ResNet: 7x7 stem, four stages of bottleneck blocks
    sized by `depth`, global average pooling and a softmax classifier."""
    super(ResNet, self).__init__()
    self.depth = depth
    # Per-stage input channels / bottleneck widths.
    self.num_channels = [64, 256, 512, 1024]
    self.num_filters = [64, 128, 256, 512]
    self.conv = ConvBN(in_channels, 64, filter_size=7, stride=2)
    self.pool = Pool2D(pool_size=3, pool_stride=2, pool_padding=1,
                       pool_type='max')
    self.bottleneck_block_list = []
    for stage in range(len(depth)):
        shortcut = False
        for block_idx in range(self.depth[stage]):
            # First block of a stage takes the previous stage's output
            # channels; later blocks take 4x the bottleneck width.
            # Stages after the first downsample in their first block.
            block = self.add_sublayer(
                'bb_%d_%d' % (stage, block_idx),
                BottleneckBlock(
                    num_channels=self.num_channels[stage]
                    if block_idx == 0 else self.num_filters[stage] * 4,
                    num_filters=self.num_filters[stage],
                    stride=2 if block_idx == 0 and stage != 0 else 1,
                    shortcut=shortcut))
            self.bottleneck_block_list.append(block)
            shortcut = True
    self.pool_avg = Pool2D(pool_size=7, pool_type='avg',
                           global_pooling=True)
    # 512 * 4 expansion = 2048 features after global pooling.
    self.feature_dim = self.num_filters[-1] * 4 * 1 * 1
    import math
    stdv = 1.0 / math.sqrt(2048 * 1.0)
    self.linear = Linear(
        self.feature_dim,
        class_dim,
        act='softmax',
        param_attr=fluid.param_attr.ParamAttr(
            initializer=fluid.initializer.Uniform(-stdv, stdv)))
def __init__(self, features, num_classes=1000):
    """VGG classifier head on top of a feature-extractor sublayer.

    Args:
        features: sublayer producing a 512-channel feature map
            (presumably 7x7 spatial for 224x224 input — confirm).
        num_classes: number of output classes.
    """
    super(VGG, self).__init__()
    self.features = features
    # 7x7 average pool collapses the final feature map to 1x1.
    self.avgpool = Pool2D(pool_size=7, pool_stride=1, pool_type='avg')
    import math
    in_features = 512
    # Fix: scale the uniform init by the classifier's actual fan-in
    # (512); the previous 1/sqrt(2048) was copied from a 2048-dim head.
    stdv = 1.0 / math.sqrt(in_features * 1.0)
    self.classifier = Linear(
        in_features,
        num_classes,
        act='softmax',
        param_attr=fluid.param_attr.ParamAttr(
            initializer=fluid.initializer.Uniform(-stdv, stdv)))
def __init__(self, name_scope, num_channels, out_channels1, out_channels2, out_channels3):
    """InceptionV2 reduction module D: two stride-2 conv branches plus a
    stride-2 max-pool branch.

    NOTE(review): branch2's 1x1 reduce outputs `out_channels2` (the same
    count its following 3x3 consumes); the commented-out reference code
    used a separate `out_channels3reduce` — confirm the channel wiring
    matches the intended architecture.
    """
    super(InceptionV2ModuleD, self).__init__(name_scope)
    # Branch 1: 1x1 reduce -> stride-2 3x3 conv.
    self.branch1 = fluid.dygraph.Sequential(
        ConvBNLayer(self.full_name(),
                    num_channels=num_channels,
                    num_filters=out_channels1,
                    filter_size=1,
                    act='relu'),
        ConvBNLayer(self.full_name(),
                    num_channels=out_channels1,
                    num_filters=out_channels2,
                    filter_size=3,
                    stride=2,
                    padding=1,
                    act='relu'), )
    # Branch 2: 1x1 reduce -> 3x3 -> stride-2 3x3.
    self.branch2 = fluid.dygraph.Sequential(
        ConvBNLayer(self.full_name(),
                    num_channels=num_channels,
                    num_filters=out_channels2,
                    filter_size=1,
                    act='relu'),
        ConvBNLayer(self.full_name(),
                    num_channels=out_channels2,
                    num_filters=out_channels3,
                    filter_size=3,
                    padding=1,
                    act='relu'),
        ConvBNLayer(self.full_name(),
                    num_channels=out_channels3,
                    num_filters=out_channels3,
                    filter_size=3,
                    stride=2,
                    padding=1,
                    act='relu'), )
    # Branch 3: parameter-free stride-2 max pooling.
    self.branch3 = fluid.dygraph.Sequential(
        Pool2D(pool_size=3, pool_stride=2, pool_padding=1,
               pool_type='max'), )
def __init__(self, name_scope, num_convs, num_channels):
    """
    VGG block: `num_convs` identical 3x3 ReLU convolutions followed by a
    2x2 max pool.

    num_convs: number of convolution layers.
    num_channels: output channels of each conv — all convs within the
        same block share this width.
    """
    super(vgg_block, self).__init__(name_scope)
    self.conv_list = []
    for idx in range(num_convs):
        layer = self.add_sublayer(
            'conv_' + str(idx),
            Conv2D(self.full_name(),
                   num_filters=num_channels,
                   filter_size=3,
                   padding=1,
                   act='relu'))
        self.conv_list.append(layer)
    self.pool = Pool2D(self.full_name(),
                       pool_stride=2,
                       pool_size=2,
                       pool_type='max')
def __init__(self, input_dim, num_classes, name=None):
    """Classifier head: global average pooling followed by a linear
    layer with MSRA-initialized weights and bias."""
    super(Classifier, self).__init__()
    self.pool2d = Pool2D(pool_type='avg', global_pooling=True)
    # Parameter names are prefixed with `name` only when one is given.
    weight_name = name + "_fc_weights" if name is not None else None
    bias_name = name + "_fc_bias" if name is not None else None
    self.fc = Linear(
        input_dim=input_dim,
        output_dim=num_classes,
        param_attr=fluid.ParamAttr(
            name=weight_name, initializer=MSRAInitializer()),
        bias_attr=fluid.ParamAttr(
            name=bias_name, initializer=MSRAInitializer()))
def __init__(self, name_scope):
    """Small CNN: four ReLU convs interleaved with three 2x2 max pools,
    then a 100-unit ReLU FC and a 10-way softmax FC."""
    super(MODEL, self).__init__(name_scope)
    self.conv1 = Conv2D('conv2d1_', num_filters=40, filter_size=5,
                        stride=1, padding=2, act='relu')
    self.conv2 = Conv2D('conv2d2_', num_filters=50, filter_size=5,
                        stride=1, padding=2, act='relu')
    self.conv3 = Conv2D('conv2d3_', num_filters=70, filter_size=2,
                        stride=1, padding=1, act='relu')
    self.conv4 = Conv2D('conv2d4_', num_filters=100, filter_size=2,
                        stride=1, padding=1, act='relu')
    self.pool2d1 = Pool2D('pool2d1_', pool_size=2, pool_stride=2,
                          pool_type='max')
    self.pool2d2 = Pool2D('pool2d2_', pool_size=2, pool_stride=2,
                          pool_type='max')
    self.pool2d3 = Pool2D('pool2d3_', pool_size=2, pool_stride=2,
                          pool_type='max')
    self.fc1 = FC('fc1_', size=100, act='relu')
    self.fc2 = FC('fc2_', size=10, act='softmax')
def __init__(self, num_classes=1):
    """Classic LeNet for single-channel input."""
    super(LeNet, self).__init__()
    # Conv+pool stages: each sigmoid-activated conv is followed by a
    # 2x2 max pool.
    self.conv1 = Conv2D(num_channels=1, num_filters=6,
                        filter_size=5, act='sigmoid')
    self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
    self.conv2 = Conv2D(num_channels=6, num_filters=16,
                        filter_size=5, act='sigmoid')
    self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
    # Third conv collapses the 4x4 map to 1x1 with 120 channels.
    self.conv3 = Conv2D(num_channels=16, num_filters=120,
                        filter_size=4, act='sigmoid')
    # FC head: 120 -> 64 -> num_classes (class-label count).
    self.fc1 = Linear(input_dim=120, output_dim=64, act='sigmoid')
    self.fc2 = Linear(input_dim=64, output_dim=num_classes)