def __init__(self, args):
    """Four-block conv backbone for few-shot learning.

    The first two blocks always pool; the last two skip pooling when the
    method is 'relationnet' so it receives full spatial feature maps.
    """
    super(Conv4, self).__init__()
    self.args = args
    # Omniglot is grayscale; every other supported dataset is RGB.
    in_ch = 1 if self.args.dataset == 'omniglot' else 3
    # relationnet needs spatial maps from the deeper blocks -> no pooling there.
    late_pool = None if self.args.method == 'relationnet' else self.args.pooling_type
    width = self.args.num_filters
    self.conv0 = BASIC_BLOCK(num_channels=in_ch, num_filters=width,
                             pooltype=self.args.pooling_type)
    self.conv1 = BASIC_BLOCK(num_channels=width, num_filters=width,
                             pooltype=self.args.pooling_type)
    self.conv2 = BASIC_BLOCK(num_channels=width, num_filters=width,
                             pooltype=late_pool)
    self.conv3 = BASIC_BLOCK(num_channels=width, num_filters=width,
                             pooltype=late_pool)
    if self.args.if_dropout:
        self.dropout = Dropout(p=0.5)
def __init__(self, in_c, out_c, padding_type, norm_layer, dropout_rate, use_bias):
    """Residual block of two separable convs with configurable padding.

    Args:
        in_c / out_c: channel counts of the first conv's input / output
            (the second conv maps back from out_c to in_c).
        padding_type: 'reflect', 'replicate' or 'zero'.
        norm_layer: normalization layer factory applied after each conv.
        dropout_rate: rate for the Dropout between the two convs.
        use_bias: accepted for interface parity; not used by SeparableConv2D here.

    Bug fix: both 'replicate' branches previously called
    ``Pad2D(inputs, paddings=..., mode='edge')`` where ``inputs`` is an
    undefined name, raising NameError as soon as a replicate-padded block was
    constructed. The stray positional argument is removed.
    """
    super(MobileResnetBlock, self).__init__()
    self.padding_type = padding_type
    self.dropout_rate = dropout_rate
    self.conv_block = fluid.dygraph.LayerList([])
    p = 0
    if self.padding_type == 'reflect':
        self.conv_block.extend([Pad2D(paddings=[1, 1, 1, 1], mode='reflect')])
    elif self.padding_type == 'replicate':
        # FIX: dropped undefined `inputs` positional argument (NameError).
        self.conv_block.extend([Pad2D(paddings=[1, 1, 1, 1], mode='edge')])
    elif self.padding_type == 'zero':
        p = 1
    else:
        raise NotImplementedError('padding [%s] is not implemented' %
                                  self.padding_type)
    self.conv_block.extend([
        SeparableConv2D(num_channels=in_c, num_filters=out_c, filter_size=3,
                        padding=p, stride=1),
        norm_layer(out_c),
        ReLU()
    ])
    self.conv_block.extend([Dropout(p=self.dropout_rate)])
    # Second pad + conv stage; `p` keeps its value, which is equivalent to
    # recomputing it because padding_type is constant within this block.
    if self.padding_type == 'reflect':
        self.conv_block.extend([Pad2D(paddings=[1, 1, 1, 1], mode='reflect')])
    elif self.padding_type == 'replicate':
        # FIX: same undefined-name bug as above.
        self.conv_block.extend([Pad2D(paddings=[1, 1, 1, 1], mode='edge')])
    elif self.padding_type == 'zero':
        p = 1
    else:
        raise NotImplementedError('padding [%s] is not implemented' %
                                  self.padding_type)
    self.conv_block.extend([
        SeparableConv2D(num_channels=out_c, num_filters=in_c, filter_size=3,
                        padding=p, stride=1),
        norm_layer(in_c)
    ])
def __init__(self, dim, padding_type, norm_layer, dropout_rate, use_bias=False):
    """Standard resnet block: (pad, conv, norm, relu, dropout, pad, conv, norm).

    `padding_type` selects explicit reflect/edge padding layers or implicit
    zero padding inside the convs; any other value raises NotImplementedError.
    """
    super(ResnetBlock, self).__init__()
    self.conv_block = fluid.dygraph.LayerList([])

    def append_padding():
        # Append an explicit pad layer for reflect/edge modes and return the
        # implicit conv padding width (1 only for 'zero' padding).
        if padding_type == 'reflect':
            self.conv_block.extend([Pad2D(paddings=[1, 1, 1, 1], mode='reflect')])
            return 0
        if padding_type == 'replicate':
            self.conv_block.extend([Pad2D(paddings=[1, 1, 1, 1], mode='edge')])
            return 0
        if padding_type == 'zero':
            return 1
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)

    p = append_padding()
    self.conv_block.extend([
        Conv2D(dim, dim, filter_size=3, padding=p, bias_attr=use_bias),
        norm_layer(dim),
        ReLU()
    ])
    self.conv_block.extend([Dropout(dropout_rate)])
    p = append_padding()
    self.conv_block.extend([
        Conv2D(dim, dim, filter_size=3, padding=p, bias_attr=use_bias),
        norm_layer(dim)
    ])
def __init__(self, args):
    """ResNet-12 backbone: four residual blocks plus global average pooling.

    As in the Conv4 backbone, the last two blocks skip pooling when the
    method is 'relationnet' so it gets spatial feature maps.
    """
    super(ResNet12, self).__init__()
    self.args = args
    # Omniglot is grayscale; every other supported dataset is RGB.
    in_ch = 1 if self.args.dataset == 'omniglot' else 3
    # relationnet disables pooling in the deeper blocks only.
    late_pool = None if self.args.method == 'relationnet' else self.args.pooling_type
    widths = self.args.resnet12_num_filters
    self.res_block0 = Residual_Block(num_channels=in_ch,
                                     num_filters=widths[0],
                                     pooltype=self.args.pooling_type)
    self.res_block1 = Residual_Block(num_channels=widths[0],
                                     num_filters=widths[1],
                                     pooltype=self.args.pooling_type)
    self.res_block2 = Residual_Block(num_channels=widths[1],
                                     num_filters=widths[2],
                                     pooltype=late_pool)
    self.res_block3 = Residual_Block(num_channels=widths[2],
                                     num_filters=widths[3],
                                     pooltype=late_pool)
    # Global average pooling collapses the spatial dimensions.
    self.gap = Pool2D(pool_type='avg', global_pooling=True)
    if self.args.if_dropout:
        self.dropout = Dropout(p=0.5)
def __init__(self, num_classes, num_segments, dropout=0):
    """ECO video model: a pretrained 2-D BNInception branch fused with a 3-D
    ResNet branch, followed by dropout and a softmax classifier.

    Args:
        num_classes: number of output classes.
        num_segments: number of temporal segments sampled per clip.
        dropout: dropout rate applied before the final FC (0 disables it).

    Fix: `self.dropout` was first bound to the float rate and then immediately
    overwritten by the Dropout layer; the dead float assignment (and a
    commented-out fc_0 layer) are removed. Behavior is unchanged.
    """
    super(ECO, self).__init__()
    self.num_segments = num_segments
    self.channel = 3
    self.reshape = True
    self.input_size = 224
    self.bninception_pretrained = bninception.BNInception_pre(
        num_classes=num_classes)
    self.resnet3d = resnet_3d.resnet3d()
    self.dropout = Dropout(dropout)
    # 512 features from the 3-D branch + 1024 from the 2-D branch.
    self.fc_final = Linear(512 + 1024, num_classes, bias_attr=True,
                           act='softmax',
                           param_attr=fluid.initializer.Xavier(uniform=False))
def forward(self, x0, x1):
    """Run one searched cell.

    Each of the `n_nodes` intermediate nodes sums two candidate-op outputs
    (each followed by dropout) taken from earlier states per the genotype;
    the last `n_nodes` states are concatenated along the channel axis.

    Args:
        x0, x1: the two input feature maps to the cell.
    Returns:
        Concatenation of the intermediate node outputs.
    """
    x0 = self.preprocess0(x0)
    x1 = self.preprocess1(x1)
    states = [x0, x1]
    # FIX: build the Dropout op once instead of re-instantiating a fresh
    # Dropout object for every edge of every node inside the loop.
    # Same output, less per-call allocation.
    drop = Dropout(p=self.drop_rate)
    op_idx = 0
    for _ in range(self.n_nodes):
        edge_outputs = []
        for _ in range(2):
            # genolist[op_idx][1] selects which earlier state this edge reads.
            src = states[self.genolist[op_idx][1]]
            edge_outputs.append(drop(self._ops[op_idx](src)))
            op_idx += 1
        states.append(sum(edge_outputs))
    return fluid.layers.concat(states[-self.n_nodes:], axis=1)
def __init__(self, dim, padding_type, norm_layer, dropout_rate, use_bias):
    """Residual block of two SuperSeparableConv2D stages with configurable
    padding ('reflect', 'replicate' or 'zero').

    Args:
        dim: channel count kept through both convs.
        norm_layer: normalization layer factory applied after each conv.
        dropout_rate: rate for the Dropout between the two stages.
        use_bias: accepted for interface parity; unused here.

    Bug fix: both NotImplementedError branches referenced `self.padding_type`,
    an attribute this class never assigns, so an unsupported padding mode
    raised AttributeError instead of the intended error message. They now use
    the local `padding_type` parameter.
    """
    super(SuperMobileResnetBlock, self).__init__()
    self.conv_block = fluid.dygraph.LayerList([])
    p = 0
    if padding_type == 'reflect':
        self.conv_block.extend(
            [Pad2D(paddings=[1, 1, 1, 1], mode="reflect")])
    elif padding_type == 'replicate':
        self.conv_block.extend([Pad2D(paddings=[1, 1, 1, 1], mode="edge")])
    elif padding_type == 'zero':
        p = 1
    else:
        # FIX: was `self.padding_type` (never set) -> AttributeError.
        raise NotImplementedError('padding [%s] is not implemented' %
                                  padding_type)
    self.conv_block.extend([
        SuperSeparableConv2D(num_channels=dim, num_filters=dim, filter_size=3,
                             stride=1, padding=p),
        norm_layer(dim),
        ReLU()
    ])
    self.conv_block.extend([Dropout(dropout_rate)])
    p = 0
    if padding_type == 'reflect':
        self.conv_block.extend(
            [Pad2D(paddings=[1, 1, 1, 1], mode="reflect")])
    elif padding_type == 'replicate':
        self.conv_block.extend([Pad2D(paddings=[1, 1, 1, 1], mode="edge")])
    elif padding_type == 'zero':
        p = 1
    else:
        # FIX: same undefined-attribute reference as above.
        raise NotImplementedError('padding [%s] is not implemented' %
                                  padding_type)
    self.conv_block.extend([
        SuperSeparableConv2D(num_channels=dim, num_filters=dim, filter_size=3,
                             stride=1, padding=p),
        norm_layer(dim)
    ])
def __init__(self, backbone, output_stride=16, class_dim=1000, **kwargs):
    """Xception backbone for DeepLab: entry/middle/exit flows built from
    Xception_Block sublayers, followed by dropout, global average pooling
    and a fully-connected classifier head.

    Args:
        backbone: backbone variant name; selects block counts/strides/channels
            via gen_bottleneck_params and prefixes all sublayer names.
        output_stride: target overall stride; per-block strides are clamped to
            1 (via check_stride) once the accumulated stride would exceed it.
        class_dim: number of classifier outputs.
    """
    super(XceptionDeeplab, self).__init__()
    bottleneck_params = gen_bottleneck_params(backbone)
    self.backbone = backbone
    self._conv1 = ConvBNLayer(3, 32, 3, stride=2, padding=1, act="relu",
                              name=self.backbone + "/entry_flow/conv1")
    self._conv2 = ConvBNLayer(32, 64, 3, stride=1, padding=1, act="relu",
                              name=self.backbone + "/entry_flow/conv2")
    """
    bottleneck_params = {
        "entry_flow": (3, [2, 2, 2], [128, 256, 728]),
        "middle_flow": (16, 1, 728),
        "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]])
    }
    if output_stride == 16:
        entry_block3_stride = 2
        middle_block_dilation = 1
        exit_block_dilations = (1, 2)
    elif output_stride == 8:
        entry_block3_stride = 1
        middle_block_dilation = 2
        exit_block_dilations = (2, 4)
    """
    # ---- entry flow ----
    self.block_num = bottleneck_params["entry_flow"][0]
    self.strides = bottleneck_params["entry_flow"][1]
    self.chns = bottleneck_params["entry_flow"][2]
    self.strides = check_data(self.strides, self.block_num)
    self.chns = check_data(self.chns, self.block_num)
    self.entry_flow = []
    self.middle_flow = []
    # Running stride: 2 from _conv1 already applied.
    self.stride = 2
    self.output_stride = output_stride
    s = self.stride
    for i in range(self.block_num):
        # Clamp this block's stride to 1 if applying it would exceed the
        # requested output_stride.
        stride = self.strides[i] if check_stride(s * self.strides[i],
                                                 self.output_stride) else 1
        # NOTE(review): the block is built with `self.stride` (still 2 here),
        # not the clamped per-block `stride` computed above — inconsistent
        # with the exit flow below; confirm against the reference model.
        xception_block = self.add_sublayer(
            self.backbone + "/entry_flow/block" + str(i + 1),
            Xception_Block(
                input_channels=64 if i == 0 else self.chns[i - 1],
                output_channels=self.chns[i],
                strides=[1, 1, self.stride],
                name=self.backbone + "/entry_flow/block" + str(i + 1)))
        self.entry_flow.append(xception_block)
        s = s * stride
    self.stride = s
    # ---- middle flow ----
    self.block_num = bottleneck_params["middle_flow"][0]
    self.strides = bottleneck_params["middle_flow"][1]
    self.chns = bottleneck_params["middle_flow"][2]
    self.strides = check_data(self.strides, self.block_num)
    self.chns = check_data(self.chns, self.block_num)
    s = self.stride
    for i in range(self.block_num):
        stride = self.strides[i] if check_stride(s * self.strides[i],
                                                 self.output_stride) else 1
        # NOTE(review): here the raw `self.strides[i]` is used rather than the
        # clamped `stride` — verify this is intended.
        xception_block = self.add_sublayer(
            self.backbone + "/middle_flow/block" + str(i + 1),
            Xception_Block(input_channels=728,
                           output_channels=728,
                           strides=[1, 1, self.strides[i]],
                           skip_conv=False,
                           name=self.backbone + "/middle_flow/block" + str(i + 1)))
        self.middle_flow.append(xception_block)
        s = s * stride
    self.stride = s
    # ---- exit flow ----
    self.block_num = bottleneck_params["exit_flow"][0]
    self.strides = bottleneck_params["exit_flow"][1]
    self.chns = bottleneck_params["exit_flow"][2]
    self.strides = check_data(self.strides, self.block_num)
    self.chns = check_data(self.chns, self.block_num)
    s = self.stride
    stride = self.strides[0] if check_stride(s * self.strides[0],
                                             self.output_stride) else 1
    self._exit_flow_1 = Xception_Block(728, self.chns[0], [1, 1, stride],
                                       name=self.backbone + "/exit_flow/block1")
    s = s * stride
    stride = self.strides[1] if check_stride(s * self.strides[1],
                                             self.output_stride) else 1
    self._exit_flow_2 = Xception_Block(
        self.chns[0][-1], self.chns[1], [1, 1, stride], dilation=2,
        has_skip=False, activation_fn_in_separable_conv=True,
        name=self.backbone + "/exit_flow/block2")
    s = s * stride
    self.stride = s
    # Classifier head: dropout -> global average pool -> FC.
    self._drop = Dropout(p=0.5)
    self._pool = Pool2D(pool_type="avg", global_pooling=True)
    self._fc = Linear(self.chns[1][-1], class_dim,
                      param_attr=ParamAttr(name="fc_weights"),
                      bias_attr=ParamAttr(name="fc_bias"))
def __init__(self, batch_size, sample):
    """TSN front-end: BN-Inception stem plus the inception-3c split that feeds
    both the C3D branch and the remaining 2-D inception stack (Conv2DNets)."""
    super(TSN, self).__init__()
    self.batch_size = batch_size
    self.sample = sample
    # Stem: 7x7/2 conv -> 3x3/2 max pool -> 1x1 reduce -> 3x3 conv -> pool.
    self.conv1 = Conv2DBNLayer(num_channels=3, num_filters=64, filter_size=7,
                               stride=2, padding=3, act='relu')
    self.pool1 = Pool2D(pool_size=3, pool_type='max', pool_stride=2,
                        pool_padding=1)
    self.conv2_reduce = Conv2DBNLayer(num_channels=64, num_filters=64,
                                      filter_size=1, act='relu')
    self.conv2 = Conv2DBNLayer(num_channels=64, num_filters=192,
                               filter_size=3, padding=1, act='relu')
    self.pool2 = Pool2D(pool_size=3, pool_type='max', pool_stride=2,
                        pool_padding=1)
    self.inception_3a = Inception3aLayer()
    self.inception_3b = Inception3bLayer()
    # Inception-3c feeds the 3-D branch ...
    self.inception_3c_c3d = Inception3cForC3D()
    self.c3d = C3D()
    # ... and the 2-D branch below.
    self.inception_3c_3x3_reduce = Conv2DBNLayer(num_channels=320,
                                                 num_filters=128,
                                                 filter_size=1, act='relu')
    self.inception_3c_3x3 = Conv2DBNLayer(num_channels=128, num_filters=160,
                                          filter_size=3, padding=1, stride=2,
                                          act='relu')
    # No activation on this layer in the original definition.
    self.inception_3c_double_3x3_2 = Conv2DBNLayer(num_channels=96,
                                                   num_filters=96, padding=1,
                                                   filter_size=3, stride=2)
    # inception_3c_pool <= Pooling <= inception_3b_output
    self.inception_3c_pool = Pool2D(pool_size=3, pool_type='max',
                                    pool_stride=2, pool_padding=1)
    # inception_3c_output <= Concat <=
    #   inception_3c_3x3 (14*14 160),
    #   inception_3c_double_3x3_2 (14*14 96),
    #   inception_3c_pool (14*14 64)
    self.conv2dnets = Conv2DNets(batch_size, sample)
    # global_pool3D <= Pooling3d <= res5b_bn
    self.global_pool3D_drop = Dropout(p=0.3)
    # global_pool <= Concat <= global_pool2D_reshape_consensus, global_pool3D
    self.fc_finall = Linear(input_dim=1536, output_dim=101)
def __init__(self, batch_size, sample):
    """2-D inception stack (stages 4a-5b) of the BN-Inception trunk, ending in
    global pooling and the dropouts used before feature fusion."""
    super(Conv2DNets, self).__init__()
    self.batch_size = batch_size
    self.sample = sample
    # ---- inception 4a ----
    self.inception_4a_1x1 = Conv2DBNLayer(num_channels=576, num_filters=224,
                                          filter_size=1, act='relu')
    self.inception_4a_3x3_reduce = Conv2DBNLayer(num_channels=576, num_filters=64,
                                                 filter_size=1, act='relu')
    self.inception_4a_3x3 = Conv2DBNLayer(num_channels=64, num_filters=96,
                                          padding=1, filter_size=3, act='relu')
    self.inception_4a_double_3x3_reduce = Conv2DBNLayer(num_channels=576,
                                                        num_filters=96,
                                                        filter_size=1, act='relu')
    self.inception_4a_double_3x3_1 = Conv2DBNLayer(num_channels=96, num_filters=128,
                                                   filter_size=3, padding=1,
                                                   act='relu')
    self.inception_4a_double_3x3_2 = Conv2DBNLayer(num_channels=128, num_filters=128,
                                                   filter_size=3, padding=1,
                                                   act='relu')
    self.inception_4a_pool = Pool2D(pool_size=3, pool_type='avg', pool_padding=1)
    self.inception_4a_pool_proj = Conv2DBNLayer(num_channels=576, num_filters=128,
                                                filter_size=1, act='relu')
    # inception_4a_output <= Concat <=
    #   inception_4a_1x1_bn(14*14 224), inception_4a_3x3_bn(14*14 96),
    #   inception_4a_double_3x3_2_bn(14*14 128), inception_4a_pool_proj_bn(14*14 128)
    # ---- inception 4b ----
    self.inception_4b_1x1 = Conv2DBNLayer(num_channels=576, num_filters=192,
                                          filter_size=1, act='relu')
    self.inception_4b_3x3_reduce = Conv2DBNLayer(num_channels=576, num_filters=96,
                                                 filter_size=1, act='relu')
    self.inception_4b_3x3 = Conv2DBNLayer(num_channels=96, num_filters=128,
                                          filter_size=3, padding=1, act='relu')
    self.inception_4b_double_3x3_reduce = Conv2DBNLayer(num_channels=576,
                                                        num_filters=96,
                                                        filter_size=1, act='relu')
    self.inception_4b_double_3x3_1 = Conv2DBNLayer(num_channels=96, num_filters=128,
                                                   filter_size=3, padding=1,
                                                   act='relu')
    self.inception_4b_double_3x3_2 = Conv2DBNLayer(num_channels=128, num_filters=128,
                                                   filter_size=3, padding=1,
                                                   act='relu')
    self.inception_4b_pool = Pool2D(pool_size=3, pool_type='avg', pool_padding=1)
    self.inception_4b_pool_proj = Conv2DBNLayer(num_channels=576, num_filters=128,
                                                filter_size=1, act='relu')
    # inception_4b_output <= Concat <=
    #   inception_4b_1x1_bn(14*14 192), inception_4b_3x3_bn(14*14 128),
    #   inception_4b_double_3x3_2_bn(14*14 128), inception_4b_pool_proj_bn(14*14 128)
    # ---- inception 4c ----
    self.inception_4c_1x1 = Conv2DBNLayer(num_channels=576, num_filters=160,
                                          filter_size=1, act='relu')
    self.inception_4c_3x3_reduce = Conv2DBNLayer(num_channels=576, num_filters=128,
                                                 filter_size=1, act='relu')
    self.inception_4c_3x3 = Conv2DBNLayer(num_channels=128, num_filters=160,
                                          filter_size=3, padding=1, act='relu')
    self.inception_4c_double_3x3_reduce = Conv2DBNLayer(num_channels=576,
                                                        num_filters=128,
                                                        filter_size=1, act='relu')
    self.inception_4c_double_3x3_1 = Conv2DBNLayer(num_channels=128, num_filters=160,
                                                   filter_size=3, padding=1,
                                                   act='relu')
    self.inception_4c_double_3x3_2 = Conv2DBNLayer(num_channels=160, num_filters=160,
                                                   filter_size=3, padding=1,
                                                   act='relu')
    self.inception_4c_pool = Pool2D(pool_size=3, pool_type='avg', pool_padding=1)
    self.inception_4c_pool_proj = Conv2DBNLayer(num_channels=576, num_filters=96,
                                                filter_size=1, act='relu')
    # inception_4c_output <= Concat <=
    #   inception_4c_1x1_bn(14*14 160), inception_4c_3x3_bn(14*14 160),
    #   inception_4c_double_3x3_2_bn(14*14 160), inception_4c_pool_proj_bn(14*14 96)
    # ---- inception 4d ----
    self.inception_4d_1x1 = Conv2DBNLayer(num_channels=576, num_filters=96,
                                          filter_size=1, act='relu')
    self.inception_4d_3x3_reduce = Conv2DBNLayer(num_channels=576, num_filters=128,
                                                 filter_size=1, act='relu')
    self.inception_4d_3x3 = Conv2DBNLayer(num_channels=128, num_filters=192,
                                          filter_size=3, padding=1, act='relu')
    self.inception_4d_double_3x3_reduce = Conv2DBNLayer(num_channels=576,
                                                        num_filters=160,
                                                        filter_size=1, act='relu')
    self.inception_4d_double_3x3_1 = Conv2DBNLayer(num_channels=160, num_filters=192,
                                                   filter_size=3, padding=1,
                                                   act='relu')
    self.inception_4d_double_3x3_2 = Conv2DBNLayer(num_channels=192, num_filters=192,
                                                   filter_size=3, padding=1,
                                                   act='relu')
    self.inception_4d_pool = Pool2D(pool_size=3, pool_type='avg', pool_padding=1)
    self.inception_4d_pool_proj = Conv2DBNLayer(num_channels=576, num_filters=96,
                                                filter_size=1, act='relu')
    # inception_4d_output <= Concat <=
    #   inception_4d_1x1_bn(14*14 96), inception_4d_3x3_bn(14*14 192),
    #   inception_4d_double_3x3_2_bn(14*14 192), inception_4d_pool_proj_bn(14*14 96)
    # ---- inception 4e (stride-2 reduction to 7*7) ----
    self.inception_4e_3x3_reduce = Conv2DBNLayer(num_channels=576, num_filters=128,
                                                 filter_size=1, act='relu')
    self.inception_4e_3x3 = Conv2DBNLayer(num_channels=128, num_filters=192,
                                          filter_size=3, padding=1, stride=2,
                                          act='relu')  # 7*7
    self.inception_4e_double_3x3_reduce = Conv2DBNLayer(num_channels=576,
                                                        num_filters=128,
                                                        filter_size=1, act='relu')
    self.inception_4e_double_3x3_1 = Conv2DBNLayer(num_channels=128, num_filters=256,
                                                   filter_size=3, padding=1,
                                                   act='relu')
    self.inception_4e_double_3x3_2 = Conv2DBNLayer(num_channels=256, num_filters=256,
                                                   filter_size=3, padding=1, stride=2,
                                                   act='relu')  # 7*7
    self.inception_4e_pool = Pool2D(pool_size=3, pool_type='max', pool_stride=2,
                                    pool_padding=1)  # 7*7
    # inception_4e_output <= Concat <=
    #   inception_4e_3x3_bn(7*7 192), inception_4e_double_3x3_2_bn(7*7 256),
    #   inception_4e_pool(7*7 576)
    # ---- inception 5a ----
    self.inception_5a_1x1 = Conv2DBNLayer(num_channels=1024, num_filters=352,
                                          filter_size=1, act='relu')
    self.inception_5a_3x3_reduce = Conv2DBNLayer(num_channels=1024, num_filters=192,
                                                 filter_size=1, act='relu')
    self.inception_5a_3x3 = Conv2DBNLayer(num_channels=192, num_filters=320,
                                          filter_size=3, padding=1, act='relu')
    self.inception_5a_double_3x3_reduce = Conv2DBNLayer(num_channels=1024,
                                                        num_filters=160,
                                                        filter_size=1, act='relu')
    self.inception_5a_double_3x3_1 = Conv2DBNLayer(num_channels=160, num_filters=224,
                                                   filter_size=3, padding=1,
                                                   act='relu')
    self.inception_5a_double_3x3_2 = Conv2DBNLayer(num_channels=224, num_filters=224,
                                                   filter_size=3, padding=1,
                                                   act='relu')
    self.inception_5a_pool = Pool2D(pool_size=3, pool_type='avg', pool_padding=1)
    self.inception_5a_pool_proj = Conv2DBNLayer(num_channels=1024, num_filters=128,
                                                filter_size=1, act='relu')
    # inception_5a_output <= Concat <=
    #   inception_5a_1x1_bn(7*7 352), inception_5a_3x3_bn(7*7 320),
    #   inception_5a_double_3x3_2_bn(7*7 224), inception_5a_pool_proj_bn(7*7 128)
    # ---- inception 5b ----
    self.inception_5b_1x1 = Conv2DBNLayer(num_channels=1024, num_filters=352,
                                          filter_size=1, act='relu')
    self.inception_5b_3x3_reduce = Conv2DBNLayer(num_channels=1024, num_filters=192,
                                                 filter_size=1, act='relu')
    self.inception_5b_3x3 = Conv2DBNLayer(num_channels=192, num_filters=320,
                                          filter_size=3, padding=1, act='relu')
    self.inception_5b_double_3x3_reduce = Conv2DBNLayer(num_channels=1024,
                                                        num_filters=192,
                                                        filter_size=1, act='relu')
    self.inception_5b_double_3x3_1 = Conv2DBNLayer(num_channels=192, num_filters=224,
                                                   filter_size=3, padding=1,
                                                   act='relu')
    self.inception_5b_double_3x3_2 = Conv2DBNLayer(num_channels=224, num_filters=224,
                                                   filter_size=3, padding=1,
                                                   act='relu')
    # NOTE: max pool here (avg in the earlier stages), stride left at default.
    self.inception_5b_pool = Pool2D(pool_size=3, pool_type='max', pool_padding=1)
    self.inception_5b_pool_proj = Conv2DBNLayer(num_channels=1024, num_filters=128,
                                                filter_size=1, act='relu')
    # inception_5b_output <= Concat <=
    #   inception_5b_1x1_bn(7*7 352), inception_5b_3x3_bn(7*7 320),
    #   inception_5b_double_3x3_2_bn(7*7 224), inception_5b_pool_proj_bn(7*7 128)
    self.global_pool2D_pre = Pool2D(pool_size=7, pool_type='avg')  # 1*1 1024
    self.global_pool2D_pre_drop = Dropout(p=0.5)
    # global_pool2D_reshape_consensus <= Pooling3d <= global_pool2D_pre
    # global_pool3D <= Pooling3d <= res5b_bn
    self.global_pool3D_drop = Dropout(p=0.3)
def __init__(self, name_scope, block, layers, inp=3, num_classes=51,
             input_size=112, dropout=0.2, n_iter=20, learnable=[0, 1, 1, 1, 1]):
    """ResNet video backbone augmented with flow-of-flow representation layers.

    Args:
        block: residual block class (provides `expansion`).
        layers: per-stage block counts, e.g. [3, 4, 6, 3] for resnet50.
        inp: input channel count.
        n_iter / learnable: iteration count and learnable-parameter flags
            forwarded to the FlowLayer modules.
    NOTE(review): mutable default `learnable=[0, 1, 1, 1, 1]` is shared across
    calls; safe only if FlowLayer never mutates it — confirm.
    """
    self.inplanes = 64
    self.inp = inp
    super(ResNet, self).__init__(name_scope)
    ### Added: optical-flow representation layer (FCF)
    self.flow_cmp = Conv2D(128 * block.expansion, 32, filter_size=1, stride=1,
                           padding=0, bias_attr=False)
    self.flow_layer = FlowLayer(channels=32, n_iter=n_iter,
                                params=learnable)  # flow representation layer
    self.flow_conv = Conv2D(64, 64, filter_size=3, stride=1, padding=1,
                            bias_attr=False)  # Flow-of-flow
    self.flow_cmp2 = Conv2D(64, 32, filter_size=1, stride=1, padding=0,
                            bias_attr=False)
    self.flow_layer2 = FlowLayer(channels=32, n_iter=n_iter,
                                 params=learnable)  # flow representation layer
    self.flow_conv2 = Conv2D(64, 128 * block.expansion, filter_size=3, stride=1,
                             padding=1, bias_attr=False)
    self.bnf = BatchNorm(128 * block.expansion, act='leaky_relu')
    ###
    self.conv1 = ConvBNLayer(self.full_name(), num_channels=inp, num_filters=64,
                             filter_size=7, stride=2, padding=3)
    self.maxpool = Pool2D(pool_size=3, pool_stride=2, pool_padding=1,
                          pool_type='max')
    # resnet50 layers=[3,4,6,3]
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
    # probably need to adjust this based on input spatial size
    size = int(math.ceil(input_size / 32))
    self.avgpool = Pool2D(pool_size=size, pool_stride=1, pool_type='avg')
    self.dropout = Dropout(p=dropout)
    self.fc = Linear(input_dim=512 * block.expansion, output_dim=num_classes,
                     bias_attr=False, act='softmax')
    for m in self.sublayers():
        if isinstance(m, Conv2D):
            # Experimental: Kaiming (MSRA) initialization.
            # NOTE(review): rebinding the loop variable `m` does NOT replace
            # the sublayer inside the model — as written this loop has no
            # effect on the network's parameters.
            m = Conv2D(num_channels=m._num_channels, num_filters=m._num_filters,
                       filter_size=m._filter_size, stride=m._stride,
                       padding=m._padding, groups=m._groups, act=m._act,
                       bias_attr=m._bias_attr,
                       param_attr=fluid.initializer.MSRAInitializer(
                           uniform=False))
    # NOTE(review): stray opening triple-quote below — appears to start a
    # commented-out section whose remainder is outside this excerpt; confirm
    # against the full file before touching it.
    '''
def __init__(self, name_scope, num_segments=32, class_dim=101):
    """ECO-Full: BN-Inception 2-D trunk with a parallel 3-D net grafted on
    after inception_3c; ends in global pooling, dropouts and a softmax FC."""
    super(ECOFull, self).__init__(name_scope)
    self.num_segments = num_segments
    self.class_dim = class_dim
    self.conv1_x = ConvBNLayer(self.full_name(), num_channels=3, num_filters=64,
                               filter_size=7, stride=2, act='relu')
    # ceil_mode must be enabled here to match the reference feature-map sizes.
    self.pool1 = Pool2D(pool_size=3, pool_stride=2, pool_type='max',
                        ceil_mode=True)
    # Unclear whether this reduce layer is actually used; pending debugging.
    self.conv2_reduce = ConvBNLayer(self.full_name(), num_channels=64,
                                    num_filters=64, filter_size=1, stride=1,
                                    act=None)
    self.conv2_x = ConvBNLayer(self.full_name(), num_channels=64,
                               num_filters=192, filter_size=3, stride=1,
                               act='relu')
    self.pool2 = Pool2D(pool_size=3, pool_stride=2, pool_type='max',
                        ceil_mode=True)
    self.inception_3a = InceptionBasic(self.full_name(), 192,
                                       [64, 64, 64, 64, 96, 96, 32])
    self.inception_3b = InceptionBasic(self.full_name(), 256,
                                       [64, 64, 96, 64, 96, 96, 64])
    # Third inception stage, split so the 3-D branch can tap its features.
    inception_3c_filterlist = [128, 160, 64, 96, 96]
    self.inception_3c_1 = Inception3c_1(self.full_name(), 320,
                                        inception_3c_filterlist)
    self.inception_3c_2a = Inception3c_2a(self.full_name(), 320,
                                          inception_3c_filterlist)
    # 3-D network branch.
    self.eco_3dnet = ECO3dNet(self.full_name())
    # Remaining 2-D branch.
    self.inception_3c_2b = ConvBNLayer(
        self.full_name(),
        num_channels=inception_3c_filterlist[3],
        num_filters=inception_3c_filterlist[4],
        filter_size=3, stride=2, act='relu')
    # Plain pooling path, no conv-bn.
    self.inception_3c_pool = Pool2D(pool_size=3, pool_stride=2,
                                    pool_type="max", ceil_mode=True)
    self.inception_4a = InceptionBasic(self.full_name(), 576,
                                       [224, 64, 96, 96, 128, 128, 128])
    self.inception_4b = InceptionBasic(self.full_name(), 576,
                                       [192, 96, 128, 96, 128, 128, 128])
    self.inception_4c = InceptionBasic(self.full_name(), 576,
                                       [160, 128, 160, 128, 160, 160, 96])
    self.inception_4d = InceptionBasic(self.full_name(), 576,
                                       [96, 128, 192, 160, 192, 192, 96])
    # Reduction stage; its pool path has no conv-bn.
    self.inception_4e = Inception4e(self.full_name(), 576,
                                    [128, 192, 192, 256, 256])
    self.inception_5a = InceptionBasic(self.full_name(), 1024,
                                       [352, 192, 320, 160, 224, 224, 128])
    self.inception_5b = InceptionBasic(self.full_name(), 1024,
                                       [352, 192, 320, 192, 224, 224, 128],
                                       'max')
    self.end_pool1 = Pool2D(pool_size=7, pool_stride=1, pool_type="avg",
                            ceil_mode=True)
    self.drop_out1 = Dropout(p=0.5)
    self.drop_out2 = Dropout(p=0.3)
    self.out = Linear(input_dim=1536, output_dim=self.class_dim, act='softmax')