Example No. 1
 def __init__(self):
     super().__init__()
     self.flatten = nn.Flatten()
     self.Linear_1 = nn.Linear(28 * 28, 128)
     self.sig = nn.Sigmoid()
     self.Linear_2 = nn.Linear(128, 84)
     self.Linear_3 = nn.Linear(84, 10)
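A minimal forward-pass sketch for the MLP above (an assumption; the original snippet only shows __init__):

 # Hypothetical forward(); layer names are taken from the __init__ above.
 def forward(self, x):
     x = self.flatten(x)                 # (N, 1, 28, 28) -> (N, 784)
     x = self.sig(self.Linear_1(x))      # -> (N, 128)
     x = self.sig(self.Linear_2(x))      # -> (N, 84)
     return self.Linear_3(x)             # -> (N, 10) class scores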
Example No. 2
 def __init__(self, nc, DIM, cb_num=8, drop_rate=0.3):
     super().__init__()
     self.prob = drop_rate
     self.encoder = Encoder(cb_num)
     fea_dim = int(64 * DIM / (2**cb_num))  # 256
     h_dim = 128
     self.linear1 = nn.Linear(fea_dim, h_dim)
     self.bn1 = nn.BatchNorm1d(h_dim)
     self.linear2 = nn.Linear(h_dim, nc)
Example No. 3
 def __init__(self, in_planes, reduction=16):
     super(SELayer, self).__init__()
     # returns a 1x1 feature map; the channel count is unchanged
     self.avg_pool = nn.AdaptiveAvgPool1d(1)
     self.fc = nn.Sequential(
         nn.Linear(in_planes, in_planes // reduction, bias=False),
         nn.ReLU(),  # inplace=True overwrites the input in place; keeping the default False is safer and avoids errors
         nn.Linear(in_planes // reduction, in_planes, bias=False),
         nn.Sigmoid())
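A sketch of the usual squeeze-and-excitation forward pass for the layers above (an assumption; only __init__ is shown), for an input of shape (N, C, L):

 # Hypothetical forward() for SELayer.
 def forward(self, x):
     b, c, _ = x.size()
     y = self.avg_pool(x).view(b, c)     # squeeze: (N, C, L) -> (N, C)
     y = self.fc(y).view(b, c, 1)        # excitation: per-channel weights in (0, 1)
     return x * y.expand_as(x)           # rescale the original features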
Example No. 4
 def __init__(self, gate_channels, reduction_ratio=16):
     super(ChannelGate, self).__init__()
     self.gate_channels = gate_channels
     self.gap1 = nn.AdaptiveAvgPool1d(output_size=1)
     self.gap2 = nn.AdaptiveAvgPool1d(output_size=1)
     self.mlp = nn.Sequential(
         Flatten(),
         nn.Linear(gate_channels, gate_channels // reduction_ratio),
         nn.ReLU(),
         nn.Linear(gate_channels // reduction_ratio, gate_channels))
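A hedged forward sketch for the gate above (not part of the original; it assumes torch is imported and that Flatten collapses everything after the batch dimension): both pooled branches share the MLP, their outputs are summed, and a sigmoid yields per-channel scales. Note that gap1 and gap2 are both average pools here, whereas CBAM-style gates usually pair an average pool with a max pool.

 # Hypothetical forward() for ChannelGate, input shape (N, C, L).
 def forward(self, x):
     att = self.mlp(self.gap1(x)) + self.mlp(self.gap2(x))  # (N, C) logits
     scale = torch.sigmoid(att).unsqueeze(2)                # (N, C, 1) gate in (0, 1)
     return x * scale                                       # broadcast over length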
Example No. 5
 def __init__(self):
     super().__init__()
     self.conv2d_1 = nn.Conv2d(1, 6, 5, padding=2)
     self.avgpool2d = nn.AvgPool2d(2, stride=2)
     self.conv2d_2 = nn.Conv2d(6, 16, 5)
     self.flatten = nn.Flatten()
     self.sig = nn.Sigmoid()
     self.linear_1 = nn.Linear(16*5*5, 120)
     self.linear_2 = nn.Linear(120, 84)
     self.linear_3 = nn.Linear(84, 10)
Example No. 6
    def __init__(self, n_states, n_actions, n_hidden, lr, device):
        super(DDPGCritic, self).__init__()
        self.device = device

        self.input = nn.Linear(n_states+n_actions, n_hidden)
        self.l1 = nn.Linear(n_hidden, n_hidden)
        self.l2 = nn.Linear(n_hidden, n_hidden)
        self.out = nn.Linear(n_hidden, 1)

        self.optimizer = optim.Adam(self.parameters(), lr=lr)
        self.to(self.device)
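A sketch of the critic's forward pass (assumed; the original forward is not shown, and it relies on torch and torch.nn.functional as F being imported): state and action are concatenated before the first layer, and the head returns a single Q-value.

    # Hypothetical forward() for DDPGCritic.
    def forward(self, state, action):
        x = torch.cat([state, action], dim=1)   # (N, n_states + n_actions)
        x = F.relu(self.input(x))
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        return self.out(x)                      # (N, 1) Q-value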
Example No. 7
    def __init__(self, n_states, n_actions, n_hidden, lr, device):
        super(DDPGActor, self).__init__()
        self.device = device

        self.input = nn.Linear(n_states, n_hidden)
        self.l1 = nn.Linear(n_hidden, n_hidden)
        self.out = nn.Linear(n_hidden, n_actions)

        self.dropout = nn.Dropout(0.1)

        self.optimizer = optim.SGD(self.parameters(), lr=lr)
        self.to(device)
Example No. 8
    def __init__(self,
                 h,
                 d_model,
                 k,
                 last_feat_height,
                 last_feat_width,
                 scales=1,
                 dropout=0.1,
                 need_attn=False):
        """
        :param h: number of self attention head
        :param d_model: dimension of model
        :param dropout: dropout probability
        :param k: number of keys
        """
        super(DeformableHeadAttention, self).__init__()
        assert h == 8  # the number of attention heads is currently fixed to 8, as in the paper
        assert d_model % h == 0
        # We assume d_v always equals d_k, d_q = d_k = d_v = d_m / h
        self.d_k = int(d_model / h)
        self.h = h

        self.q_proj = nn.Linear(d_model, d_model)
        self.k_proj = nn.Linear(d_model, d_model)

        self.scales_hw = []
        for i in range(scales):
            self.scales_hw.append(
                [last_feat_height * 2**i, last_feat_width * 2**i])

        # Only create the dropout module when a non-zero rate is given.
        self.dropout = None
        if dropout > 0:
            self.dropout = nn.Dropout(p=dropout)

        self.k = k
        self.scales = scales
        self.last_feat_height = last_feat_height
        self.last_feat_width = last_feat_width

        self.offset_dims = 2 * self.h * self.k * self.scales
        self.A_dims = self.h * self.k * self.scales

        # 2*M*L*K values for the sampling offsets and M*L*K for A_mlqk
        self.offset_proj = nn.Linear(d_model, self.offset_dims)
        self.A_proj = nn.Linear(d_model, self.A_dims)

        self.wm_proj = nn.Linear(d_model, d_model)

        self.need_attn = need_attn

        self.attns = []
        self.offsets = []
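A small shape check for the projections above (illustrative only; the constructor arguments are assumptions): each query position produces 2*h*k*scales sampling offsets and h*k*scales attention logits.

    # Hypothetical usage; assumes torch is imported and the class above is defined.
    attn = DeformableHeadAttention(h=8, d_model=256, k=4,
                                   last_feat_height=16, last_feat_width=16)
    q = torch.randn(2, 100, 256)         # (batch, num_queries, d_model)
    print(attn.offset_proj(q).shape)     # torch.Size([2, 100, 64]) = 2*8*4*1
    print(attn.A_proj(q).shape)          # torch.Size([2, 100, 32]) = 8*4*1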
Example No. 9
 def __init__(self, DIM):
     super().__init__()
     NUM_BLOCK = 8
     FEATURE_CHN = 64
     x_dim = int(FEATURE_CHN *
                 (DIM // 2**NUM_BLOCK))  # 2048:8192; 1024:4096
     feature = 256  # 100(original), 64(CW2SQ), 32, 16. choose: 256, 16
     # print('The NUM of ConvBlocK: {}'.format(NUM_BLOCK))
     print('The FC features: {}\n'.format(feature))
     self.create_feat = nn.Linear(x_dim,
                                  feature)  # weight shape: (feature, x_dim)
     self.discriminator = nn.Sequential(nn.BatchNorm1d(feature), nn.ReLU(),
                                        nn.Linear(feature, 2))
Example No. 10
    def __init__(self, in_dim, n_class):
        super(LeNet, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_dim, 6, 5, stride=1,
                      padding=0),  # 1*28*28 --> 6*24*24
            nn.ReLU(True),
            nn.MaxPool2d(2, 2),  # 6*24*24 --> 6*12*12
            nn.Conv2d(6, 16, 5, stride=1, padding=0),  # 6*12*12 --> 16*8*8
            nn.ReLU(True),
            nn.MaxPool2d(2, 2))  # 16*8*8 --> 16*4*4

        self.fc = nn.Sequential(nn.Linear(16 * 4 * 4, 120), nn.Linear(120, 84),
                                nn.Linear(84, n_class))
Example No. 11
    def __init__(self, kwargs):
        super(DDPGCritic, self).__init__()
        self.n_states = kwargs['obs_space']
        self.n_actions = kwargs['action_space']
        self.n_hidden = kwargs['n_hidden_critic']
        self.n_agents = kwargs['n_agents']
        self.lr = kwargs['lr_critic']
        self.device = kwargs['device']

        self.input = nn.Linear((self.n_states + self.n_actions) * self.n_agents, self.n_hidden[0])
        self.l1 = nn.Linear(self.n_hidden[0], self.n_hidden[1])
        self.out = nn.Linear(self.n_hidden[1], 1)

        self.optimizer = optim.Adam(self.parameters(), lr=self.lr)
        self.to(self.device)
Example No. 12
    def __init__(self, kwargs):
        super(DDPGActor, self).__init__()
        self.n_states = kwargs['obs_space']
        self.n_actions = kwargs['action_space']
        self.n_hidden = kwargs['n_hidden_actor']
        self.lr = kwargs['lr_actor']
        self.device = kwargs['device']

        self.input = nn.Linear(self.n_states, self.n_hidden[0])
        self.l1 = nn.Linear(self.n_hidden[0], self.n_hidden[1])
        self.l2 = nn.Linear(self.n_hidden[1], self.n_hidden[2])
        self.out = nn.Linear(self.n_hidden[2], self.n_actions)

        self.optimizer = optim.Adam(self.parameters(), lr=self.lr)
        self.to(self.device)
Example No. 13
    def __init__(self,
                 input_size,
                 hidden_size,
                 output_size,
                 num_layers=1,
                 dropout=0,
                 bidirectional=False):
        super(RNNModel, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.dropout = dropout
        self.bidirectional = bidirectional

        if bidirectional:
            num_directions = 2
        else:
            num_directions = 1

        # build the model
        self.rnn = modules.LSTM(input_size,
                                hidden_size,
                                num_layers,
                                batch_first=True,
                                dropout=self.dropout,
                                bidirectional=self.bidirectional)
        if self.dropout != 0:
            self.drop = modules.Dropout(dropout)
        # fully connected output layer
        self.fc = modules.Linear(hidden_size * num_directions, output_size)
        # output activation
        self.activation = modules.LogSoftmax(dim=1)
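A possible forward pass for the model above (an assumption; only __init__ is shown): run the LSTM, keep the last time step, apply dropout if configured, then the fully connected layer and LogSoftmax.

    # Hypothetical forward(); input shape (N, T, input_size) because batch_first=True.
    def forward(self, x):
        out, _ = self.rnn(x)             # (N, T, hidden_size * num_directions)
        out = out[:, -1, :]              # last time step
        if self.dropout != 0:
            out = self.drop(out)
        return self.activation(self.fc(out))   # (N, output_size) log-probabilities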
Example No. 14
    def __init__(self, block, layers, num_classes=10):
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.initdata = nn.Conv2d(1,
                                  64,
                                  kernel_size=7,
                                  stride=1,
                                  padding=3,
                                  bias=False)
        self.bn0 = nn.BatchNorm2d(64)
        self.relu0 = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self.make_layers(block, 64, layers[0])
        self.layer2 = self.make_layers(block, 128, layers[1], stride=1)
        self.layer3 = self.make_layers(block, 256, layers[2], stride=1)
        self.layer4 = self.make_layers(block, 512, layers[3], stride=2)

        self.avg = nn.AvgPool2d(7, stride=1)
        self.full = nn.Linear(512 * block.expansion, num_classes)
        self.sigmoid = nn.Sigmoid()

        for m in self.modules():
            # isinstance() checks whether an object is of a given type (similar to type())
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight,
                                     mode='fan_out',
                                     nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
Example No. 15
    def __init__(self,
                 dict_size_1: int,
                 dict_size_2: int,
                 embedding_dim,
                 nhead=6,
                 d_model=512,
                 input_len=50,
                 output_len=50):
        super(MyTransformer, self).__init__()

        # self.pos_encoder = nn.PositionalEncoding()
        self.embeding1 = nn.Embedding(
            num_embeddings=dict_size_1, embedding_dim=embedding_dim
        )  # num_embeddings: vocabulary size; embedding_dim: output dimension per token
        self.embeding2 = nn.Embedding(num_embeddings=dict_size_2,
                                      embedding_dim=embedding_dim)
        # B L E

        # d_model is the attention layer's embed_dim, i.e. the dimension of each input token
        self.transformer = nn.Transformer(d_model=embedding_dim,
                                          nhead=nhead,
                                          num_encoder_layers=6,
                                          num_decoder_layers=6,
                                          dim_feedforward=2048)
        # transformer output shape: (T, N, E) = (target_len, batch_size, embedding_dim)
        # project the decoder output back onto the target vocabulary

        self.linear1 = nn.Linear(embedding_dim, dict_size_2)
        # self.linear = nn.Linear(embedding_dim, dict_size_2)
        self.softmax = nn.Softmax(dim=-1)
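A hedged forward sketch for the model above (not part of the original): embed source and target, move them to the (seq_len, batch, embedding_dim) layout that nn.Transformer expects by default, then project the decoder output onto the target vocabulary.

    # Hypothetical forward(); src (N, S) and tgt (N, T) are token-index tensors.
    def forward(self, src, tgt):
        src = self.embeding1(src).permute(1, 0, 2)   # (S, N, E)
        tgt = self.embeding2(tgt).permute(1, 0, 2)   # (T, N, E)
        out = self.transformer(src, tgt)             # (T, N, E)
        return self.softmax(self.linear1(out))       # (T, N, dict_size_2)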
Example No. 16
 def __init__(self, input_size, output_size, num_channels, kernel_size,
              dropout):
     super(TCN, self).__init__()
     self.tcn = TemporalConvNet(input_size,
                                num_channels,
                                kernel_size=kernel_size,
                                dropout=dropout)
     self.linear = modules.Linear(num_channels[-1], output_size)
Example No. 17
    def __init__(self, feature_dim, n_classes, head=2):
        super().__init__()
        self.n_classes = n_classes
        self.feature_dim = feature_dim
        self.head = head

        for i in range(head):
            self.__setattr__("fc{}".format(i),
                             modules.Linear(feature_dim, n_classes))
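A usage sketch for the dynamically named heads above (an assumption, since only __init__ is shown): each head is retrieved with getattr and applied to the same feature vector.

    # Hypothetical forward(); returns one (N, n_classes) tensor per head.
    def forward(self, x):
        return [getattr(self, "fc{}".format(i))(x) for i in range(self.head)]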
Example No. 18
    def __init__(self):
        super().__init__()

        self.relu = nn.ReLU()
        self.avgpool2d = nn.AvgPool2d(2, stride=2)

        # input stage
        self.conv2d_1 = nn.Conv2d(1, 6, kernel_size=5, padding=2)
        self.batchnorm2d = nn.BatchNorm2d(6)

        # middle residual block
        self.conv2d_2 = nn.Conv2d(6, 6, kernel_size=3, padding=1)

        # output stage
        self.conv2d_3 = nn.Conv2d(6, 6, 5)
        self.flatten = nn.Flatten()
        self.sig = nn.Sigmoid()
        self.linear_1 = nn.Linear(6 * 5 * 5, 64)
        self.linear_2 = nn.Linear(64, 10)
Example No. 19
    def __init__(self, rnn_size, embedding_size, input_size, output_size,
                 grids_width, grids_height, dropout_par, device):

        super(VPTLSTM, self).__init__()
        ###### parameter initialization ######
        self.device = device
        self.rnn_size = rnn_size  # hidden size, 128 by default
        self.embedding_size = embedding_size  # spatial-coordinate embedding size (64); each state becomes a 64-dim vector
        self.input_size = input_size  # input size (6), length of the feature vector
        self.output_size = output_size  # output size (5)
        self.grids_width = grids_width
        self.grids_height = grids_height
        self.dropout_par = dropout_par

        ###### layer initialization ######
        # inputs: embedded_input, hidden_states
        self.cell = nn.LSTMCell(2 * self.embedding_size, self.rnn_size)

        # input embedding layer: maps a vector of length input_size to embedding_size
        self.input_embedding_layer = nn.Linear(self.input_size,
                                               self.embedding_size)

        # input:  [vehicle_num, grids_height, grids_width, rnn_size]           e.g. [26, 39, 5, 128]
        # output: [vehicle_num, grids_height-12, grids_width-4, rnn_size/4]    e.g. [26, 27, 1, 32]
        self.social_tensor_conv1 = nn.Conv2d(in_channels=self.rnn_size,
                                             out_channels=self.rnn_size // 2,
                                             kernel_size=(5, 3),
                                             stride=(2, 1))
        self.social_tensor_conv2 = nn.Conv2d(in_channels=self.rnn_size // 2,
                                             out_channels=self.rnn_size // 4,
                                             kernel_size=(5, 3),
                                             stride=1)
        self.social_tensor_embed = nn.Linear(
            (self.grids_height - 15) * (self.grids_width - 4) *
            self.rnn_size // 4, self.embedding_size)

        # output layer: maps the rnn_size-dim hidden state to output_size values
        self.output_layer = nn.Linear(self.rnn_size, self.output_size)

        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(self.dropout_par)
Example No. 20
    def __init__(self,
                 model_name,
                 input_size,
                 hidden_size,
                 batch_size,
                 kernel_size,
                 out_channels,
                 output_size,
                 num_layers=1,
                 dropout=0,
                 bidirectional=False,
                 bn=False):
        super(LACModel, self).__init__()

        self.model_name = model_name
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.batch_size = batch_size
        self.kernel_size = kernel_size
        self.out_channels = out_channels
        self.output_size = output_size
        self.num_layers = num_layers
        self.dropout = dropout
        self.bidirectional = bidirectional
        self.bn = bn

        self.pm_model = LACModelUnit('PM-{0}'.format(model_name), input_size,
                                     hidden_size, batch_size, kernel_size,
                                     out_channels, num_layers, dropout,
                                     bidirectional, bn)
        self.gm_model = LACModelUnit('GM-{0}'.format(model_name), input_size,
                                     hidden_size, batch_size, kernel_size,
                                     out_channels, num_layers, dropout,
                                     bidirectional, bn)
        self.sm_model = LACModelUnit('SM-{0}'.format(model_name), input_size,
                                     hidden_size, batch_size, kernel_size,
                                     out_channels, num_layers, dropout,
                                     bidirectional, bn)
        # check whether the LSTM is bidirectional
        if bidirectional:
            num_directions = 2
        else:
            num_directions = 1

        # fully connected layer
        self.fc = modules.Linear(
            (hidden_size * num_directions - kernel_size + 1) * out_channels *
            3, output_size)
        # batch-norm layer (note: this overwrites the boolean self.bn flag set above)
        self.bn = modules.BatchNorm1d(output_size)
        # output activation
        self.activation = modules.Softmax(dim=1)
Example No. 21
    def __init__(self, n_states, n_actions, n_hidden, lr):
        super(ActorCritic, self).__init__()

        self.input = nn.Linear(n_states, n_hidden)
        self.hidden_1 = nn.Linear(n_hidden, n_hidden)
        self.hidden_2 = nn.Linear(n_hidden, n_hidden)
        self.out_actor_sigma = nn.Linear(n_hidden, n_actions)
        self.out_actor_mu = nn.Linear(n_hidden, n_actions)
        self.out_critic = nn.Sequential(nn.Linear(n_hidden, n_hidden),
                                        nn.ReLU(), nn.Linear(n_hidden, 1))

        #self.bn_1 = nn.BatchNorm1d(n_hidden)
        #self.bn_2 = nn.BatchNorm1d(n_hidden)

        self.optimizer = optim.SGD(self.parameters(), lr=lr)
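A possible forward pass for the shared trunk and the three heads above (assumed; torch.tanh for the mean and F.softplus for the standard deviation are illustrative choices, not taken from the original, and torch.nn.functional as F is assumed imported):

    # Hypothetical forward() for ActorCritic.
    def forward(self, state):
        x = torch.relu(self.input(state))
        x = torch.relu(self.hidden_1(x))
        x = torch.relu(self.hidden_2(x))
        mu = torch.tanh(self.out_actor_mu(x))        # action mean
        sigma = F.softplus(self.out_actor_sigma(x))  # positive standard deviation
        value = self.out_critic(x)                   # state value
        return mu, sigma, value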
Example No. 22
 def __init__(self, num_class=10):
     super(VGG16, self).__init__()
     self.feature = modules.Sequential(
         # #1,
         modules.Conv2d(3, 64, kernel_size=3, padding=1),
         modules.BatchNorm2d(64),
         modules.ReLU(True),
         #2
         modules.Conv2d(64, 64, kernel_size=3, padding=1),
         modules.BatchNorm2d(64),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #3
         modules.Conv2d(64, 128, kernel_size=3, padding=1),
         modules.BatchNorm2d(128),
         modules.ReLU(True),
         # modules.MaxPool2d(kernel_size=2,stride=2),
         #4
         modules.Conv2d(128, 128, kernel_size=3, padding=1),
         modules.BatchNorm2d(128),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #5
         modules.Conv2d(128, 256, kernel_size=3, padding=1),
         modules.BatchNorm2d(256),
         modules.ReLU(True),
         #6
         modules.Conv2d(256, 256, kernel_size=3, padding=1),
         modules.BatchNorm2d(256),
         modules.ReLU(True),
         #7
         modules.Conv2d(256, 256, kernel_size=3, padding=1),
         modules.BatchNorm2d(256),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #8
         modules.Conv2d(256, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #9
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #10
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #11
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #12
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #13
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         modules.AvgPool2d(kernel_size=1, stride=1),
     )
     # fully connected classifier
     self.classifier = modules.Sequential(
         # #14
         modules.Linear(512, 4096),
         modules.ReLU(True),
         modules.Dropout(),
         #15
         modules.Linear(4096, 4096),
         modules.ReLU(True),
         modules.Dropout(),
         #16
         modules.Linear(4096, num_class),
     )
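A forward sketch for the VGG16 above (an assumption; the original forward is not shown): for 3x32x32 CIFAR-style inputs the feature stack ends at 512x1x1, so the tensor is flattened to 512 before the classifier.

 # Hypothetical forward() for VGG16.
 def forward(self, x):
     x = self.feature(x)           # (N, 512, 1, 1) for a 32x32 input
     x = x.view(x.size(0), -1)     # (N, 512)
     return self.classifier(x)     # (N, num_class)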
Example No. 23
 def __init__(self, input_num, hidden_num, output_num):
     super(Net, self).__init__()
     self.seq = nm.Sequential(nm.Linear(input_num, hidden_num), nm.ReLU(),
                              nm.Linear(hidden_num, output_num))
Example No. 24
    def __init__(self):
        super(ModelClass, self).__init__()
        device = torch.device('cpu')
        net = tvM.resnet50()
        summary(net.to(device), (3, 224, 224))

        self.backbone = tvM.resnet50(pretrained=True)

        # This fc head is only usable once the input size is fixed at 224, so multi-scale training is impossible:
        # self.backbone.fc = nnM.Sequential(nnM.Linear(2048, 1024), nnM.Linear(1024, 9))

        #Expected 3-dimensional tensor, but got 2-dimensional tensor for argument #1 'self' (while checking arguments for adaptive_avg_pool1d)
        # self.backbone.fc = nnM.Sequential(nnM.AdaptiveAvgPool1d(1024), nnM.AdaptiveAvgPool1d(9))

        #Expected 3-dimensional tensor, but got 2-dimensional tensor for argument #1 'self' (while checking arguments for adaptive_avg_pool1d)
        # self.backbone.fc = nnM.Sequential(nnM.AdaptiveAvgPool1d(9))

        #Expected 3-dimensional tensor, but got 4-dimensional tensor for argument #1 'self' (while checking arguments for adaptive_avg_pool1d)
        # self.backbone.avgpool = nnM.Sequential( nnM.AdaptiveAvgPool1d(9))

        #Expected 3-dimensional tensor, but got 2-dimensional tensor for argument #1 'self' (while checking arguments for adaptive_avg_pool1d)  ???
        # self.backbone.avgpool = nnM.AvgPool3d(kernel_size=7, stride=1, padding=0)
        # self.backbone.fc = nnM.AdaptiveAvgPool1d(9)

        # The last layers of the ResNet printed for a 224*224 input:
        #    Bottleneck-172           [-1, 2048, 7, 7]       0
        #    AvgPool2d-173           [-1, 2048, 1, 1]        0
        #    Linear-174                 [-1, 1000]       2,049,000
        # After the AvgPool2d, since the pooling kernel is 7, this is effectively a global pool: the output has only one spatial pixel and collapses to 2D (batch and channel dims), which is why the errors above keep saying "got 2-dimensional"

        #Expected 3-dimensional tensor, but got 4-dimensional tensor for argument #1 'self' (while checking arguments for adaptive_avg_pool1d)  ???
        # self.backbone.avgpool = nnM.AdaptiveAvgPool2d((5,5))
        # self.backbone.fc = nnM.AdaptiveAvgPool1d(9)

        #size mismatch, m1: [35 x 51200], m2: [25 x 9] at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/TH/generic/THTensorMath.c:2033
        # self.backbone.avgpool = nnM.AdaptiveAvgPool2d((5,5))
        # self.backbone.fc = nnM.Linear(25,9)

        #avgpool output is: (35, 2048, 5, 1)
        #Expected 3-dimensional tensor, but got 4-dimensional tensor for argument #1 'self' (while checking arguments for adaptive_avg_pool1d)
        # self.backbone.avgpool = nnM.AdaptiveAvgPool2d((5,1))
        # self.backbone.fc = nnM.AdaptiveAvgPool1d(9)

        # avgpool output is: (35,)   so a zero-sized dimension can indeed be specified
        #Expected 3-dimensional tensor, but got 1-dimensional tensor for argument #1 'self' (while checking arguments for adaptive_avg_pool1d)
        # self.backbone.avgpool = nnM.AdaptiveAvgPool3d((0,5,5))
        # self.backbone.fc = nnM.AdaptiveAvgPool1d(9)

        #avgpool output is: (35, 5, 5)
        # fc output is: (35, 5, 9)     why is the AdaptiveAvgPool1d output 3D??? see conclusion 2
        # self.backbone.avgpool = nnM.AdaptiveAvgPool3d((5,5,0))
        # self.backbone.fc = nnM.AdaptiveAvgPool1d(9)

        #avgpool output is: (35, 9, 1, 1)
        # when computing the loss:  RuntimeError: invalid argument 3: only batches of spatial targets supported (3D tensors) but got targets of dimension: 1 at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/THNN/generic/SpatialClassNLLCriterion.c:60
        # self.backbone.avgpool = nnM.AdaptiveAvgPool3d((9,1,1))

        # Success #1: drop the fc layer and let a single 3D GAP handle it.
        # self.backbone.avgpool = nnM.AdaptiveAvgPool3d((9,0,0))

        # Success #2: keep the fc layer and replace the original pool2d with a 2D GAP.
        self.backbone.avgpool = nnM.AdaptiveAvgPool2d((1, 1))
        self.backbone.fc = nnM.Sequential(nnM.Linear(2048, 1024),
                                          nnM.Linear(1024, 9))
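A minimal standalone check of the working setup above (not from the original; the aliases nnM and tvM mirror the snippet): replacing the average pool with a 2D global average pool keeps the fc input at 2048 for any reasonably sized input.

    # Hypothetical verification with a fresh (non-pretrained) resnet50.
    import torch
    import torch.nn as nnM
    import torchvision.models as tvM

    net = tvM.resnet50()
    net.avgpool = nnM.AdaptiveAvgPool2d((1, 1))
    net.fc = nnM.Sequential(nnM.Linear(2048, 1024), nnM.Linear(1024, 9))
    for size in (224, 320):
        print(net(torch.randn(2, 3, size, size)).shape)   # torch.Size([2, 9])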