Example #1
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """
        Make resnet layer
        :param block: Bottleneck
        :param planes:
        :param blocks:
        :param stride:
        :param dilation:
        :return:
        """
        BatchNorm = self._BatchNorm
        downsample = None
        # need conv in shortcut
        if stride != 1 or self.in_planes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.in_planes, planes * block.expansion, stride),
                BatchNorm(planes * block.expansion),
            )

        layers = list()
        layers.append(
            block(self.in_planes, planes, stride, downsample, dilation))
        self.in_planes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.in_planes, planes, dilation=dilation))

        return nn.Sequential(*layers)
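For context, a typical call site for this helper in a DeepLab-style ResNet constructor is sketched below; the block class and the stride/dilation values are illustrative assumptions, not taken from the snippet above.

        # hypothetical usage inside the backbone's __init__ (values are assumptions)
        self.layer1 = self._make_layer(Bottleneck, 64, blocks=3)
        self.layer2 = self._make_layer(Bottleneck, 128, blocks=4, stride=2)
        self.layer3 = self._make_layer(Bottleneck, 256, blocks=6, stride=2)
        self.layer4 = self._make_layer(Bottleneck, 512, blocks=3, stride=1, dilation=2)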
Example #2
    def __init__(self):
        super(ASPP, self).__init__()

        in_planes = 2048
        dilations = [1, 6, 12, 18]

        # all ASPP modules output feature maps with 256 channels
        self.aspp1 = _ASPPModule(in_planes, planes=256, kernel_size=1, padding=0, dilation=dilations[0])
        self.aspp2 = _ASPPModule(in_planes, planes=256, kernel_size=3, padding=dilations[1], dilation=dilations[1])
        self.aspp3 = _ASPPModule(in_planes, planes=256, kernel_size=3, padding=dilations[2], dilation=dilations[2])
        self.aspp4 = _ASPPModule(in_planes, planes=256, kernel_size=3, padding=dilations[3], dilation=dilations[3])

        # perform global average pooling on the last feature map of the backbone
        # the batch size must be greater than 1, otherwise an exception is thrown when computing the BatchNorm statistics
        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
                                             nn.Conv2d(in_planes, 256, 1, stride=1, bias=False),
                                             nn.BatchNorm2d(256),
                                             nn.ReLU())

        self.p1 = nn.AdaptiveAvgPool2d(1)
        self.p2 = nn.Conv2d(in_planes, 256, 1, stride=1, bias=False)
        self.p3 = nn.BatchNorm2d(256)

        self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(256)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
        self._init_weight()
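The forward pass is not included in the snippet; the minimal sketch below (an assumption, with `torch.nn.functional` imported as `F`) shows where the 1280 input channels of `self.conv1` come from: five parallel 256-channel branches are concatenated.

    # minimal forward sketch (assumed, not part of the original snippet)
    def forward(self, x):
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        x5 = F.interpolate(x5, size=x4.shape[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)   # 5 * 256 = 1280 channels
        x = self.relu(self.bn1(self.conv1(x)))
        return self.dropout(x)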
Example #3
 def make_layers(self, block, planes, num_blocks, stride=1):
     downsample = None
     layers = []
     if stride != 1 or self.inplanes != block.expansion * planes:
         downsample = nn.Sequential(
             nn.Conv2d(self.inplanes,
                       block.expansion * planes,
                       kernel_size=1,
                       stride=stride,
                       bias=False),
             nn.BatchNorm2d(block.expansion * planes))
     layers.append(
         block(self.inplanes, planes, stride=stride, downsample=downsample))
     self.inplanes = planes * block.expansion
     for i in range(1, num_blocks):
         layers.append(block(self.inplanes, planes))
     return nn.Sequential(*layers)
Example #4
 def __init__(self, in_chn=1, cb_num=8):
     super().__init__()
     print('The Convolution Channel: {}'.format(Conv_CHN))
     print('The Convolution Block: {}'.format(cb_num))
     # self.se1 = SELayer(Convolution_CHN)
     conv1 = conv_block(in_chn, Conv_CHN)
     conv_more = [conv_block(Conv_CHN, Conv_CHN) for i in range(cb_num - 1)]
     self.conv_blocks = nn.Sequential(conv1, *conv_more)
Example #5
 def __init__(self, in_planes, reduction=16):
     super(SELayer, self).__init__()
     # global average pooling: returns a length-1 feature map per channel, channel count unchanged
     self.avg_pool = nn.AdaptiveAvgPool1d(1)
     self.fc = nn.Sequential(
         nn.Linear(in_planes, in_planes // reduction, bias=False),
         nn.ReLU(),  # inplace=True would overwrite the input in place; keep the default False here, otherwise errors can occur
         nn.Linear(in_planes // reduction, in_planes, bias=False),
         nn.Sigmoid())
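A minimal forward sketch (assumed; the snippet only shows the constructor): squeeze with global average pooling, excite with the bottleneck MLP, then rescale the input channel-wise.

 # assumed forward, following the standard squeeze-and-excitation pattern
 def forward(self, x):                      # x: (N, C, L)
     n, c, _ = x.size()
     y = self.avg_pool(x).view(n, c)        # squeeze: (N, C)
     y = self.fc(y).view(n, c, 1)           # excite:  (N, C, 1)
     return x * y                           # channel-wise rescaling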
Example #6
 def __init__(self, gate_channels, reduction_ratio=16):
     super(ChannelGate, self).__init__()
     self.gate_channels = gate_channels
     self.gap1 = nn.AdaptiveAvgPool1d(output_size=1)
     self.gap2 = nn.AdaptiveAvgPool1d(output_size=1)
     self.mlp = nn.Sequential(
         Flatten(),
         nn.Linear(gate_channels, gate_channels // reduction_ratio),
         nn.ReLU(),
         nn.Linear(gate_channels // reduction_ratio, gate_channels))
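A CBAM-style forward sketch for this gate (an assumption; note that, as written, both pooling branches are average pools): the two pooled descriptors pass through the shared MLP, are summed, squashed with a sigmoid and used to rescale the channels.

 # assumed CBAM-style forward (not part of the original snippet)
 def forward(self, x):                               # x: (N, C, L)
     att = self.mlp(self.gap1(x)) + self.mlp(self.gap2(x))
     att = torch.sigmoid(att).unsqueeze(-1)          # (N, C, 1)
     return x * att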
Example #7
def conv_block(in_channels, out_channels):
    return nn.Sequential(
        nn.Conv1d(in_channels,
                  out_channels,
                  kernel_size=K_SIZE,
                  padding=PADDING),
        nn.BatchNorm1d(out_channels),
        nn.ReLU(),
        nn.MaxPool1d(kernel_size=2),
    )
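K_SIZE and PADDING are module-level constants in the original code; the hypothetical usage below, with assumed values, shows the shape bookkeeping (the MaxPool1d halves the temporal length).

# hypothetical usage; the K_SIZE/PADDING values and input shape are assumptions
K_SIZE, PADDING = 3, 1
block = conv_block(in_channels=1, out_channels=64)
y = block(torch.randn(8, 1, 1024))   # -> (8, 64, 512)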
Example #8
    def _make_multi_grid_layer(self,
                               block,
                               planes,
                               blocks,
                               stride=1,
                               dilation=1):
        """
        Multi-grid unit
        :param block: Bottleneck
        :param planes:
        :param blocks:
        :param stride:
        :param dilation:
        :return:
        """
        downsample = None
        if stride != 1 or self.in_planes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.in_planes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = list()
        layers.append(
            block(self.in_planes,
                  planes,
                  stride,
                  dilation=blocks[0] * dilation,
                  downsample=downsample))
        self.in_planes = planes * block.expansion
        for i in range(1, len(blocks)):
            layers.append(
                block(self.in_planes,
                      planes,
                      stride=1,
                      dilation=blocks[i] * dilation))

        return nn.Sequential(*layers)
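A hypothetical call site for this unit; the multi-grid rates (1, 2, 4) follow the DeepLabv3 convention and are an assumption, not part of the snippet above.

        # hypothetical usage inside the backbone's __init__ (values are assumptions)
        self.layer4 = self._make_multi_grid_layer(
            Bottleneck, 512, blocks=(1, 2, 4), stride=1, dilation=2)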
Example #9
 def __init__(self, cb_num=8):
     super().__init__()
     self.h_dim = 64
     self.z_dim = 64
     self.channel = 1
     print('The Convolution Channel: {}'.format(self.h_dim))
     print('The Convolution Block: {}'.format(cb_num))
     conv1 = conv_block(self.channel, self.z_dim)
     conv_more = [
         conv_block(self.h_dim, self.z_dim) for i in range(cb_num - 1)
     ]
     self.conv_blocks = nn.Sequential(conv1, *conv_more)
Example #10
    def __init__(self, in_channels, key_channels, out_channels, scale=1, dropout=0.1, bn_type=None):
        super(SpatialOCR_Module, self).__init__()

        self.object_context_block = ObjectAttentionBlock(in_channels, key_channels, scale, bn_type)
        _in_channels = 2 * in_channels

        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(_in_channels, out_channels, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout)
        )
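A minimal forward sketch (an assumption; the original forward is not shown) that explains why conv_bn_dropout takes 2 * in_channels: the attended object context is concatenated with the input features.

    # assumed forward for SpatialOCR_Module
    def forward(self, feats, proxy_feats):
        context = self.object_context_block(feats, proxy_feats)
        return self.conv_bn_dropout(torch.cat([context, feats], dim=1))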
Example #11
    def __init__(self, in_dim, n_class):
        super(LeNet, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_dim, 6, 5, stride=1,
                      padding=0),  # 1*28*28 --> 6*24*24
            nn.ReLU(True),
            nn.MaxPool2d(2, 2),  # 6*24*24 --> 6*12*12
            nn.Conv2d(6, 16, 5, stride=1, padding=0),  # 6*12*12 --> 16*8*8
            nn.ReLU(True),
            nn.MaxPool2d(2, 2))  # 16*8*8 --> 16*4*4

        self.fc = nn.Sequential(nn.Linear(16 * 4 * 4, 120), nn.Linear(120, 84),
                                nn.Linear(84, n_class))
Example #12
 def __init__(self, DIM):
     super().__init__()
     NUM_BLOCK = 8
     FEATURE_CHN = 64
     x_dim = int(FEATURE_CHN *
                 (DIM // 2**NUM_BLOCK))  # 2048:8192; 1024:4096
     feature = 256  # 100(original), 64(CW2SQ), 32, 16. choose: 256, 16
     # print('The NUM of ConvBlocK: {}'.format(NUM_BLOCK))
     print('The FC features: {}\n'.format(feature))
     self.create_feat = nn.Linear(x_dim,
                                  feature)  # weight shape: (feature, x_dim)
     self.discriminator = nn.Sequential(nn.BatchNorm1d(feature), nn.ReLU(),
                                        nn.Linear(feature, 2))
Example #13
 def __init__(self, in_channels, key_channels, scale=1, bn_type=None):
     super(ObjectAttentionBlock, self).__init__()
     self.scale = scale
     self.in_channels = in_channels
     self.key_channels = key_channels
     self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
     self.f_pixel = nn.Sequential(
         nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels, kernel_size=1, stride=1,
                   padding=0, bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True),
         nn.Conv2d(in_channels=self.key_channels, out_channels=self.key_channels, kernel_size=1, stride=1,
                   padding=0, bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True)
     )
     self.f_object = nn.Sequential(
         nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels, kernel_size=1, stride=1, padding=0,
                   bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True),
         nn.Conv2d(in_channels=self.key_channels, out_channels=self.key_channels, kernel_size=1, stride=1, padding=0,
                   bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True)
     )
     self.f_down = nn.Sequential(
         nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels, kernel_size=1, stride=1, padding=0,
                   bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True)
     )
     self.f_up = nn.Sequential(
         nn.Conv2d(in_channels=self.key_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0,
                   bias=False),
         nn.BatchNorm2d(self.in_channels),  # normalise the conv output, which has in_channels channels (not key_channels)
         nn.ReLU(inplace=True)
     )
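A minimal forward sketch (an assumption, with `torch.nn.functional` imported as `F`): pixel queries attend over object-region keys, the attended values are projected back to in_channels by f_up, and the optional pooling is undone at the end.

 # assumed forward for ObjectAttentionBlock
 def forward(self, x, proxy):
     n, _, h, w = x.size()
     if self.scale > 1:
         x = self.pool(x)
     query = self.f_pixel(x).view(n, self.key_channels, -1).permute(0, 2, 1)
     key = self.f_object(proxy).view(n, self.key_channels, -1)
     value = self.f_down(proxy).view(n, self.key_channels, -1).permute(0, 2, 1)
     sim = torch.matmul(query, key) * (self.key_channels ** -0.5)
     sim = torch.softmax(sim, dim=-1)
     context = torch.matmul(sim, value).permute(0, 2, 1).contiguous()
     context = context.view(n, self.key_channels, *x.size()[2:])
     context = self.f_up(context)
     if self.scale > 1:
         context = F.interpolate(context, size=(h, w), mode='bilinear', align_corners=True)
     return context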
Example #14
    def __init__(self, in_channels, out_channels, stride=1, M=2, r=16, L=32):
        """
        :param in_channels:  输入通道维度
        :param out_channels: 输出通道维度   原论文中 输入输出通道维度相同
        :param stride:  步长,默认为1
        :param M:  分支数
        :param r: 特征Z的长度,计算其维度d 时所需的比率(论文中 特征S->Z 是降维,故需要规定 降维的下界)
        :param L:  论文中规定特征Z的下界,默认为32
        """
        super(SK_Conv1d, self).__init__()
        d = max(in_channels // r, L)  # compute the length d of vector Z
        self.M = M
        self.out_channels = out_channels
        self.conv = nn.ModuleList()  # add one convolution branch per kernel configuration, according to the number of branches
        for i in range(M):
            # For efficiency the original paper replaces the 5x5 dilated convolution with (3x3, dilation=2),
            # and recommends grouped convolution with G=32, so only out_channels/groups = 2 channels take part in each group's computation.
            self.conv.append(
                nn.Sequential(
                    nn.Conv1d(in_channels,
                              out_channels,
                              3,
                              stride,
                              padding=1 + i,
                              dilation=1 + i,
                              groups=32,
                              bias=False), nn.BatchNorm1d(out_channels),
                    nn.ReLU(inplace=True)))
        self.global_pool = nn.AdaptiveAvgPool1d(
            1)  # adaptive pooling to the given output size; size 1 implements global average pooling (GAP)

        self.fc1 = nn.Sequential(nn.Conv1d(out_channels, d, 1, bias=False),
                                 nn.BatchNorm1d(d),
                                 nn.ReLU(inplace=True))  # dimensionality reduction
        self.fc2 = nn.Conv1d(d, out_channels * M, 1, 1, bias=False)  # dimensionality restoration
        # self.fcs = nn.ModuleList(self.fc1, self.fc2)
        self.softmax = nn.Softmax(
            dim=1)  # dim=1 applies the softmax across the branches at each position, so the corresponding weights sum to 1
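A minimal forward sketch for SK_Conv1d (an assumption; the original forward is not shown): fuse the M branches, squeeze with GAP, produce per-branch channel attention with fc1/fc2, softmax over the branch dimension, then recombine.

    # assumed forward for SK_Conv1d
    def forward(self, x):
        n = x.size(0)
        feats = [conv(x) for conv in self.conv]                   # M tensors of shape (N, C, L)
        u = sum(feats)                                            # element-wise fusion
        z = self.fc1(self.global_pool(u))                         # (N, d, 1)
        a = self.fc2(z).view(n, self.M, self.out_channels, 1)     # (N, M, C, 1)
        a = self.softmax(a)                                       # weights over the M branches sum to 1
        return sum(a[:, i] * feats[i] for i in range(self.M))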
Example #15
    def __init__(self, n_states, n_actions, n_hidden, lr):
        super(ActorCritic, self).__init__()

        self.input = nn.Linear(n_states, n_hidden)
        self.hidden_1 = nn.Linear(n_hidden, n_hidden)
        self.hidden_2 = nn.Linear(n_hidden, n_hidden)
        self.out_actor_sigma = nn.Linear(n_hidden, n_actions)
        self.out_actor_mu = nn.Linear(n_hidden, n_actions)
        self.out_critic = nn.Sequential(nn.Linear(n_hidden, n_hidden),
                                        nn.ReLU(), nn.Linear(n_hidden, 1))

        #self.bn_1 = nn.BatchNorm1d(n_hidden)
        #self.bn_2 = nn.BatchNorm1d(n_hidden)

        self.optimizer = optim.SGD(self.parameters(), lr=lr)
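A possible forward for this actor-critic head (purely an assumption: the activations and the use of softplus for the standard deviation are not shown in the snippet; `torch.nn.functional` is assumed imported as `F`).

    # assumed forward returning the Gaussian policy parameters and the state value
    def forward(self, state):
        x = torch.relu(self.input(state))
        x = torch.relu(self.hidden_1(x))
        x = torch.relu(self.hidden_2(x))
        mu = self.out_actor_mu(x)
        sigma = F.softplus(self.out_actor_sigma(x))   # keep the standard deviation positive
        value = self.out_critic(x)
        return mu, sigma, value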
Example #16
    def __init__(self, num_classes):
        super(Decoder, self).__init__()
        low_level_in_planes = 256

        self.conv1 = nn.Conv2d(low_level_in_planes, 48, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(48)
        self.relu = nn.ReLU()
        self.last_conv = nn.Sequential(
            nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1,
                      bias=False), nn.BatchNorm2d(256), nn.ReLU(),
            nn.Dropout(0.5),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1,
                      bias=False), nn.BatchNorm2d(256), nn.ReLU(),
            nn.Dropout(0.1),
            nn.Conv2d(256, num_classes, kernel_size=1, stride=1))
        self._init_weight()
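A minimal forward sketch (an assumption, with `torch.nn.functional` imported as `F`) that explains the 304 input channels of last_conv: 256 ASPP channels concatenated with the 48 reduced low-level channels.

    # assumed forward for the DeepLabV3+ decoder
    def forward(self, x, low_level_feat):
        low_level_feat = self.relu(self.bn1(self.conv1(low_level_feat)))   # (N, 48, h, w)
        x = F.interpolate(x, size=low_level_feat.shape[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x, low_level_feat), dim=1)                          # 256 + 48 = 304 channels
        return self.last_conv(x)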
Example #17
    def __init__(self,
                 n_inputs,
                 n_outputs,
                 kernel_size,
                 stride,
                 dilation,
                 padding,
                 dropout=0.2):
        super(TemporalBlock, self).__init__()
        # first dilated convolution of the residual block
        # after the conv the output size is (Batch, n_outputs, seq_len + padding); weight_norm normalises the layer's parameters
        self.conv1 = weight_norm(
            modules.Conv1d(n_inputs,
                           n_outputs,
                           kernel_size,
                           stride=stride,
                           padding=padding,
                           dilation=dilation))
        # chomp off the extra padding so the output keeps seq_len time steps
        self.chomp1 = Chomp1d(padding)
        self.relu1 = modules.ReLU()
        self.dropout1 = modules.Dropout(dropout)

        # second dilated convolution of the residual block
        self.conv2 = weight_norm(
            modules.Conv1d(n_outputs,
                           n_outputs,
                           kernel_size,
                           stride=stride,
                           padding=padding,
                           dilation=dilation))
        self.chomp2 = Chomp1d(padding)
        self.relu2 = modules.ReLU()
        self.dropout2 = modules.Dropout(dropout)

        # chain the convolutional sub-modules into one sequence
        self.net = modules.Sequential(self.conv1, self.chomp1, self.relu1,
                                      self.dropout1, self.conv2, self.chomp2,
                                      self.relu2, self.dropout2)

        # if the input and output channel counts differ, a 1x1 convolution matches the channels so the residual can be added
        self.downsample = modules.Conv1d(n_inputs, n_outputs,
                                         1) if n_inputs != n_outputs else None
        self.relu = modules.ReLU()
        self.init_weights()
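A minimal forward sketch (an assumption; the original forward is not shown): the residual shortcut goes through the 1x1 downsample convolution only when the channel counts differ, exactly as set up above.

    # assumed forward for the TCN residual block
    def forward(self, x):
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        return self.relu(out + res)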
Example #18
 def __init__(self, input_num, hidden_num, output_num):
     super(Net, self).__init__()
     self.seq = nm.Sequential(
         # BinarizeLinear(input_num, hidden_num),
         # nm.ReLU(),
         # BinarizeLinear(hidden_num, output_num),
         # #______________________________________
         # nn.BatchNorm1d(input_num),
         # nn.Linear(input_num, hidden_num),
         # nn.ReLU(),
         # nn.BatchNorm1d(hidden_num),
         # nn.Linear(hidden_num, output_num),
         # #______________________________________
         nm.BatchNorm1d(input_num),
         BinarizeLinear(input_num, hidden_num),
         nm.BatchNorm1d(hidden_num),
         BinarizeLinear(hidden_num, output_num),
         # BinarizeLinear(hidden_num, output_num),
     )
Example #19
    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvNet, self).__init__()
        layers = []
        num_levels = len(num_channels)
        for i in range(num_levels):
            # the dilation grows exponentially with the layer index to enlarge the receptive field
            dilation_size = 2**i
            # number of input channels of this level
            in_channels = num_inputs if i == 0 else num_channels[i - 1]
            # number of output channels of this level
            out_channels = num_channels[i]
            # so num_channels provides the input and output channel counts of every residual block
            layers += [
                TemporalBlock(in_channels,
                              out_channels,
                              kernel_size,
                              stride=1,
                              dilation=dilation_size,
                              padding=(kernel_size - 1) * dilation_size,
                              dropout=dropout)
            ]

        # stack all residual blocks into one deep convolutional network
        self.network = modules.Sequential(*layers)
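A hypothetical usage (the channel list and input shape are assumptions). Because each block pads by (kernel_size - 1) * dilation and then chomps the excess, the sequence length is preserved through the whole stack.

# hypothetical usage; the channel list and the input tensor are assumptions
tcn = TemporalConvNet(num_inputs=1, num_channels=[32, 32, 32], kernel_size=2)
y = tcn.network(torch.randn(8, 1, 100))   # -> (8, 32, 100)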
Example #20
 def __init__(self, num_class=10):
     super(VGG16, self).__init__()
     self.feature = modules.Sequential(
         # #1,
         modules.Conv2d(3, 64, kernel_size=3, padding=1),
         modules.BatchNorm2d(64),
         modules.ReLU(True),
         #2
         modules.Conv2d(64, 64, kernel_size=3, padding=1),
         modules.BatchNorm2d(64),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #3
         modules.Conv2d(64, 128, kernel_size=3, padding=1),
         modules.BatchNorm2d(128),
         modules.ReLU(True),
         # modules.MaxPool2d(kernel_size=2,stride=2),
         #4
         modules.Conv2d(128, 128, kernel_size=3, padding=1),
         modules.BatchNorm2d(128),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #5
         modules.Conv2d(128, 256, kernel_size=3, padding=1),
         modules.BatchNorm2d(256),
         modules.ReLU(True),
         #6
         modules.Conv2d(256, 256, kernel_size=3, padding=1),
         modules.BatchNorm2d(256),
         modules.ReLU(True),
         #7
         modules.Conv2d(256, 256, kernel_size=3, padding=1),
         modules.BatchNorm2d(256),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #8
         modules.Conv2d(256, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #9
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #10
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #11
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #12
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #13
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         modules.AvgPool2d(kernel_size=1, stride=1),
     )
     # fully connected layers
     self.classifier = modules.Sequential(
         # #14
         modules.Linear(512, 4096),
         modules.ReLU(True),
         modules.Dropout(),
         #15
         modules.Linear(4096, 4096),
         modules.ReLU(True),
         modules.Dropout(),
         #16
         modules.Linear(4096, num_class),
     )
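A minimal forward sketch (an assumption; the original forward is not shown). With 32x32 CIFAR-style inputs the five 2x2 max-pools reduce the feature map to 512 x 1 x 1, which matches the 512 input features of the classifier.

 # assumed forward: features, flatten, then the fully connected classifier
 def forward(self, x):
     x = self.feature(x)
     x = x.view(x.size(0), -1)
     return self.classifier(x)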
Example #21
 def __init__(self):
     super(CNNNet, self).__init__()
     self.cnn_layer = nn.Sequential(
         nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, padding=1))  # out_channels and kernel_size were missing in the original stub; these values are illustrative placeholders
     self.fc_layer = nn.Sequential()
Example #22
 def __init__(self, input_num, hidden_num, output_num):
     super(Net, self).__init__()
     self.seq = nm.Sequential(nm.Linear(input_num, hidden_num), nm.ReLU(),
                              nm.Linear(hidden_num, output_num))
Example #23
    def __init__(self):
        super(ModelClass, self).__init__()
        device = torch.device('cpu')
        net = tvM.resnet50()
        summary(net.to(device), (3, 224, 224))

        self.backbone = tvM.resnet50(pretrained=True)

        # a fully connected head that only works once the input size is fixed at 224, so multi-scale training is not possible
        # self.backbone.fc = nnM.Sequential(nnM.Linear(2048, 1024), nnM.Linear(1024, 9))

        #Expected 3-dimensional tensor, but got 2-dimensional tensor for argument #1 'self' (while checking arguments for adaptive_avg_pool1d)
        # self.backbone.fc = nnM.Sequential(nnM.AdaptiveAvgPool1d(1024), nnM.AdaptiveAvgPool1d(9))

        #Expected 3-dimensional tensor, but got 2-dimensional tensor for argument #1 'self' (while checking arguments for adaptive_avg_pool1d)
        # self.backbone.fc = nnM.Sequential(nnM.AdaptiveAvgPool1d(9))

        #Expected 3-dimensional tensor, but got 4-dimensional tensor for argument #1 'self' (while checking arguments for adaptive_avg_pool1d)
        # self.backbone.avgpool = nnM.Sequential( nnM.AdaptiveAvgPool1d(9))

        #Expected 3-dimensional tensor, but got 2-dimensional tensor for argument #1 'self' (while checking arguments for adaptive_avg_pool1d)  ???
        # self.backbone.avgpool = nnM.AvgPool3d(kernel_size=7, stride=1, padding=0)
        # self.backbone.fc = nnM.AdaptiveAvgPool1d(9)

        # Print the last layers of the resnet for inspection (224*224 input):
        #    Bottleneck-172           [-1, 2048, 7, 7]       0
        #    AvgPool2d-173           [-1, 2048, 1, 1]        0
        #    Linear-174                 [-1, 1000]       2,049,000
        # After AvgPool2d, since the pooling kernel equals 7, the pooling is already effectively global; the output has a single pixel and collapses to 2-D (batch and channel dimensions only), which is why the errors above keep saying "got 2-dimensional"

        #Expected 3-dimensional tensor, but got 4-dimensional tensor for argument #1 'self' (while checking arguments for adaptive_avg_pool1d)  ???
        # self.backbone.avgpool = nnM.AdaptiveAvgPool2d((5,5))
        # self.backbone.fc = nnM.AdaptiveAvgPool1d(9)

        #size mismatch, m1: [35 x 51200], m2: [25 x 9] at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/TH/generic/THTensorMath.c:2033
        # self.backbone.avgpool = nnM.AdaptiveAvgPool2d((5,5))
        # self.backbone.fc = nnM.Linear(25,9)

        #avgpool output is: (35, 2048, 5, 1)
        #Expected 3-dimensional tensor, but got 4-dimensional tensor for argument #1 'self' (while checking arguments for adaptive_avg_pool1d)
        # self.backbone.avgpool = nnM.AdaptiveAvgPool2d((5,1))
        # self.backbone.fc = nnM.AdaptiveAvgPool1d(9)

        #avgpool output is: (35,)   so a dimension of 0 can indeed be specified
        #Expected 3-dimensional tensor, but got 1-dimensional tensor for argument #1 'self' (while checking arguments for adaptive_avg_pool1d)
        # self.backbone.avgpool = nnM.AdaptiveAvgPool3d((0,5,5))
        # self.backbone.fc = nnM.AdaptiveAvgPool1d(9)

        #avgpool output is: (35, 5, 5)
        #fc output is: (35, 5, 9)     why is the AdaptiveAvgPool1d output 3-D??? see conclusion 2
        # self.backbone.avgpool = nnM.AdaptiveAvgPool3d((5,5,0))
        # self.backbone.fc = nnM.AdaptiveAvgPool1d(9)

        #avgpool output is: (35, 9, 1, 1)
        #when computing the loss:  RuntimeError: invalid argument 3: only batches of spatial targets supported (3D tensors) but got targets of dimension: 1 at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/THNN/generic/SpatialClassNLLCriterion.c:60
        # self.backbone.avgpool = nnM.AdaptiveAvgPool3d((9,1,1))

        #Working option 1: drop the fc layer and let a single 3-D GAP do the job.
        # self.backbone.avgpool = nnM.AdaptiveAvgPool3d((9,0,0))

        #Working option 2: keep the fc layer and replace the original pool2d with a 2-D GAP.
        self.backbone.avgpool = nnM.AdaptiveAvgPool2d((1, 1))
        self.backbone.fc = nnM.Sequential(nnM.Linear(2048, 1024),
                                          nnM.Linear(1024, 9))
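A quick sanity check of the final configuration (hypothetical, not in the original code): because the adaptive pooling always yields a 1x1 map, the modified backbone accepts different input sizes and always returns 9 logits.

# hypothetical check of the working configuration above
model = ModelClass()
print(model.backbone(torch.randn(2, 3, 224, 224)).shape)   # torch.Size([2, 9])
print(model.backbone(torch.randn(2, 3, 320, 320)).shape)   # torch.Size([2, 9])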