Example #1
    def __init__(self):
        super(ASPP, self).__init__()

        in_planes = 2048
        dilations = [1, 6, 12, 18]

        # every ASPP branch outputs a feature map with 256 channels
        self.aspp1 = _ASPPModule(in_planes, planes=256, kernel_size=1, padding=0, dilation=dilations[0])
        self.aspp2 = _ASPPModule(in_planes, planes=256, kernel_size=3, padding=dilations[1], dilation=dilations[1])
        self.aspp3 = _ASPPModule(in_planes, planes=256, kernel_size=3, padding=dilations[2], dilation=dilations[2])
        self.aspp4 = _ASPPModule(in_planes, planes=256, kernel_size=3, padding=dilations[3], dilation=dilations[3])

        # perform global average pooling on the last feature map of the backbone
        # the batch size must be greater than 1 during training, otherwise BatchNorm raises an exception
        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
                                             nn.Conv2d(in_planes, 256, 1, stride=1, bias=False),
                                             nn.BatchNorm2d(256),
                                             nn.ReLU())

        self.p1 = nn.AdaptiveAvgPool2d(1)
        self.p2 = nn.Conv2d(in_planes, 256, 1, stride=1, bias=False)
        self.p3 = nn.BatchNorm2d(256)

        self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(256)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
        self._init_weight()
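The five branches above each produce 256 channels, which is where the 1280 (= 5 × 256) input channels of self.conv1 come from. A plausible forward pass, sketched here for context (not part of the original snippet; it assumes torch and torch.nn.functional as F are imported):

    def forward(self, x):
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        # upsample the pooled branch back to the spatial size of the other branches
        x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)  # 5 * 256 = 1280 channels
        x = self.relu(self.bn1(self.conv1(x)))
        return self.dropout(x)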
Example #2
 def __init__(self, in_planes=None, planes=None, stride=1, downsample=None):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(in_planes, planes, stride)
     self.bn1 = nn.BatchNorm2d(planes)
     self.relu1 = nn.ReLU()
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = nn.BatchNorm2d(planes)
     self.downsample = downsample
     self.stride = stride
     self.relu2 = nn.ReLU(inplace=True)
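A typical forward pass for such a BasicBlock (a sketch, not taken from the original project) adds the identity, passed through downsample when the shapes differ, before the final ReLU:

 def forward(self, x):
     identity = x
     out = self.relu1(self.bn1(self.conv1(x)))
     out = self.bn2(self.conv2(out))
     if self.downsample is not None:
         identity = self.downsample(x)
     return self.relu2(out + identity)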
Example #3
    def __init__(self, num_classes):
        super(Decoder, self).__init__()
        low_level_in_planes = 256

        self.conv1 = nn.Conv2d(low_level_in_planes, 48, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(48)
        self.relu = nn.ReLU()
        self.last_conv = nn.Sequential(
            nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1,
                      bias=False), nn.BatchNorm2d(256), nn.ReLU(),
            nn.Dropout(0.5),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1,
                      bias=False), nn.BatchNorm2d(256), nn.ReLU(),
            nn.Dropout(0.1),
            nn.Conv2d(256, num_classes, kernel_size=1, stride=1))
        self._init_weight()
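The 304 input channels of last_conv come from concatenating the 256-channel ASPP output with the 48-channel reduced low-level features. A plausible forward pass under that assumption (a sketch, not part of the original snippet; F refers to torch.nn.functional):

    def forward(self, x, low_level_feat):
        low_level_feat = self.relu(self.bn1(self.conv1(low_level_feat)))  # 256 -> 48 channels
        # upsample the ASPP output to the low-level feature resolution
        x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x, low_level_feat), dim=1)  # 256 + 48 = 304 channels
        return self.last_conv(x)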
Example #4
    def __init__(self, block, layers, num_classes=10):
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.initdata = nn.Conv2d(1,
                                  64,
                                  kernel_size=7,
                                  stride=1,
                                  padding=3,
                                  bias=False)
        self.bn0 = nn.BatchNorm2d(64)
        self.relu0 = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self.make_layers(block, 64, layers[0])
        self.layer2 = self.make_layers(block, 128, layers[1], stride=1)
        self.layer3 = self.make_layers(block, 256, layers[2], stride=1)
        self.layer4 = self.make_layers(block, 512, layers[3], stride=2)

        self.avg = nn.AvgPool2d(7, stride=1)
        self.full = nn.Linear(512 * block.expansion, num_classes)
        self.sigmoid = nn.Sigmoid()

        for m in self.modules():
            # isinstance() checks whether an object is an instance of a given
            # type; it is similar to type(), but also accounts for inheritance
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight,
                                     mode='fan_out',
                                     nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
Example #5
    def __init__(self, model_name, input_size, hidden_size, batch_size, kernel_size,
                 out_channels, num_layers=1, dropout=0, bidirectional=False, bn=False):
        super(LACModelUnit, self).__init__()

        # get the number of available GPUs
        self.cuda_ids = np.arange(torch.cuda.device_count())

        self.model_name = model_name
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.batch_size = int(batch_size / len(self.cuda_ids))
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.dropout = dropout
        self.bidirectional = bidirectional
        self.bn = bn

        # build the LSTM
        self.rnn = modules.LSTM(self.input_size, self.hidden_size, self.num_layers,
                                batch_first=True, bidirectional=self.bidirectional)
        # activation after the LSTM
        self.rnn_act = modules.ReLU()
        # build the 1D CNN
        self.cnn = modules.Conv1d(1, self.out_channels, self.kernel_size)
        # batch-norm layer
        self.bn = modules.BatchNorm1d(self.out_channels)
        # activation after the 1D CNN
        self.cnn_act = modules.Tanh()
        # dropout layer
        self.drop = modules.Dropout(dropout)

        # initialize the LSTM hidden and cell states
        self.lstm_hidden, self.lstm_cell = self.init_hidden_cell(self.batch_size)
Example #6
    def __init__(self,
                 n_inputs,
                 n_outputs,
                 kernel_size,
                 stride,
                 dilation,
                 padding,
                 dropout=0.2):
        super(TemporalBlock, self).__init__()
        # define the first dilated convolution of the residual block
        # after the conv the output size is (Batch, input_channel, seq_len + padding); weight_norm normalizes the layer's weights
        self.conv1 = weight_norm(
            modules.Conv1d(n_inputs,
                           n_outputs,
                           kernel_size,
                           stride=stride,
                           padding=padding,
                           dilation=dilation))
        # chop off the extra padding so the output keeps seq_len time steps
        self.chomp1 = Chomp1d(padding)
        self.relu1 = modules.ReLU()
        self.dropout1 = modules.Dropout(dropout)

        # define the second dilated convolution of the residual block
        self.conv2 = weight_norm(
            modules.Conv1d(n_outputs,
                           n_outputs,
                           kernel_size,
                           stride=stride,
                           padding=padding,
                           dilation=dilation))
        self.chomp2 = Chomp1d(padding)
        self.relu2 = modules.ReLU()
        self.dropout2 = modules.Dropout(dropout)

        # chain the convolutional sub-modules into one sequential block
        self.net = modules.Sequential(self.conv1, self.chomp1, self.relu1,
                                      self.dropout1, self.conv2, self.chomp2,
                                      self.relu2, self.dropout2)

        # if the input and output channel counts differ, project the input with a 1x1 convolution so the channels match
        self.downsample = modules.Conv1d(n_inputs, n_outputs,
                                         1) if n_inputs != n_outputs else None
        self.relu = modules.ReLU()
        self.init_weights()
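In a TCN-style residual block like this one, the output of the convolutional stack is added to the (possibly 1x1-projected) input. A minimal forward sketch assuming that layout (not from the original source):

    def forward(self, x):
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        return self.relu(out + res)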
Example #7
 def __init__(self, in_planes, reduction=16):
     super(SELayer, self).__init__()
     # returns a feature map of size 1 per channel; the number of channels is unchanged
     self.avg_pool = nn.AdaptiveAvgPool1d(1)
     self.fc = nn.Sequential(
         nn.Linear(in_planes, in_planes // reduction, bias=False),
         nn.ReLU(),  # with inplace=True the result overwrites the input tensor; leaving the default False is safest, otherwise it can raise an error
         nn.Linear(in_planes // reduction, in_planes, bias=False),
         nn.Sigmoid())
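A squeeze-and-excitation layer of this form usually rescales each channel by the weight produced from the pooled descriptor. A minimal forward sketch under that assumption (1D variant, not from the original source):

 def forward(self, x):
     b, c, _ = x.size()
     y = self.avg_pool(x).view(b, c)   # squeeze: (B, C, L) -> (B, C)
     y = self.fc(y).view(b, c, 1)      # excitation: per-channel weights in (0, 1)
     return x * y.expand_as(x)         # rescale each channel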
Example #8
def conv_block(in_channels, out_channels):
    return nn.Sequential(
        nn.Conv1d(in_channels,
                  out_channels,
                  kernel_size=K_SIZE,
                  padding=PADDING),
        nn.BatchNorm1d(out_channels),
        nn.ReLU(),
        nn.MaxPool1d(kernel_size=2),
    )
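conv_block relies on module-level constants K_SIZE and PADDING defined elsewhere in the project. A hypothetical usage sketch (the constant values below are assumed, not taken from the source):

K_SIZE, PADDING = 3, 1  # assumed values; the originals are defined elsewhere in the project
encoder = nn.Sequential(
    conv_block(1, 64),   # (B, 1, L)       -> (B, 64, L // 2)
    conv_block(64, 64),  # (B, 64, L // 2) -> (B, 64, L // 4)
)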
Example #9
 def __init__(self, gate_channels, reduction_ratio=16):
     super(ChannelGate, self).__init__()
     self.gate_channels = gate_channels
     self.gap1 = nn.AdaptiveAvgPool1d(output_size=1)
     self.gap2 = nn.AdaptiveAvgPool1d(output_size=1)
     self.mlp = nn.Sequential(
         Flatten(),
         nn.Linear(gate_channels, gate_channels // reduction_ratio),
         nn.ReLU(),
         nn.Linear(gate_channels // reduction_ratio, gate_channels))
Example #10
    def __init__(self, in_channels, key_channels, out_channels, scale=1, dropout=0.1, bn_type=None):
        super(SpatialOCR_Module, self).__init__()

        self.object_context_block = ObjectAttentionBlock(in_channels, key_channels, scale, bn_type)
        _in_channels = 2 * in_channels

        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(_in_channels, out_channels, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout2d(dropout)
        )
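The 2 * in_channels input of conv_bn_dropout suggests the object-context features are concatenated with the input features. A plausible forward sketch under that assumption (argument names are illustrative; assumes torch is imported):

    def forward(self, feats, proxy_feats):
        context = self.object_context_block(feats, proxy_feats)
        # concatenation doubles the channel count: 2 * in_channels
        return self.conv_bn_dropout(torch.cat([context, feats], dim=1))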
Example #11
 def __init__(self, DIM):
     super().__init__()
     NUM_BLOCK = 8
     FEATURE_CHN = 64
     x_dim = int(FEATURE_CHN *
                 (DIM // 2**NUM_BLOCK))  # 2048:8192; 1024:4096
     feature = 256  # 100(original), 64(CW2SQ), 32, 16. choose: 256, 16
     # print('The NUM of ConvBlocK: {}'.format(NUM_BLOCK))
     print('The FC features: {}\n'.format(feature))
     self.create_feat = nn.Linear(x_dim,
                                  feature)  # weight shape: (feature, x_dim)
     self.discriminator = nn.Sequential(nn.BatchNorm1d(feature), nn.ReLU(),
                                        nn.Linear(feature, 2))
Example #12
    def __init__(self, in_channels, out_channels, stride=1, M=2, r=16, L=32):
        """
        :param in_channels:  输入通道维度
        :param out_channels: 输出通道维度   原论文中 输入输出通道维度相同
        :param stride:  步长,默认为1
        :param M:  分支数
        :param r: 特征Z的长度,计算其维度d 时所需的比率(论文中 特征S->Z 是降维,故需要规定 降维的下界)
        :param L:  论文中规定特征Z的下界,默认为32
        """
        super(SK_Conv1d, self).__init__()
        d = max(in_channels // r, L)  # compute the length d of the vector Z
        self.M = M
        self.out_channels = out_channels
        self.conv = nn.ModuleList()  # one convolution branch per kernel configuration
        for i in range(M):
            # For efficiency the paper replaces the 5x5 dilated convolution with a 3x3 convolution
            # with dilation=2, and suggests grouped convolution with G=32, so each group
            # only involves out_channels / groups = 2 channels.
            self.conv.append(
                nn.Sequential(
                    nn.Conv1d(in_channels,
                              out_channels,
                              3,
                              stride,
                              padding=1 + i,
                              dilation=1 + i,
                              groups=32,
                              bias=False), nn.BatchNorm1d(out_channels),
                    nn.ReLU(inplace=True)))
        self.global_pool = nn.AdaptiveAvgPool1d(1)  # adaptive pooling to size 1, i.e. global average pooling (GAP)

        self.fc1 = nn.Sequential(nn.Conv1d(out_channels, d, 1, bias=False),
                                 nn.BatchNorm1d(d),
                                 nn.ReLU(inplace=True))  # reduce dimension
        self.fc2 = nn.Conv1d(d, out_channels * M, 1, 1, bias=False)  # restore dimension
        # self.fcs = nn.ModuleList(self.fc1, self.fc2)
        # softmax over dim=1 so that corresponding positions of the M branches sum to 1 (a + b + ... = 1)
        self.softmax = nn.Softmax(dim=1)
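A selective-kernel block of this kind typically fuses the branch outputs, derives per-branch attention with fc1/fc2, and mixes the branches with the softmax weights. A forward sketch under that assumption (not from the original source):

    def forward(self, x):
        batch_size = x.size(0)
        feats = [conv(x) for conv in self.conv]              # M branch outputs, each (B, C, L)
        U = sum(feats)                                       # element-wise fusion
        z = self.fc1(self.global_pool(U))                    # squeeze to (B, d, 1)
        a_b = self.fc2(z).reshape(batch_size, self.M, self.out_channels, 1)
        a_b = self.softmax(a_b)                              # attention over the M branches (dim=1)
        feats = torch.stack(feats, dim=1)                    # (B, M, C, L)
        return (feats * a_b).sum(dim=1)                      # weighted sum of branches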
Example #13
    def __init__(self, n_states, n_actions, n_hidden, lr):
        super(ActorCritic, self).__init__()

        self.input = nn.Linear(n_states, n_hidden)
        self.hidden_1 = nn.Linear(n_hidden, n_hidden)
        self.hidden_2 = nn.Linear(n_hidden, n_hidden)
        self.out_actor_sigma = nn.Linear(n_hidden, n_actions)
        self.out_actor_mu = nn.Linear(n_hidden, n_actions)
        self.out_critic = nn.Sequential(nn.Linear(n_hidden, n_hidden),
                                        nn.ReLU(), nn.Linear(n_hidden, 1))

        #self.bn_1 = nn.BatchNorm1d(n_hidden)
        #self.bn_2 = nn.BatchNorm1d(n_hidden)

        self.optimizer = optim.SGD(self.parameters(), lr=lr)
Example #14
    def __init__(self, in_planes, planes, kernel_size, padding, dilation):
        """
        One single ASPP module
        :param in_planes: input channels
        :param planes: output channels
        :param kernel_size: kernel size in conv
        :param padding: padding
        :param dilation: dilation
        """
        super(_ASPPModule, self).__init__()
        self.atrous_conv = nn.Conv2d(in_planes, planes, kernel_size=kernel_size,
                                     stride=1, padding=padding, dilation=dilation, bias=False)
        self.bn = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU()

        self._init_weight()
Example #15
 def __init__(self, inplane, plane, stride=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplane, plane, kernel_size=1, bias=False)
     self.bn1 = nn.BatchNorm2d(plane)
     self.conv2 = nn.Conv2d(plane,
                            plane,
                            kernel_size=3,
                            padding=1,
                            stride=stride,
                            bias=False)
     self.bn2 = nn.BatchNorm2d(plane)
     self.conv3 = nn.Conv2d(plane,
                            self.expansion * plane,
                            kernel_size=1,
                            bias=False)
     self.bn3 = nn.BatchNorm2d(self.expansion * plane)
     self.downsample = downsample
     self.stride = stride
     self.relu3 = nn.ReLU(inplace=True)
Example #16
    def __init__(self):
        super().__init__()

        self.relu = nn.ReLU()
        self.avgpool2d = nn.AvgPool2d(2, stride=2)

        # input stage
        self.conv2d_1 = nn.Conv2d(1, 6, kernel_size=5, padding=2)
        self.batchnorm2d = nn.BatchNorm2d(6)

        # middle residual block
        self.conv2d_2 = nn.Conv2d(6, 6, kernel_size=3, padding=1)

        # output stage
        self.conv2d_3 = nn.Conv2d(6, 6, 5)
        self.flatten = nn.Flatten()
        self.sig = nn.Sigmoid()
        self.linear_1 = nn.Linear(6 * 5 * 5, 64)
        self.linear_2 = nn.Linear(64, 10)
Example #17
File: model.py  Project: RayneSun/VTPLSTM
    def __init__(self, rnn_size, embedding_size, input_size, output_size,
                 grids_width, grids_height, dropout_par, device):

        super(VPTLSTM, self).__init__()
        ###### parameter initialization ##########
        self.device = device
        self.rnn_size = rnn_size  # hidden size, 128 by default
        self.embedding_size = embedding_size  # spatial-coordinate embedding size (64); each state is a 64-dim vector
        self.input_size = input_size  # input size (6), the length of the feature vector
        self.output_size = output_size  # output size (5)
        self.grids_width = grids_width
        self.grids_height = grids_height
        self.dropout_par = dropout_par

        ############ network layer initialization ###############
        # input: embedded_input, hidden_states
        self.cell = nn.LSTMCell(2 * self.embedding_size, self.rnn_size)

        # input embedding layer: maps a vector of length input_size to embedding_size
        self.input_embedding_layer = nn.Linear(self.input_size,
                                               self.embedding_size)

        # input  [vehicle_num, grids_height, grids_width, rnn_size]          [26, 39, 5, 128]
        # output [vehicle_num, grids_height-12, grids_width-4, rnn_size//4]  [26, 27, 1, 32]
        self.social_tensor_conv1 = nn.Conv2d(in_channels=self.rnn_size,
                                             out_channels=self.rnn_size // 2,
                                             kernel_size=(5, 3),
                                             stride=(2, 1))
        self.social_tensor_conv2 = nn.Conv2d(in_channels=self.rnn_size // 2,
                                             out_channels=self.rnn_size // 4,
                                             kernel_size=(5, 3),
                                             stride=1)
        self.social_tensor_embed = nn.Linear(
            (self.grids_height - 15) * (self.grids_width - 4) *
            self.rnn_size // 4, self.embedding_size)

        # output embedding layer: maps the hidden_state to an output of length 5
        self.output_layer = nn.Linear(self.rnn_size, self.output_size)

        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(self.dropout_par)
Example #18
 def __init__(self,
              in_planes,
              planes,
              stride=1,
              downsample=None,
              dilation=1):
     super(Bottleneck, self).__init__()
     self.conv1 = conv1x1(in_planes, planes)
     self.bn1 = nn.BatchNorm2d(planes)
     self.conv2 = nn.Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            dilation=dilation,
                            padding=dilation,
                            bias=False)
     self.bn2 = nn.BatchNorm2d(planes)
     self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
     self.bn3 = nn.BatchNorm2d(planes * self.expansion)
     self.relu = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
Example #19
    def __init__(self, inp_dim, out_dim):
        """
        residual block
        :param inp_dim: input dimension
        :param out_dim: output dimension
        """
        super(Residual, self).__init__()
        # the channel must be at least 1
        out_dim_half = max(1, int(out_dim / 2))

        self.conv1 = nn.Conv2d(inp_dim, out_dim_half, 1, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_dim_half)
        self.relu = nn.ReLU()

        self.conv2 = nn.Conv2d(out_dim_half, out_dim_half, 3, 1, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_dim_half)

        self.conv3 = nn.Conv2d(out_dim_half, out_dim, 1, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_dim)

        self.skip_layer = nn.Conv2d(inp_dim, out_dim, 1, 1, bias=False)
        self.skip_layer_bn = nn.BatchNorm2d(out_dim)
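A plausible forward pass for this residual block (a sketch only; whether the skip projection is always applied, or only when the channel counts differ, depends on the original project):

    def forward(self, x):
        residual = self.skip_layer_bn(self.skip_layer(x))
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + residual)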
Example #20
    def __init__(self, block, layers, BatchNorm=None):
        super(ResNet, self).__init__()
        if BatchNorm is None:
            BatchNorm = nn.BatchNorm2d
        self._BatchNorm = BatchNorm

        # resnet head
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3,
                               self.in_planes,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = BatchNorm(self.in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # middle
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       dilation=1)
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       dilation=1)
        blocks = [1, 2, 4]
        self.layer4 = self._make_multi_grid_layer(block,
                                                  512,
                                                  blocks=blocks,
                                                  stride=1,
                                                  dilation=2)
Example #21
 def __init__(self, in_channels, key_channels, scale=1, bn_type=None):
     super(ObjectAttentionBlock, self).__init__()
     self.scale = scale
     self.in_channels = in_channels
     self.key_channels = key_channels
     self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
     self.f_pixel = nn.Sequential(
         nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels, kernel_size=1, stride=1,
                   padding=0, bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True),
         nn.Conv2d(in_channels=self.key_channels, out_channels=self.key_channels, kernel_size=1, stride=1,
                   padding=0, bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True)
     )
     self.f_object = nn.Sequential(
         nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels, kernel_size=1, stride=1, padding=0,
                   bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True),
         nn.Conv2d(in_channels=self.key_channels, out_channels=self.key_channels, kernel_size=1, stride=1, padding=0,
                   bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True)
     )
     self.f_down = nn.Sequential(
         nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels, kernel_size=1, stride=1, padding=0,
                   bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True)
     )
     self.f_up = nn.Sequential(
         nn.Conv2d(in_channels=self.key_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0,
                   bias=False),
         nn.BatchNorm2d(self.key_channels),
         nn.ReLU(inplace=True)
     )
Example #22
 def __init__(self, input_num, hidden_num, output_num):
     super(Net, self).__init__()
     self.seq = nm.Sequential(nm.Linear(input_num, hidden_num), nm.ReLU(),
                              nm.Linear(hidden_num, output_num))
Example #23
 def __init__(self, num_class=10):
     super(VGG16, self).__init__()
     self.feature = modules.Sequential(
         # #1,
         modules.Conv2d(3, 64, kernel_size=3, padding=1),
         modules.BatchNorm2d(64),
         modules.ReLU(True),
         #2
         modules.Conv2d(64, 64, kernel_size=3, padding=1),
         modules.BatchNorm2d(64),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #3
         modules.Conv2d(64, 128, kernel_size=3, padding=1),
         modules.BatchNorm2d(128),
         modules.ReLU(True),
         # modules.MaxPool2d(kernel_size=2,stride=2),
         #4
         modules.Conv2d(128, 128, kernel_size=3, padding=1),
         modules.BatchNorm2d(128),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #5
         modules.Conv2d(128, 256, kernel_size=3, padding=1),
         modules.BatchNorm2d(256),
         modules.ReLU(True),
         #6
         modules.Conv2d(256, 256, kernel_size=3, padding=1),
         modules.BatchNorm2d(256),
         modules.ReLU(True),
         #7
         modules.Conv2d(256, 256, kernel_size=3, padding=1),
         modules.BatchNorm2d(256),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #8
         modules.Conv2d(256, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #9
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #10
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #11
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #12
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #13
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         modules.AvgPool2d(kernel_size=1, stride=1),
     )
      # fully connected layers
     self.classifier = modules.Sequential(
         # #14
         modules.Linear(512, 4096),
         modules.ReLU(True),
         modules.Dropout(),
         #15
         modules.Linear(4096, 4096),
         modules.ReLU(True),
         modules.Dropout(),
         #16
         modules.Linear(4096, num_class),
     )