Example #1
    def __init__(self, num_classes):
        super(Decoder, self).__init__()
        low_level_in_planes = 256

        self.conv1 = nn.Conv2d(low_level_in_planes, 48, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(48)
        self.relu = nn.ReLU()
        # 304 = 256 (ASPP output channels) + 48 (reduced low-level features)
        self.last_conv = nn.Sequential(
            nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1,
                      bias=False), nn.BatchNorm2d(256), nn.ReLU(),
            nn.Dropout(0.5),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1,
                      bias=False), nn.BatchNorm2d(256), nn.ReLU(),
            nn.Dropout(0.1),
            nn.Conv2d(256, num_classes, kernel_size=1, stride=1))
        self._init_weight()
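`_init_weight` is called above but not shown; a minimal sketch of what such a helper typically does (the exact scheme is an assumption) could be:

    def _init_weight(self):
        # Assumed initialization: Kaiming for convolutions, constants for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)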
Example #2
    def __init__(self,
                 input_size,
                 hidden_size,
                 output_size,
                 num_layers=1,
                 dropout=0,
                 bidirectional=False):
        super(RNNModel, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.dropout = dropout
        self.bidirectional = bidirectional

        if bidirectional:
            num_directions = 2
        else:
            num_directions = 1

        # Build the model
        self.rnn = modules.LSTM(input_size,
                                hidden_size,
                                num_layers,
                                batch_first=True,
                                dropout=self.dropout,
                                bidirectional=self.bidirectional)
        if self.dropout != 0:
            self.drop = modules.Dropout(dropout)
        # Fully connected output layer
        self.fc = modules.Linear(hidden_size * num_directions, output_size)
        # Output activation
        self.activation = modules.LogSoftmax(dim=1)
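The snippets above use a `modules` namespace; assuming it is simply `torch.nn` imported under that alias, a minimal usage sketch (all sizes are illustrative) could look like this:

import torch
from torch import nn as modules  # assumption: `modules` aliases torch.nn

model = RNNModel(input_size=32, hidden_size=64, output_size=10,
                 num_layers=2, dropout=0.5, bidirectional=True)
x = torch.randn(8, 20, 32)                   # (batch, seq_len, input_size); batch_first=True
out, _ = model.rnn(x)                        # out: (8, 20, 64 * 2) for the bidirectional LSTM
log_probs = model.activation(model.fc(out[:, -1, :]))  # (8, 10) log-probabilities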
Example #3
    def __init__(self, model_name, input_size, hidden_size, batch_size, kernel_size,
                 out_channels, num_layers=1, dropout=0, bidirectional=False, bn=False):
        super(LACModelUnit, self).__init__()

        # Number of available GPUs
        self.cuda_ids = np.arange(torch.cuda.device_count())

        self.model_name = model_name
        self.input_size = input_size
        self.hidden_size = hidden_size
        # Per-GPU batch size (assumes at least one visible GPU)
        self.batch_size = int(batch_size / len(self.cuda_ids))
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.dropout = dropout
        self.bidirectional = bidirectional
        self.use_bn = bn  # flag indicating whether to apply BatchNorm (the module itself is stored as self.bn below)

        # LSTM
        self.rnn = modules.LSTM(self.input_size, self.hidden_size, self.num_layers,
                                batch_first=True, bidirectional=self.bidirectional)
        # Activation applied to the LSTM output
        self.rnn_act = modules.ReLU()
        # 1D CNN
        self.cnn = modules.Conv1d(1, self.out_channels, self.kernel_size)
        # BatchNorm layer
        self.bn = modules.BatchNorm1d(self.out_channels)
        # Activation applied to the CNN output
        self.cnn_act = modules.Tanh()
        # Dropout layer
        self.drop = modules.Dropout(dropout)

        # Initialize the LSTM hidden and cell states
        self.lstm_hidden, self.lstm_cell = self.init_hidden_cell(self.batch_size)
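`init_hidden_cell` is not shown in this snippet; a plausible sketch, assuming it returns zero-initialized states with the shape `nn.LSTM` expects:

    def init_hidden_cell(self, batch_size):
        # Assumed helper: zero hidden/cell states of shape
        # (num_layers * num_directions, batch, hidden_size).
        num_directions = 2 if self.bidirectional else 1
        shape = (self.num_layers * num_directions, batch_size, self.hidden_size)
        return torch.zeros(shape), torch.zeros(shape)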
Example #4
    def __init__(self):
        super(ASPP, self).__init__()

        in_planes = 2048
        dilations = [1, 6, 12, 18]

        # all aspp module output feature maps with channel 256
        self.aspp1 = _ASPPModule(in_planes, planes=256, kernel_size=1, padding=0, dilation=dilations[0])
        self.aspp2 = _ASPPModule(in_planes, planes=256, kernel_size=3, padding=dilations[1], dilation=dilations[1])
        self.aspp3 = _ASPPModule(in_planes, planes=256, kernel_size=3, padding=dilations[2], dilation=dilations[2])
        self.aspp4 = _ASPPModule(in_planes, planes=256, kernel_size=3, padding=dilations[3], dilation=dilations[3])

        # perform global average pooling on the last feature map of the backbone
        # (the batch size must be greater than 1 in training mode, otherwise BatchNorm raises an error on the 1x1 output)
        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
                                             nn.Conv2d(in_planes, 256, 1, stride=1, bias=False),
                                             nn.BatchNorm2d(256),
                                             nn.ReLU())

        self.p1 = nn.AdaptiveAvgPool2d(1)
        self.p2 = nn.Conv2d(in_planes, 256, 1, stride=1, bias=False)
        self.p3 = nn.BatchNorm2d(256)

        self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(256)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
        self._init_weight()
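Only the constructor is shown; the usual forward pass for this layout concatenates the five 256-channel branches, which is where conv1's 1280 input channels come from. A sketch, assuming `torch.nn.functional` is imported as `F`:

    def forward(self, x):
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)  # 5 * 256 = 1280 channels
        x = self.relu(self.bn1(self.conv1(x)))
        return self.dropout(x)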
Example #5
    def __init__(self,
                 n_inputs,
                 n_outputs,
                 kernel_size,
                 stride,
                 dilation,
                 padding,
                 dropout=0.2):
        super(TemporalBlock, self).__init__()
        # First dilated convolution of the residual block.
        # The conv output has shape (batch, n_outputs, seq_len + padding); weight_norm re-parameterizes the layer's weights.
        self.conv1 = weight_norm(
            modules.Conv1d(n_inputs,
                           n_outputs,
                           kernel_size,
                           stride=stride,
                           padding=padding,
                           dilation=dilation))
        # Chomp off the extra padded steps so the output keeps seq_len time steps
        self.chomp1 = Chomp1d(padding)
        self.relu1 = modules.ReLU()
        self.dropout1 = modules.Dropout(dropout)

        # Second dilated convolution of the residual block
        self.conv2 = weight_norm(
            modules.Conv1d(n_outputs,
                           n_outputs,
                           kernel_size,
                           stride=stride,
                           padding=padding,
                           dilation=dilation))
        self.chomp2 = Chomp1d(padding)
        self.relu2 = modules.ReLU()
        self.dropout2 = modules.Dropout(dropout)

        # Chain the convolutional blocks into a sequential network
        self.net = modules.Sequential(self.conv1, self.chomp1, self.relu1,
                                      self.dropout1, self.conv2, self.chomp2,
                                      self.relu2, self.dropout2)

        # If the input and output channel counts differ, a 1x1 convolution matches them so the residual can be added
        self.downsample = modules.Conv1d(n_inputs, n_outputs,
                                         1) if n_inputs != n_outputs else None
        self.relu = modules.ReLU()
        self.init_weights()
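`Chomp1d` is referenced above but not defined in this snippet; in the standard TCN implementation it simply slices off the trailing padded time steps, roughly:

class Chomp1d(modules.Module):
    """Assumed helper: drop the last `chomp_size` time steps added by the causal padding."""

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        # (batch, channels, seq_len + padding) -> (batch, channels, seq_len)
        return x[:, :, :-self.chomp_size].contiguous()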
Example #6
    def __init__(self, n_states, n_actions, n_hidden, lr, device):
        super(DDPGActor, self).__init__()
        self.device = device

        self.input = nn.Linear(n_states, n_hidden)
        self.l1 = nn.Linear(n_hidden, n_hidden)
        self.out = nn.Linear(n_hidden, n_actions)

        self.dropout = nn.Dropout(0.1)

        self.optimizer = optim.SGD(self.parameters(), lr=lr)
        self.to(device)
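Only the constructor is shown; a plausible forward pass (the tanh squashing of the action output is an assumption, though common for DDPG actors) might be:

    def forward(self, state):
        # Hypothetical forward pass; the final tanh bounds actions to [-1, 1].
        x = torch.relu(self.input(state))
        x = self.dropout(torch.relu(self.l1(x)))
        return torch.tanh(self.out(x))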
Example #7
    def __init__(self,
                 h,
                 d_model,
                 k,
                 last_feat_height,
                 last_feat_width,
                 scales=1,
                 dropout=0.1,
                 need_attn=False):
        """
        :param h: number of self attention head
        :param d_model: dimension of model
        :param dropout:
        :param k: number of keys
        """
        super(DeformableHeadAttention, self).__init__()
        assert h == 8  # the number of heads is currently fixed to 8, following the paper
        assert d_model % h == 0
        # We assume d_v always equals d_k, d_q = d_k = d_v = d_m / h
        self.d_k = int(d_model / h)
        self.h = h

        self.q_proj = nn.Linear(d_model, d_model)
        self.k_proj = nn.Linear(d_model, d_model)

        self.scales_hw = []
        for i in range(scales):
            self.scales_hw.append(
                [last_feat_height * 2**i, last_feat_width * 2**i])

        self.dropout = None
        if dropout > 0:
            self.dropout = nn.Dropout(p=dropout)

        self.k = k
        self.scales = scales
        self.last_feat_height = last_feat_height
        self.last_feat_width = last_feat_width

        self.offset_dims = 2 * self.h * self.k * self.scales
        self.A_dims = self.h * self.k * self.scales

        # 2*M*L*K outputs for the sampling offsets, M*L*K for the attention weights A_mlqk
        self.offset_proj = nn.Linear(d_model, self.offset_dims)
        self.A_proj = nn.Linear(d_model, self.A_dims)

        self.wm_proj = nn.Linear(d_model, d_model)

        self.need_attn = need_attn

        self.attns = []
        self.offsets = []
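For reference, a minimal construction sketch with illustrative (hypothetical) sizes that satisfy the asserts above:

attn = DeformableHeadAttention(h=8, d_model=256, k=4,
                               last_feat_height=16, last_feat_width=16,
                               scales=4, dropout=0.1, need_attn=False)
# offset_proj then predicts 2 * h * k * scales = 256 offsets per query,
# and A_proj predicts h * k * scales = 128 attention weights.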
Example #8
    def __init__(self, rnn_size, embedding_size, input_size, output_size,
                 grids_width, grids_height, dropout_par, device):

        super(VPTLSTM, self).__init__()
        ###### Parameter initialization ##########
        self.device = device
        self.rnn_size = rnn_size  # hidden size, 128 by default
        self.embedding_size = embedding_size  # spatial-coordinate embedding size (64); each state is a 64-dim vector
        self.input_size = input_size  # input size (6), the length of the feature vector
        self.output_size = output_size  # output size (5)
        self.grids_width = grids_width
        self.grids_height = grids_height
        self.dropout_par = dropout_par

        ############ Network layer initialization ###############
        # Inputs: embedded_input, hidden_states
        self.cell = nn.LSTMCell(2 * self.embedding_size, self.rnn_size)

        # Input embedding layer: maps a vector of length input_size to embedding_size
        self.input_embedding_layer = nn.Linear(self.input_size,
                                               self.embedding_size)

        # Input:  [vehicle_num, grids_height, grids_width, rnn_size], e.g. [26, 39, 5, 128]
        # Output: [vehicle_num, grids_height - 12, grids_width - 4, rnn_size / 4], e.g. [26, 27, 1, 32]
        self.social_tensor_conv1 = nn.Conv2d(in_channels=self.rnn_size,
                                             out_channels=self.rnn_size // 2,
                                             kernel_size=(5, 3),
                                             stride=(2, 1))
        self.social_tensor_conv2 = nn.Conv2d(in_channels=self.rnn_size // 2,
                                             out_channels=self.rnn_size // 4,
                                             kernel_size=(5, 3),
                                             stride=1)
        self.social_tensor_embed = nn.Linear(
            (self.grids_height - 15) * (self.grids_width - 4) *
            self.rnn_size // 4, self.embedding_size)

        # Output layer: maps the hidden state (rnn_size) to output_size
        self.output_layer = nn.Linear(self.rnn_size, self.output_size)

        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(self.dropout_par)
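A construction sketch using the sizes quoted in the comments above (the dropout value and device are illustrative):

import torch

model = VPTLSTM(rnn_size=128, embedding_size=64, input_size=6, output_size=5,
                grids_width=5, grids_height=39, dropout_par=0.5,
                device=torch.device('cpu'))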
Example #9
 def __init__(self, num_class=10):
     super(VGG16, self).__init__()
     self.feature = modules.Sequential(
         #1
         modules.Conv2d(3, 64, kernel_size=3, padding=1),
         modules.BatchNorm2d(64),
         modules.ReLU(True),
         #2
         modules.Conv2d(64, 64, kernel_size=3, padding=1),
         modules.BatchNorm2d(64),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #3
         modules.Conv2d(64, 128, kernel_size=3, padding=1),
         modules.BatchNorm2d(128),
         modules.ReLU(True),
         # modules.MaxPool2d(kernel_size=2,stride=2),
         #4
         modules.Conv2d(128, 128, kernel_size=3, padding=1),
         modules.BatchNorm2d(128),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #5
         modules.Conv2d(128, 256, kernel_size=3, padding=1),
         modules.BatchNorm2d(256),
         modules.ReLU(True),
         #6
         modules.Conv2d(256, 256, kernel_size=3, padding=1),
         modules.BatchNorm2d(256),
         modules.ReLU(True),
         #7
         modules.Conv2d(256, 256, kernel_size=3, padding=1),
         modules.BatchNorm2d(256),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #8
         modules.Conv2d(256, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #9
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #10
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         #11
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #12
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         #13
         modules.Conv2d(512, 512, kernel_size=3, padding=1),
         modules.BatchNorm2d(512),
         modules.ReLU(True),
         modules.MaxPool2d(kernel_size=2, stride=2),
         modules.AvgPool2d(kernel_size=1, stride=1),
     )
     # Fully connected layers
     self.classifier = modules.Sequential(
         #14
         modules.Linear(512, 4096),
         modules.ReLU(True),
         modules.Dropout(),
         #15
         modules.Linear(4096, 4096),
         modules.ReLU(True),
         modules.Dropout(),
         #16
         modules.Linear(4096, num_class),
     )
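Only the constructor is shown; the usual forward pass (an assumption) flattens the 512x1x1 feature map that 32x32 inputs produce after the five MaxPool2d stages and feeds it to the classifier:

 def forward(self, x):
     out = self.feature(x)
     out = out.view(out.size(0), -1)  # (batch, 512)
     return self.classifier(out)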