Example #1
 def forward(self, x):
     new_features = super(_DenseLayer, self).forward(x)
     if self.drop_rate > 0:
         # F is torch.nn.functional; the functional form takes the training flag explicitly
         new_features = F.dropout(new_features,
                                  p=self.drop_rate,
                                  training=self.training)
     return torch.cat([x, new_features], 1)
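This example uses the functional form F.dropout (from torch.nn.functional) so the layer can pass its own training flag explicitly. A minimal sketch, on a made-up tensor, of how the functional call differs from the nn.Dropout module used in the later examples:

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(4, 8)

# Functional form: only active when training=True is passed explicitly.
y_train = F.dropout(x, p=0.5, training=True)
y_eval = F.dropout(x, p=0.5, training=False)   # identity: nothing is zeroed

# Module form: picks up the mode from .train() / .eval() on the parent module.
drop = nn.Dropout(p=0.5)
drop.eval()
y_module = drop(x)                             # also an identity in eval mode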
Example #2
    def __init__(self, classes_num=10):
        super(AlexNet2, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(in_channels=3,
                      out_channels=64,
                      kernel_size=3,
                      stride=2,
                      padding=1), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2))

        self.layer2 = nn.Sequential(
            nn.Conv2d(in_channels=64,
                      out_channels=192,
                      kernel_size=3,
                      padding=1), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2))

        self.layer3 = nn.Sequential(
            nn.Conv2d(in_channels=192,
                      out_channels=384,
                      kernel_size=3,
                      padding=1), nn.ReLU(inplace=True))

        self.layer4 = nn.Sequential(
            nn.Conv2d(in_channels=384,
                      out_channels=256,
                      kernel_size=3,
                      padding=1), nn.ReLU(inplace=True))

        self.layer5 = nn.Sequential(
            nn.Conv2d(in_channels=256,
                      out_channels=256,
                      kernel_size=3,
                      padding=1), nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2))

        self.layer6 = nn.Sequential(
            nn.Dropout(), nn.Linear(in_features=256 * 2 * 2,
                                    out_features=4096), nn.ReLU(inplace=True))

        self.layer7 = nn.Sequential(
            nn.Dropout(), nn.Linear(in_features=4096, out_features=4096),
            nn.ReLU(inplace=True))

        self.layer8 = nn.Sequential(
            nn.Linear(in_features=4096, out_features=classes_num))
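The classifier head expects a flattened 256 * 2 * 2 feature map, which matches a 32 x 32 input such as CIFAR-10. The forward method is not part of the example; a minimal sketch under that input-size assumption:

    def forward(self, x):
        # assumes x has shape (N, 3, 32, 32)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)           # -> (N, 256, 2, 2)
        x = x.view(x.size(0), -1)    # flatten to (N, 256 * 2 * 2)
        x = self.layer6(x)
        x = self.layer7(x)
        return self.layer8(x)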
Example #3
 def __init__(self):
     super().__init__()
     self.effnet = EfficientNet.from_pretrained(
         'efficientnet-b3',
         weights_path=wp3,
     )
     self.effnet._conv_stem.in_channels = 1
     weight = self.effnet._conv_stem.weight.mean(1, keepdim=True)
     self.effnet._conv_stem.weight = nn.Parameter(weight)
     self.dropout = nn.Dropout(0.1)
     self.out = nn.Linear(1536, 24)
     self.step_scheduler_after = 'epoch'
     self.step_scheduler_metric = 'valid_label_rank_avg_prec_sc'
Example #4
    def __init__(self):
        super(SensorToDryspotBoolModel, self).__init__()
        self.dropout = nn.Dropout(0.1)
        self.maxpool = nn.MaxPool2d(2, 2)
        self.conv1 = nn.Conv2d(1, 32, (7, 7))
        self.conv2 = nn.Conv2d(32, 64, (5, 5))
        self.conv3 = nn.Conv2d(64, 128, (3, 3))
        self.conv4 = nn.Conv2d(128, 256, (3, 3))

        self.fc1 = nn.Linear(256, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, 128)
        self.fc_f = nn.Linear(128, 1)
Example #5
	def __init__(self, h, d_model, dropout=0.1):
		"""Initialize the class

		[Inputs]
		h : number of attention heads
		d_model : model dimension
		dropout : dropout rate"""
		super(MultiHeadedAttention, self).__init__()
		assert d_model % h == 0

		self.d_k = d_model // h
		self.h = h
		self.linears = clones(nn.Linear(d_model, d_model), 4)
		self.attn = None
		self.dropout = nn.Dropout(p=dropout)
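Only the constructor is shown. A forward pass in the style of the Annotated Transformer is sketched below; the attention helper (scaled dot-product attention) and the exact head bookkeeping are assumptions, since neither appears in the example:

	def forward(self, query, key, value, mask=None):
		"""Sketch only; follows the usual multi-head attention recipe."""
		if mask is not None:
			mask = mask.unsqueeze(1)          # same mask applied to every head
		nbatches = query.size(0)

		# 1) Linear projections, then split into h heads: (batch, h, seq, d_k).
		query, key, value = [
			lin(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
			for lin, x in zip(self.linears, (query, key, value))
		]

		# 2) Scaled dot-product attention per head (assumed helper, not shown here).
		x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)

		# 3) Concatenate the heads and apply the final linear projection.
		x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
		return self.linears[-1](x)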
Example #6
    def __init__(self, params): #embedding_size, hidden_size, vocab_size, batch_size, layers, dropout, use_gpu):
        super(LSTMLM, self).__init__()
        
        self.use_gpu = params["use_gpu"]
        self.hidden_size = params["rnn_size"]
        self.vocab_size = params["vocab_size"] 
        self.batch_size = params["batch_size"]
        self.embedding_size = params["embedding_size"]
        self.dropout = nn.Dropout(params["dropout"])
        self.layers = params["rnn_layers"]
        self.word_embeddings = nn.Embedding(self.vocab_size, self.embedding_size)

        # added separate layers to have control over each layer
        self.lstms = nn.ModuleList([])
        self.lstms.append(nn.LSTM(self.embedding_size, self.hidden_size))
        for l in range(1,self.layers):
            self.lstms.append(nn.Dropout(params["dropout"]))
            self.lstms.append(nn.LSTM(self.hidden_size, self.hidden_size))

        self.hidden2output = nn.Linear(self.hidden_size, self.vocab_size)
        self.hidden = self.init_hidden()
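init_hidden() is referenced but not included in the example. A plausible sketch, assuming one zero-initialised (h, c) pair per LSTM in self.lstms (the exact layout is an assumption):

    def init_hidden(self):
        # One (h, c) pair per LSTM layer; each nn.LSTM here has a single layer,
        # so the state shape is (1, batch, hidden_size).
        weight = next(self.parameters())
        return [(weight.new_zeros(1, self.batch_size, self.hidden_size),
                 weight.new_zeros(1, self.batch_size, self.hidden_size))
                for _ in range(self.layers)]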
Example #7
 def __init__(self,
              in_channels,
              out_channels,
              down=True,
              act="relu",
              use_dropout=False):
     super().__init__()
     self.conv = nn.Sequential(
         nn.Conv2d(in_channels,
                   out_channels,
                   4,
                   2,
                   1,
                   bias=False,
                   padding_mode="reflect") if down else nn.ConvTranspose2d(
                       in_channels, out_channels, 4, 2, 1, bias=False),
         nn.BatchNorm2d(out_channels),
         nn.ReLU() if act == "relu" else nn.LeakyReLU(0.2),
     )
     self.use_dropout = use_dropout
     self.dropout = nn.Dropout(0.5)
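The forward method is not included in the example; in the usual pix2pix-style block it applies the dropout module only when use_dropout was requested, roughly:

 def forward(self, x):
     x = self.conv(x)
     return self.dropout(x) if self.use_dropout else x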
Example #8
 def forward(self, inp, adj):
     """
     inp: input features [N, in_features]; in_features is the length of each node's feature vector
     adj: adjacency matrix of the graph [N, N], with entries that are either zero or one
     """
     h = torch.mm(inp, self.W)   # [N, out_features]
     N = h.size()[0]    # N: number of nodes in the graph
     a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)], dim=1).view(N, N, 2 * self.out_features)
     # [N, N, 2*out_features]
     e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
     # [N, N, 1] => [N, N] unnormalised attention coefficients
     zero_vec = -1e12 * torch.ones_like(e)    # missing edges are pushed towards negative infinity
     attention = torch.where(adj > 0, e, zero_vec)   # [N, N]
     # If an adjacency entry is positive the two nodes are connected and the coefficient is kept;
     # otherwise it is masked with a very large negative value so softmax effectively ignores it.
     attention = F.softmax(attention, dim=1)    # shape stays [N, N]; normalised attention weights
     attention = F.dropout(attention, self.dropout, training=self.training)   # dropout against overfitting
     h_prime = torch.matmul(attention, h)  # [N, N].[N, out_features] => [N, out_features]
     # each node's representation is updated from its neighbours, weighted by attention
     if self.concat:
         return self.elu(h_prime)
     else:
         return h_prime
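The constructor of this graph attention layer is not part of the example. A sketch of what the forward pass above implies, following the common GAT layer recipe (the alpha parameter and the Xavier initialisation are assumptions):

 def __init__(self, in_features, out_features, dropout, alpha=0.2, concat=True):
     super().__init__()
     self.out_features = out_features
     self.dropout = dropout          # dropout probability used in forward()
     self.concat = concat
     # learnable projection W and attention vector a
     self.W = nn.Parameter(torch.empty(in_features, out_features))
     self.a = nn.Parameter(torch.empty(2 * out_features, 1))
     nn.init.xavier_uniform_(self.W, gain=1.414)
     nn.init.xavier_uniform_(self.a, gain=1.414)
     self.leakyrelu = nn.LeakyReLU(alpha)
     self.elu = nn.ELU()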
Example #9
 def __init__(self, embedding_dim, hidden_dim, encoder_dim, dropout=0.1):
     super(Encoder, self).__init__()
     self.linear1 = nn.Linear(embedding_dim, hidden_dim)
     self.linear2 = nn.Linear(hidden_dim, encoder_dim)
     self.activation = nn.Tanh()
     self.dropout = nn.Dropout(dropout)
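A minimal forward sketch for this encoder (the original forward is not shown; the linear -> activation -> dropout -> linear ordering is an assumption):

 def forward(self, x):
     x = self.dropout(self.activation(self.linear1(x)))
     return self.linear2(x)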
Example #10
 def forward(self, x):
     x = F.dropout(x, self.dropout, training=self.training)   # dropout against overfitting
     x = torch.cat([att(x) for att in self.attentions], dim=1)  # concatenate the output of every attention head
     x = F.dropout(x, self.dropout, training=self.training)   # dropout against overfitting
     x = self.elu(self.out_att(x))   # output attention layer plus activation
     return F.log_softmax(x, dim=1)  # log_softmax is faster and numerically more stable
Example #11
 def __init__(self, model_part, seq_len, feature_num, global_conv, conv_layers,
              layer_sizes, learned, embedding_dim, activation, output_activation,
              transfer, transfer_dim, dropout, posembed=True, batchnorm=True):
     super().__init__()
     
     self.learned = learned
     if learned:
         self.embed = nn.Embedding(AMINO_ACID, LEARNED_DIM, padding_idx=0)
             
     self.conv2 = torch.nn.Conv1d(feature_num, global_conv["filters"], seq_len)
     self.active2 = get_activation(global_conv["activation"])
     
     # posembed
     self.addpos = posembed
     self.batchnorm = batchnorm
     if posembed:
         self.posembed = PositionEmbedding(seq_len, POS_DIM, "Random")
         feature_num = feature_num + POS_DIM*2
     
     self.clayer_num = len(conv_layers)
     self.kmer = 0
     # conv
     dw = seq_len
     kw = feature_num
     if self.clayer_num > 0:
         self.convs = nn.ModuleList()
         self.cactive = nn.ModuleList()
         
         if batchnorm:
             self.bns1 = nn.ModuleList()
         for i, conv_layer in enumerate(conv_layers):
             self.kmer += conv_layer["kernel_size"]-1
             self.convs.append(nn.Conv1d(kw, conv_layer["filters"], conv_layer["kernel_size"]))
             dw = dw - conv_layer["kernel_size"] + 1
             kw = conv_layer["filters"]
             self.cactive.append(get_activation(conv_layer["activation"]))
             if batchnorm:
                 self.bns1.append(nn.BatchNorm1d(conv_layer["filters"]))
     else:
         raise ValueError("The number of convolution layer must be larger than zero.")
     
     if batchnorm:
         self.bns1.append(nn.BatchNorm1d(global_conv["filters"]))
     
     self.model_part = model_part
     self.attn, output_size = aggregate_feature(model_part, kw, dw)
     
     output_size = output_size + global_conv["filters"]
     self.layer_num = len(layer_sizes)
     
     self.transfer = transfer
     if transfer:
         self.trans_layer = nn.Linear(transfer_dim, int(output_size/2))
         output_size = int(output_size*3/2)
     
     if self.layer_num > 0:
         if batchnorm:
             self.bns2 = nn.ModuleList()
         self.denses = nn.ModuleList()
         self.dactive = nn.ModuleList()
         
         for i, layer_size in enumerate(layer_sizes):
             self.denses.append(nn.Linear(output_size, layer_size))
             output_size = layer_size
             self.dactive.append(get_activation(activation))
             
             if batchnorm:
                 self.bns2.append(nn.BatchNorm1d(layer_size))
     
     self.dropout = None
     
     if dropout > 0:
         self.dropout = nn.Dropout(dropout)
         
     self.output_layer = nn.Linear(output_size, embedding_dim)
     self.output_active = get_activation(output_activation)